1
0
mirror of https://github.com/postgrespro/pg_probackup.git synced 2026-05-16 09:48:14 +02:00

Read value from ptrack_control via function call. Code cleanup

This commit is contained in:
Anastasia
2017-10-02 18:31:46 +03:00
parent 18e49a45f5
commit 315cf3bc92
5 changed files with 64 additions and 186 deletions
+39 -26
View File
@@ -112,6 +112,7 @@ static char *pg_ptrack_get_and_clear(Oid tablespace_oid,
Oid db_oid,
Oid rel_oid,
size_t *result_size);
static XLogRecPtr get_last_ptrack_lsn(void);
/* Check functions */
static void check_server_version(void);
@@ -1814,25 +1815,6 @@ backup_disconnect(bool fatal, void *userdata)
pgut_disconnect(master_conn);
}
/* Count bytes in file */
static long
file_size(const char *file_path)
{
long r;
FILE *f = fopen(file_path, "r");
if (!f)
{
elog(ERROR, "%s: cannot open file \"%s\" for reading: %s\n",
PROGRAM_NAME ,file_path, strerror(errno));
return -1;
}
fseek(f, 0, SEEK_END);
r = ftell(f);
fclose(f);
return r;
}
/*
* Take a backup of the PGDATA at a file level.
* Copy all directories and files listed in backup_files_list.
@@ -2044,6 +2026,10 @@ parse_backup_filelist_filenames(parray *files, const char *root)
}
else if (isdigit(filename[0]))
{
/*
 * TODO: Files of this type can be compressed by cfs.
 * Check that and do not mark them with 'is_datafile' flag.
 */
char *forkNameptr;
char *suffix = palloc(MAXPGPATH);;
@@ -2090,6 +2076,12 @@ parse_backup_filelist_filenames(parray *files, const char *root)
}
}
}
if (strcmp(filename, "pg_internal.init") == 0)
{
elog(INFO, "filename %s, path %s, dbOid %u, tblspcOid %u is_datafile %s",
filename, file->path, file->dbOid, file->tblspcOid, file->is_datafile?"true":"false");
}
}
}
@@ -2217,16 +2209,18 @@ make_pagemap_from_ptrack(parray *files)
Assert(filename != NULL);
filename++;
/*
* The function pg_ptrack_get_and_clear_db returns true
* if there was a ptrack_init file.
* And always backup all files from template0 database and global/
*/
if((strcmp(filename, "1") == 0) ||
(strcmp(filename, "12442") == 0))
/* Always backup all files from template0, template1 databases */
if((file->dbOid == 1) || //dbOid of template1 database
(file->dbOid == 12442)) //dbOid of template0 database
{
is_template = true;
}
/*
* The function pg_ptrack_get_and_clear_db returns true
* if there was a ptrack_init file.
* Also ignore ptrack files for global tablespace,
* to avoid any possible specific errors.
*/
else if ((file->tblspcOid == GLOBALTABLESPACE_OID) ||
pg_ptrack_get_and_clear_db(file->dbOid, file->tblspcOid))
{
@@ -2441,3 +2435,22 @@ StreamLog(void *arg)
PQfinish(conn);
conn = NULL;
}
/*
* Get lsn of the moment when ptrack was enabled the last time.
*/
static XLogRecPtr
get_last_ptrack_lsn(void)
{
PGresult *res;
XLogRecPtr lsn;
res = pgut_execute(backup_conn, "select pg_ptrack_control_lsn()", 0, NULL);
lsn = atoi(PQgetvalue(res, 0, 0));
elog(INFO, "get_last_ptrack_lsn(): lsn %lu", lsn);
PQclear(res);
return lsn;
}
+25 -136
View File
@@ -22,6 +22,7 @@
#include <common/pg_lzcompress.h>
#include <zlib.h>
/* Implementation of zlib compression method */
static size_t zlib_compress(void* dst, size_t dst_size, void const* src, size_t src_size)
{
uLongf compressed_size = dst_size;
@@ -29,6 +30,7 @@ static size_t zlib_compress(void* dst, size_t dst_size, void const* src, size_t
return rc == Z_OK ? compressed_size : rc;
}
/* Implementation of zlib compression method */
static size_t zlib_decompress(void* dst, size_t dst_size, void const* src, size_t src_size)
{
uLongf dest_len = dst_size;
@@ -36,6 +38,10 @@ static size_t zlib_decompress(void* dst, size_t dst_size, void const* src, size_
return rc == Z_OK ? dest_len : rc;
}
/*
* Compresses source into dest using algorithm. Returns the number of bytes
* written in the destination buffer, or -1 if compression fails.
*/
static size_t
do_compress(void* dst, size_t dst_size, void const* src, size_t src_size, CompressAlg alg)
{
@@ -53,6 +59,10 @@ do_compress(void* dst, size_t dst_size, void const* src, size_t src_size, Compre
return -1;
}
/*
* Decompresses source into dest using algorithm. Returns the number of bytes
* decompressed in the destination buffer, or -1 if decompression fails.
*/
static size_t
do_decompress(void* dst, size_t dst_size, void const* src, size_t src_size, CompressAlg alg)
{
@@ -70,8 +80,10 @@ do_decompress(void* dst, size_t dst_size, void const* src, size_t src_size, Comp
return -1;
}
/*
* When copying datafiles to backup we validate and compress them block
* by block. Thus special header is required for each data block.
*/
typedef struct BackupPageHeader
{
BlockNumber block; /* block number */
@@ -125,6 +137,11 @@ backup_data_page(pgFile *file, XLogRecPtr prev_backup_start_lsn,
header.block = blknum;
offset = blknum * BLCKSZ;
/*
* Read the page and verify its header and checksum.
* Under high write load it's possible that we've read partly
* flushed page, so try several times before throwing an error.
*/
while(try_checksum--)
{
if (fseek(in, offset, SEEK_SET) != 0)
@@ -223,12 +240,14 @@ backup_data_page(pgFile *file, XLogRecPtr prev_backup_start_lsn,
Assert (header.compressed_size <= BLCKSZ);
write_buffer_size = sizeof(header);
/* The page was successfully compressed */
if (header.compressed_size > 0)
{
memcpy(write_buffer, &header, sizeof(header));
memcpy(write_buffer + sizeof(header), compressed_page.data, header.compressed_size);
write_buffer_size += MAXALIGN(header.compressed_size);
}
/* The page compression failed. Write it as is. */
else
{
header.compressed_size = BLCKSZ;
@@ -324,8 +343,7 @@ backup_data_file(const char *from_root, const char *to_root,
/*
* Read each page, verify checksum and write it to backup.
* If page map is not empty we scan only changed blocks, otherwise
* backup all pages of the relation.
* If page map is empty backup all pages of the relation.
*/
if (file->pagemap.bitmapsize == 0)
{
@@ -336,6 +354,7 @@ backup_data_file(const char *from_root, const char *to_root,
n_blocks_read++;
}
}
/* If page map is not empty we scan only changed blocks, */
else
{
datapagemap_iterator_t *iter;
@@ -371,7 +390,7 @@ backup_data_file(const char *from_root, const char *to_root,
FIN_CRC32C(file->crc);
/*
* If we have pagemap then file can't be a zero size.
* If we have pagemap then file in the backup can't be a zero size.
* Otherwise, we will clear the last file.
*/
if (n_blocks_read != 0 && n_blocks_read == n_blocks_skipped)
@@ -411,7 +430,7 @@ restore_data_file(const char *from_root,
/*
* Open backup file for write. We use "r+" at first to overwrite only
* modified pages for differential restore. If the file is not exists,
* modified pages for differential restore. If the file does not exist,
* re-open it with "w" to create an empty file.
*/
join_path_components(to_path, to_root, file->path + strlen(from_root) + 1);
@@ -745,136 +764,6 @@ copy_wal_file(const char *from_path, const char *to_path)
fclose(in);
}
/*
 * Save part of the file into backup.
 * skip_size - size of the file in previous backup. We can skip it
 *			   and copy just the remaining part of the file.
 *
 * Returns false if the source file has disappeared (ENOENT is not an
 * error for backup-mode files), true on success; every other failure
 * aborts via elog(ERROR).  Side effects: resets and accumulates
 * file->read_size / file->write_size and sets file->is_partial_copy.
 */
bool
copy_file_partly(const char *from_root, const char *to_root,
				 pgFile *file, size_t skip_size)
{
	char		to_path[MAXPGPATH];
	FILE	   *in;
	FILE	   *out;
	size_t		read_len = 0;
	int			errno_tmp;
	struct stat st;
	char		buf[BLCKSZ];

	/* reset size summary */
	file->read_size = 0;
	file->write_size = 0;

	/* open backup mode file for read */
	in = fopen(file->path, "r");
	if (in == NULL)
	{
		/* maybe deleted, it's not error */
		if (errno == ENOENT)
			return false;

		elog(ERROR, "cannot open source file \"%s\": %s", file->path,
			 strerror(errno));
	}

	/* open backup file for write */
	join_path_components(to_path, to_root, file->path + strlen(from_root) + 1);

	out = fopen(to_path, "w");
	if (out == NULL)
	{
		/* reuse the outer errno_tmp; the old inner declaration shadowed it */
		errno_tmp = errno;
		fclose(in);
		elog(ERROR, "cannot open destination file \"%s\": %s",
			 to_path, strerror(errno_tmp));
	}

	/* stat source file to change mode of destination file */
	if (fstat(fileno(in), &st) == -1)
	{
		fclose(in);
		fclose(out);
		elog(ERROR, "cannot stat \"%s\": %s", file->path,
			 strerror(errno));
	}

	/* skip the part that was already saved by the previous backup */
	if (fseek(in, skip_size, SEEK_SET) < 0)
	{
		fclose(in);
		fclose(out);
		/* cast: size_t has no portable printf length modifier pre-C99 %zu */
		elog(ERROR, "cannot seek %lu of \"%s\": %s",
			 (unsigned long) skip_size, file->path, strerror(errno));
	}

	/*
	 * copy content
	 * NOTE: CRC is not computed for compressed files.
	 */
	for (;;)
	{
		if ((read_len = fread(buf, 1, sizeof(buf), in)) != sizeof(buf))
			break;

		if (fwrite(buf, 1, read_len, out) != read_len)
		{
			errno_tmp = errno;
			/* oops */
			fclose(in);
			fclose(out);
			elog(ERROR, "cannot write to \"%s\": %s", to_path,
				 strerror(errno_tmp));
		}

		/* full block: read_len == sizeof(buf) here */
		file->write_size += sizeof(buf);
		file->read_size += sizeof(buf);
	}

	/* save errno before feof() so the fread error cause is reported */
	errno_tmp = errno;
	if (!feof(in))
	{
		fclose(in);
		fclose(out);
		elog(ERROR, "cannot read backup mode file \"%s\": %s",
			 file->path, strerror(errno_tmp));
	}

	/* copy odd part: the final chunk shorter than BLCKSZ */
	if (read_len > 0)
	{
		if (fwrite(buf, 1, read_len, out) != read_len)
		{
			errno_tmp = errno;
			/* oops */
			fclose(in);
			fclose(out);
			elog(ERROR, "cannot write to \"%s\": %s", to_path,
				 strerror(errno_tmp));
		}

		file->write_size += read_len;
		file->read_size += read_len;
	}

	/* update file permission */
	if (chmod(to_path, st.st_mode) == -1)
	{
		errno_tmp = errno;
		fclose(in);
		fclose(out);
		elog(ERROR, "cannot change mode of \"%s\": %s", to_path,
			 strerror(errno_tmp));
	}

	/* add meta information needed for recovery */
	file->is_partial_copy = true;

	/* flush and fsync so the data is durable before reporting success */
	if (fflush(out) != 0 ||
		fsync(fileno(out)) != 0 ||
		fclose(out))
		elog(ERROR, "cannot write \"%s\": %s", to_path, strerror(errno));
	fclose(in);

	return true;
}
/*
* Calculate checksum of various files which are not copied from PGDATA,
* but created in process of backup, such as stream XLOG files,
-1
View File
@@ -159,7 +159,6 @@ pgFileInit(const char *path)
file->path = pgut_malloc(strlen(path) + 1);
strcpy(file->path, path); /* enough buffer size guaranteed */
file->is_cfs = false;
file->is_partial_copy = false;
file->compress_alg = NOT_DEFINED_COMPRESS;
return file;
}
-5
View File
@@ -98,8 +98,6 @@ typedef struct pgFile
int segno; /* Segment number for ptrack */
bool is_cfs; /* Flag to distinguish files compressed by CFS*/
bool is_database;
bool is_partial_copy; /* If the file was backed up via copy_file_partly().
* Only applies to is_cfs files. */
CompressAlg compress_alg; /* compression algorithm applied to the file */
volatile uint32 lock; /* lock for synchronization of parallel threads */
datapagemap_t pagemap; /* bitmap of pages updated since previous backup */
@@ -420,8 +418,6 @@ extern void restore_data_file(const char *from_root, const char *to_root,
extern bool copy_file(const char *from_root, const char *to_root,
pgFile *file);
extern void copy_wal_file(const char *from_root, const char *to_root);
extern bool copy_file_partly(const char *from_root, const char *to_root,
pgFile *file, size_t skip_size);
extern bool calc_file_checksum(pgFile *file);
@@ -449,7 +445,6 @@ extern void time2iso(char *buf, size_t len, time_t time);
extern const char *status2str(BackupStatus status);
extern void remove_trailing_space(char *buf, int comment_mark);
extern void remove_not_digit(char *buf, size_t len, const char *str);
extern XLogRecPtr get_last_ptrack_lsn(void);
extern uint32 get_data_checksum_version(bool safe);
extern char *base36enc(long unsigned int value);
extern long unsigned int base36dec(const char *text);
-18
View File
@@ -81,24 +81,6 @@ digestControlFile(ControlFileData *ControlFile, char *src, size_t size)
checkControlFile(ControlFile);
}
/*
 * Get lsn of the moment when ptrack was enabled the last time.
 *
 * Reads the XLogRecPtr stored at the start of global/ptrack_control.
 * Returns 0 (InvalidXLogRecPtr) if the file does not exist.
 */
XLogRecPtr
get_last_ptrack_lsn(void)
{
	char	   *buffer;
	size_t		size;
	XLogRecPtr	lsn;

	buffer = slurpFile(pgdata, "global/ptrack_control", &size, false);
	if (buffer == NULL)
		return 0;

	/*
	 * memcpy instead of *(XLogRecPtr *) buffer: the cast assumed
	 * 8-byte alignment of the buffer and violated strict aliasing.
	 */
	memcpy(&lsn, buffer, sizeof(lsn));

	/* slurpFile allocates the buffer; the old code leaked it */
	free(buffer);
	return lsn;
}
/*
* Utility shared by backup and restore to fetch the current timeline
* used by a node.