From b455420a18b1470726c3b44714cbc74800d40df2 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Fri, 7 Dec 2018 23:39:39 +0300 Subject: [PATCH 01/18] tests: more merge tests added --- tests/merge.py | 153 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 153 insertions(+) diff --git a/tests/merge.py b/tests/merge.py index c3b3169a..e4209441 100644 --- a/tests/merge.py +++ b/tests/merge.py @@ -316,6 +316,159 @@ class MergeTest(ProbackupTest, unittest.TestCase): node.cleanup() self.del_test_dir(module_name, fname) + def test_merge_compressed_and_uncompressed_backups_1(self): + """ + Test MERGE command with compressed and uncompressed backups + """ + fname = self.id().split(".")[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, "backup") + + # Initialize instance and backup directory + node = self.make_simple_node( + base_dir="{0}/{1}/node".format(module_name, fname), + initdb_params=["--data-checksums"], + pg_options={ + 'autovacuum': 'off' + } + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, "node", node) + self.set_archiving(backup_dir, "node", node) + node.slow_start() + + # Fill with data + node.pgbench_init(scale=5) + + # Do compressed FULL backup + self.backup_node(backup_dir, "node", node, options=[ + '--compress-algorithm=zlib', '--stream']) + show_backup = self.show_pb(backup_dir, "node")[0] + + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "FULL") + + # Change data + pgbench = node.pgbench(options=['-T', '20', '-c', '2', '--no-vacuum']) + pgbench.wait() + + # Do uncompressed DELTA backup + self.backup_node( + backup_dir, "node", node, backup_type="delta", + options=['--stream']) + + # Change data + pgbench = node.pgbench(options=['-T', '20', '-c', '2', '--no-vacuum']) + pgbench.wait() + + # Do compressed PAGE backup + self.backup_node( + backup_dir, "node", node, backup_type="page", + options=['--compress-algorithm=zlib']) + + pgdata = self.pgdata_content(node.data_dir) + + show_backup = self.show_pb(backup_dir, "node")[2] + page_id = show_backup["id"] + + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "PAGE") + + # Merge all backups + self.merge_backup(backup_dir, "node", page_id) + show_backups = self.show_pb(backup_dir, "node") + + self.assertEqual(len(show_backups), 1) + self.assertEqual(show_backups[0]["status"], "OK") + self.assertEqual(show_backups[0]["backup-mode"], "FULL") + + # Drop node and restore it + node.cleanup() + self.restore_node(backup_dir, 'node', node) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # Clean after yourself + node.cleanup() + self.del_test_dir(module_name, fname) + + def test_merge_compressed_and_uncompressed_backups_2(self): + """ + Test MERGE command with compressed and uncompressed backups + """ + fname = self.id().split(".")[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, "backup") + + # Initialize instance and backup directory + node = self.make_simple_node( + base_dir="{0}/{1}/node".format(module_name, fname), + initdb_params=["--data-checksums"], + pg_options={ + 'autovacuum': 'off' + } + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, "node", node) + self.set_archiving(backup_dir, "node", node) + node.slow_start() + + # Fill with data + node.pgbench_init(scale=5) + + # Do uncompressed FULL backup + self.backup_node(backup_dir, "node", node) + show_backup = self.show_pb(backup_dir, "node")[0] + + 
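A note on what these two merge tests exercise (the second one continues just below): mixing zlib-compressed and uncompressed backups in one chain forces MERGE to decompress pages taken from one backup before folding them into another. As a rough standalone illustration of what --compress-algorithm=zlib means for a single 8 kB block, here is a minimal zlib round trip; the page contents and names are made up, and this is a sketch, not pg_probackup's actual compression path.

    /* Minimal zlib round trip over one 8 kB page.  Illustrative only. */
    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>

    #define BLCKSZ 8192

    int main(void)
    {
        unsigned char page[BLCKSZ];
        unsigned char packed[BLCKSZ * 2];   /* compressed data can slightly exceed BLCKSZ */
        unsigned char unpacked[BLCKSZ];
        uLongf packed_len = sizeof(packed);
        uLongf unpacked_len = sizeof(unpacked);

        memset(page, 'x', sizeof(page));    /* stand-in for real page contents */

        if (compress2(packed, &packed_len, page, BLCKSZ, Z_DEFAULT_COMPRESSION) != Z_OK)
            return 1;

        /* Merging a compressed backup with an uncompressed one needs exactly
         * this step: get back to the raw page before rewriting it. */
        if (uncompress(unpacked, &unpacked_len, packed, packed_len) != Z_OK)
            return 1;

        printf("%d -> %lu -> %lu bytes\n", BLCKSZ,
               (unsigned long) packed_len, (unsigned long) unpacked_len);
        return memcmp(page, unpacked, BLCKSZ) != 0;
    }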
self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "FULL") + + # Change data + pgbench = node.pgbench(options=['-T', '20', '-c', '2', '--no-vacuum']) + pgbench.wait() + + # Do compressed DELTA backup + self.backup_node( + backup_dir, "node", node, backup_type="delta", + options=['--compress-algorithm=zlib', '--stream']) + + # Change data + pgbench = node.pgbench(options=['-T', '20', '-c', '2', '--no-vacuum']) + pgbench.wait() + + # Do uncompressed PAGE backup + self.backup_node( + backup_dir, "node", node, backup_type="page") + + pgdata = self.pgdata_content(node.data_dir) + + show_backup = self.show_pb(backup_dir, "node")[2] + page_id = show_backup["id"] + + self.assertEqual(show_backup["status"], "OK") + self.assertEqual(show_backup["backup-mode"], "PAGE") + + # Merge all backups + self.merge_backup(backup_dir, "node", page_id) + show_backups = self.show_pb(backup_dir, "node") + + self.assertEqual(len(show_backups), 1) + self.assertEqual(show_backups[0]["status"], "OK") + self.assertEqual(show_backups[0]["backup-mode"], "FULL") + + # Drop node and restore it + node.cleanup() + self.restore_node(backup_dir, 'node', node) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # Clean after yourself + node.cleanup() + self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") def test_merge_tablespaces(self): """ From c1903db22b0812f3b327a8e3a0cba3299413d7d2 Mon Sep 17 00:00:00 2001 From: Arthur Zakirov Date: Mon, 10 Dec 2018 16:19:51 +0300 Subject: [PATCH 02/18] Fix format warnings --- src/archive.c | 4 ++-- src/data.c | 12 ++++++------ src/restore.c | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/archive.c b/src/archive.c index 3333f096..774d12c4 100644 --- a/src/archive.c +++ b/src/archive.c @@ -27,7 +27,7 @@ do_archive_push(char *wal_file_path, char *wal_file_name, bool overwrite) char backup_wal_file_path[MAXPGPATH]; char absolute_wal_file_path[MAXPGPATH]; char current_dir[MAXPGPATH]; - int64 system_id; + uint64 system_id; bool is_compress = false; if (wal_file_name == NULL && wal_file_path == NULL) @@ -50,7 +50,7 @@ do_archive_push(char *wal_file_path, char *wal_file_name, bool overwrite) if(system_id != instance_config.system_identifier) elog(ERROR, "Refuse to push WAL segment %s into archive. Instance parameters mismatch." 
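Patch 02, which begins here, addresses format-string warnings: system_id becomes uint64, and printing 64-bit or size_t values with %ld/%lu is wrong wherever long is 32 bits (notably 64-bit Windows). The hunks switch to PostgreSQL's UINT64_FORMAT macro and %zu; the same idea in portable C99 looks like this, with <inttypes.h>'s PRIu64 playing the role of UINT64_FORMAT (values are illustrative):

    /* Portable printing of 64-bit and size_t values. */
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t system_id = UINT64_C(6624028980005234233);
        size_t   read_len  = 8192;

        /* "%ld" would truncate system_id on platforms where long is 32 bits. */
        printf("SYSTEM_ID = %" PRIu64 ", read %zu bytes\n", system_id, read_len);
        return 0;
    }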
- "Instance '%s' should have SYSTEM_ID = %ld instead of %ld", + "Instance '%s' should have SYSTEM_ID = " UINT64_FORMAT " instead of " UINT64_FORMAT, wal_file_name, instance_name, instance_config.system_identifier, system_id); diff --git a/src/data.c b/src/data.c index de649103..0d0c5e5c 100644 --- a/src/data.c +++ b/src/data.c @@ -232,8 +232,8 @@ read_page_from_file(pgFile *file, BlockNumber blknum, return 0; } else - elog(WARNING, "File: %s, block %u, expected block size %d," - "but read %lu, try again", + elog(WARNING, "File: %s, block %u, expected block size %u," + "but read %zu, try again", file->path, blknum, BLCKSZ, read_len); } @@ -382,7 +382,7 @@ prepare_page(backup_files_arg *arguments, else if (page_size != BLCKSZ) { free(ptrack_page); - elog(ERROR, "File: %s, block %u, expected block size %d, but read %lu", + elog(ERROR, "File: %s, block %u, expected block size %d, but read %zu", file->path, absolute_blknum, BLCKSZ, page_size); } else @@ -574,7 +574,7 @@ backup_data_file(backup_files_arg* arguments, if (file->size % BLCKSZ != 0) { fclose(in); - elog(ERROR, "File: %s, invalid file size %lu", file->path, file->size); + elog(ERROR, "File: %s, invalid file size %zu", file->path, file->size); } /* @@ -789,7 +789,7 @@ restore_data_file(const char *to_path, pgFile *file, bool allow_truncate, read_len = fread(compressed_page.data, 1, MAXALIGN(header.compressed_size), in); if (read_len != MAXALIGN(header.compressed_size)) - elog(ERROR, "cannot read block %u of \"%s\" read %lu of %d", + elog(ERROR, "cannot read block %u of \"%s\" read %zu of %d", blknum, file->path, read_len, header.compressed_size); /* @@ -1620,7 +1620,7 @@ check_file_pages(pgFile *file, XLogRecPtr stop_lsn, uint32 checksum_version, read_len = fread(compressed_page.data, 1, MAXALIGN(header.compressed_size), in); if (read_len != MAXALIGN(header.compressed_size)) - elog(ERROR, "cannot read block %u of \"%s\" read %lu of %d", + elog(ERROR, "cannot read block %u of \"%s\" read %zu of %d", blknum, file->path, read_len, header.compressed_size); COMP_FILE_CRC32(use_crc32c, crc, compressed_page.data, read_len); diff --git a/src/restore.c b/src/restore.c index 8770c358..e3591e06 100644 --- a/src/restore.c +++ b/src/restore.c @@ -480,7 +480,7 @@ restore_backup(pgBackup *backup) /* By default there are some error */ threads_args[i].ret = 1; - elog(LOG, "Start thread for num:%li", parray_num(files)); + elog(LOG, "Start thread for num:%zu", parray_num(files)); pthread_create(&threads[i], NULL, restore_files, arg); } From dd18928e1984927edafe175a2a1bb30f9f64d66e Mon Sep 17 00:00:00 2001 From: Arthur Zakirov Date: Mon, 10 Dec 2018 18:02:22 +0300 Subject: [PATCH 03/18] Make compiler happy --- src/utils/logger.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/src/utils/logger.c b/src/utils/logger.c index fa1c0039..a4f2d692 100644 --- a/src/utils/logger.c +++ b/src/utils/logger.c @@ -232,18 +232,23 @@ elog_internal(int elevel, bool file_only, const char *fmt, va_list args) * Write to stderr if the message was not written to log file. * Write to stderr if the message level is greater than WARNING anyway. 
*/ - if (write_to_stderr) + if (write_to_stderr && write_to_file) { write_elevel(stderr, elevel); - if (write_to_file) - vfprintf(stderr, fmt, std_args); - else - vfprintf(stderr, fmt, args); + + vfprintf(stderr, fmt, std_args); fputc('\n', stderr); fflush(stderr); - if (write_to_file) - va_end(std_args); + va_end(std_args); + } + else if (write_to_stderr) + { + write_elevel(stderr, elevel); + + vfprintf(stderr, fmt, args); + fputc('\n', stderr); + fflush(stderr); } exit_if_necessary(elevel); From 9ac30ada5443e97de9152de7bab5c3b9752b2abb Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Mon, 10 Dec 2018 18:49:39 +0300 Subject: [PATCH 04/18] bugfix: correctly log cmdline to logfile --- src/pg_probackup.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 17676876..04b5d486 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -331,15 +331,6 @@ main(int argc, char *argv[]) if (rc != -1 && !S_ISDIR(stat_buf.st_mode)) elog(ERROR, "-B, --backup-path must be a path to directory"); - /* command was initialized for a few commands */ - if (command) - { - elog_file(INFO, "command: %s", command); - - pfree(command); - command = NULL; - } - /* Option --instance is required for all commands except init and show */ if (backup_subcmd != INIT_CMD && backup_subcmd != SHOW_CMD && backup_subcmd != VALIDATE_CMD) @@ -390,6 +381,15 @@ main(int argc, char *argv[]) /* Initialize logger */ init_logger(backup_path, &instance_config.logger); + /* command was initialized for a few commands */ + if (command) + { + elog_file(INFO, "command: %s", command); + + pfree(command); + command = NULL; + } + /* * We have read pgdata path from command line or from configuration file. * Ensure that pgdata is an absolute path. From eead6b6307b464b67e567516609baac7fce7560c Mon Sep 17 00:00:00 2001 From: Arthur Zakirov Date: Mon, 10 Dec 2018 19:00:37 +0300 Subject: [PATCH 05/18] Free unnecessary file object --- src/dir.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/dir.c b/src/dir.c index 710598b7..ae8cb20b 100644 --- a/src/dir.c +++ b/src/dir.c @@ -443,6 +443,9 @@ dir_list_file(parray *files, const char *root, bool exclude, bool omit_symlink, parray_append(files, file); dir_list_file_internal(files, root, file, exclude, omit_symlink, black_list); + + if (!add_root) + pgFileFree(file); } /* From 385f979fe7daf91001eea4f4c123807bb6f576ee Mon Sep 17 00:00:00 2001 From: Arthur Zakirov Date: Tue, 11 Dec 2018 12:27:36 +0300 Subject: [PATCH 06/18] Make compiler happy again --- src/utils/logger.c | 98 ++++++++++++++++++++++++++-------------------- 1 file changed, 55 insertions(+), 43 deletions(-) diff --git a/src/utils/logger.c b/src/utils/logger.c index a4f2d692..ba054a62 100644 --- a/src/utils/logger.c +++ b/src/utils/logger.c @@ -40,10 +40,10 @@ typedef enum void pg_log(eLogType type, const char *fmt,...) pg_attribute_printf(2, 3); -static void elog_internal(int elevel, bool file_only, const char *fmt, va_list args) - pg_attribute_printf(3, 0); +static void elog_internal(int elevel, bool file_only, const char *message); static void elog_stderr(int elevel, const char *fmt, ...) 
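Patch 05 above is a pure ownership fix: dir_list_file() allocates a pgFile for the root entry but appends it to the files array only when add_root is set, so on the other path nothing owns the object and it must be freed. The convention in miniature; the types and helpers here are illustrative, not the real pgFile API, and allocation checks are elided:

    #include <stdlib.h>
    #include <string.h>

    struct entry { char *path; };

    static struct entry *entry_new(const char *path)
    {
        struct entry *e = malloc(sizeof(*e));
        e->path = strdup(path);
        return e;
    }

    static void entry_free(struct entry *e)
    {
        free(e->path);
        free(e);
    }

    static void list_dir(struct entry **out, size_t *n, const char *root, int add_root)
    {
        struct entry *file = entry_new(root);

        if (add_root)
            out[(*n)++] = file;   /* the array now owns the object */

        /* ... recurse into subdirectories, appending children ... */

        if (!add_root)
            entry_free(file);     /* nothing owns it: free, as patch 05 does */
    }

    int main(void)
    {
        struct entry *files[8];
        size_t nfiles = 0;

        list_dir(files, &nfiles, "backup/database", 1);   /* kept in the array */
        list_dir(files, &nfiles, "backup/database", 0);   /* freed inside */

        while (nfiles > 0)
            entry_free(files[--nfiles]);
        return 0;
    }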
pg_attribute_printf(2, 3); +static char *get_log_message(const char *fmt, va_list args) pg_attribute_printf(1, 0); /* Functions to work with log files */ static void open_logfile(FILE **file, const char *filename_format); @@ -74,7 +74,7 @@ init_logger(const char *root_path, LoggerConfig *config) /* Set log path */ if (config->log_directory == NULL) { - config->log_directory = palloc(MAXPGPATH); + config->log_directory = pgut_malloc(MAXPGPATH); join_path_components(config->log_directory, root_path, LOG_DIRECTORY_DEFAULT); } @@ -148,13 +148,11 @@ exit_if_necessary(int elevel) * Actual implementation for elog() and pg_log(). */ static void -elog_internal(int elevel, bool file_only, const char *fmt, va_list args) +elog_internal(int elevel, bool file_only, const char *message) { bool write_to_file, write_to_error_log, write_to_stderr; - va_list error_args, - std_args; time_t log_time = (time_t) time(NULL); char strfbuf[128]; @@ -165,22 +163,8 @@ elog_internal(int elevel, bool file_only, const char *fmt, va_list args) write_to_stderr = elevel >= logger_config.log_level_console && !file_only; pthread_lock(&log_file_mutex); -#ifdef WIN32 - std_args = NULL; - error_args = NULL; -#endif loggin_in_progress = true; - /* We need copy args only if we need write to error log file */ - if (write_to_error_log) - va_copy(error_args, args); - /* - * We need copy args only if we need write to stderr. But do not copy args - * if we need to log only to stderr. - */ - if (write_to_stderr && write_to_file) - va_copy(std_args, args); - if (write_to_file || write_to_error_log) strftime(strfbuf, sizeof(strfbuf), "%Y-%m-%d %H:%M:%S %Z", localtime(&log_time)); @@ -203,8 +187,7 @@ elog_internal(int elevel, bool file_only, const char *fmt, va_list args) fprintf(log_file, "%s: ", strfbuf); write_elevel(log_file, elevel); - vfprintf(log_file, fmt, args); - fputc('\n', log_file); + fprintf(log_file, "%s\n", message); fflush(log_file); } @@ -221,33 +204,19 @@ elog_internal(int elevel, bool file_only, const char *fmt, va_list args) fprintf(error_log_file, "%s: ", strfbuf); write_elevel(error_log_file, elevel); - vfprintf(error_log_file, fmt, error_args); - fputc('\n', error_log_file); + fprintf(error_log_file, "%s\n", message); fflush(error_log_file); - - va_end(error_args); } /* * Write to stderr if the message was not written to log file. * Write to stderr if the message level is greater than WARNING anyway. */ - if (write_to_stderr && write_to_file) + if (write_to_stderr) { write_elevel(stderr, elevel); - vfprintf(stderr, fmt, std_args); - fputc('\n', stderr); - fflush(stderr); - - va_end(std_args); - } - else if (write_to_stderr) - { - write_elevel(stderr, elevel); - - vfprintf(stderr, fmt, args); - fputc('\n', stderr); + fprintf(stderr, "%s\n", message); fflush(stderr); } @@ -285,12 +254,44 @@ elog_stderr(int elevel, const char *fmt, ...) exit_if_necessary(elevel); } +/* + * Formats text data under the control of fmt and returns it in an allocated + * buffer. + */ +static char * +get_log_message(const char *fmt, va_list args) +{ + size_t len = 256; /* initial assumption about buffer size */ + + for (;;) + { + char *result; + size_t newlen; + va_list copy_args; + + result = (char *) pgut_malloc(len); + + /* Try to format the data */ + va_copy(copy_args, args); + newlen = pvsnprintf(result, len, fmt, copy_args); + va_end(copy_args); + + if (newlen < len) + return result; /* success */ + + /* Release buffer and loop around to try again with larger len. 
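get_log_message(), whose body is completed just below, leans on PostgreSQL's pvsnprintf(), which reports how large the output would have been when the buffer is too small. The same grow-and-retry loop written against plain C99 vsnprintf(), as a sketch (allocation failures not handled):

    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Format into a heap buffer, growing it until vsnprintf() fits. */
    static char *format_message(const char *fmt, va_list args)
    {
        size_t len = 256;              /* initial assumption, as in the patch */
        char  *buf = malloc(len);

        for (;;)
        {
            va_list copy;
            int     need;

            va_copy(copy, args);       /* keep args reusable for a retry */
            need = vsnprintf(buf, len, fmt, copy);
            va_end(copy);

            if (need >= 0 && (size_t) need < len)
                return buf;            /* it fit, including the trailing NUL */

            len = (need >= 0) ? (size_t) need + 1 : len * 2;
            buf = realloc(buf, len);
        }
    }

    static char *msg(const char *fmt, ...)
    {
        va_list args;
        char   *s;

        va_start(args, fmt);
        s = format_message(fmt, args);
        va_end(args);
        return s;
    }

    int main(void)
    {
        char *s = msg("backup %s contains %d files", "PAGE", 118);

        puts(s);
        free(s);
        return 0;
    }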
*/ + pfree(result); + len = newlen; + } +} + /* * Logs to stderr or to log file and exit if ERROR. */ void elog(int elevel, const char *fmt, ...) { + char *message; va_list args; /* @@ -302,8 +303,11 @@ elog(int elevel, const char *fmt, ...) return; va_start(args, fmt); - elog_internal(elevel, false, fmt, args); + message = get_log_message(fmt, args); va_end(args); + + elog_internal(elevel, false, message); + pfree(message); } /* @@ -312,6 +316,7 @@ elog(int elevel, const char *fmt, ...) void elog_file(int elevel, const char *fmt, ...) { + char *message; va_list args; /* @@ -322,8 +327,11 @@ elog_file(int elevel, const char *fmt, ...) return; va_start(args, fmt); - elog_internal(elevel, true, fmt, args); + message = get_log_message(fmt, args); va_end(args); + + elog_internal(elevel, true, message); + pfree(message); } /* @@ -332,6 +340,7 @@ elog_file(int elevel, const char *fmt, ...) void pg_log(eLogType type, const char *fmt, ...) { + char *message; va_list args; int elevel = INFO; @@ -364,8 +373,11 @@ pg_log(eLogType type, const char *fmt, ...) return; va_start(args, fmt); - elog_internal(elevel, false, fmt, args); + message = get_log_message(fmt, args); va_end(args); + + elog_internal(elevel, false, message); + pfree(message); } /* @@ -450,7 +462,7 @@ logfile_getname(const char *format, time_t timestamp) logger_config.log_directory[0] == '\0') elog_stderr(ERROR, "logging path is not set"); - filename = (char *) palloc(MAXPGPATH); + filename = (char *) pgut_malloc(MAXPGPATH); snprintf(filename, MAXPGPATH, "%s/", logger_config.log_directory); From eed59874069c05026ae76c04355ead8f9795d9d9 Mon Sep 17 00:00:00 2001 From: Arthur Zakirov Date: Tue, 11 Dec 2018 12:47:21 +0300 Subject: [PATCH 07/18] Remove BOM marker from configuration.c --- src/utils/configuration.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/utils/configuration.c b/src/utils/configuration.c index 9b88d159..fe50c494 100644 --- a/src/utils/configuration.c +++ b/src/utils/configuration.c @@ -1,4 +1,4 @@ -/*------------------------------------------------------------------------- +/*------------------------------------------------------------------------- * * configuration.c: - function implementations to work with pg_probackup * configurations. From 30cf45816d27b2bb877bc54689f71f09483dc004 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Wed, 12 Dec 2018 00:29:20 +0300 Subject: [PATCH 08/18] check backup version before backup validation, throw an error if backup version > binary version --- src/validate.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/validate.c b/src/validate.c index edb386ef..14d347ca 100644 --- a/src/validate.c +++ b/src/validate.c @@ -52,6 +52,14 @@ pgBackupValidate(pgBackup *backup) validate_files_arg *threads_args; int i; + /* Check backup version */ + if (backup->program_version && + parse_program_version(backup->program_version) > parse_program_version(PROGRAM_VERSION)) + elog(ERROR, "pg_probackup binary version is %s, but backup %s version is %s. " + "pg_probackup do not guarantee to be forward compatible. 
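The check being added to validate.c compares parse_program_version(backup->program_version) against the running binary's PROGRAM_VERSION and refuses to validate backups written by a newer release. A hedged sketch of how a dotted version string can be folded into one comparable integer; the real parse_program_version() may encode versions differently:

    #include <stdio.h>

    /* "major.minor.patch" -> single comparable number (illustrative). */
    static unsigned int version_num(const char *s)
    {
        unsigned int major = 0, minor = 0, patch = 0;

        sscanf(s, "%u.%u.%u", &major, &minor, &patch);
        return major * 10000 + minor * 100 + patch;
    }

    int main(void)
    {
        /* A backup made by a newer binary is rejected, never guessed at. */
        if (version_num("2.0.26") > version_num("2.0.25"))
            printf("backup is newer than this binary: refuse to validate\n");
        return 0;
    }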
" + "Please upgrade pg_probackup binary.", + PROGRAM_VERSION, base36enc(backup->start_time), backup->program_version); + /* Revalidation is attempted for DONE, ORPHAN and CORRUPT backups */ if (backup->status != BACKUP_STATUS_OK && backup->status != BACKUP_STATUS_DONE && From 23d1a26e024c3fb66f4d5c514abd80ed53558cae Mon Sep 17 00:00:00 2001 From: Arthur Zakirov Date: Wed, 12 Dec 2018 15:10:25 +0300 Subject: [PATCH 09/18] Fix MERGE of uncompressed with compressed backups --- src/catalog.c | 14 -------------- src/data.c | 23 +++++++++++++++++------ src/dir.c | 2 +- src/merge.c | 23 ++++++++++++++++++++++- src/pg_probackup.h | 1 - 5 files changed, 40 insertions(+), 23 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 77ac0f34..b30fcc66 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -820,20 +820,6 @@ pgBackupInit(pgBackup *backup) backup->server_version[0] = '\0'; } -/* - * Copy backup metadata from **src** into **dst**. - */ -void -pgBackupCopy(pgBackup *dst, pgBackup *src) -{ - pfree(dst->primary_conninfo); - - memcpy(dst, src, sizeof(pgBackup)); - - if (src->primary_conninfo) - dst->primary_conninfo = pstrdup(src->primary_conninfo); -} - /* free pgBackup object */ void pgBackupFree(void *backup) diff --git a/src/data.c b/src/data.c index 0d0c5e5c..ccbe9ca8 100644 --- a/src/data.c +++ b/src/data.c @@ -829,6 +829,8 @@ restore_data_file(const char *to_path, pgFile *file, bool allow_truncate, if (write_header) { + /* We uncompressed the page, so its size is BLCKSZ */ + header.compressed_size = BLCKSZ; if (fwrite(&header, 1, sizeof(header), out) != sizeof(header)) elog(ERROR, "cannot write header of block %u of \"%s\": %s", blknum, file->path, strerror(errno)); @@ -1592,19 +1594,23 @@ check_file_pages(pgFile *file, XLogRecPtr stop_lsn, uint32 checksum_version, if (read_len == 0 && feof(in)) break; /* EOF found */ else if (read_len != 0 && feof(in)) - elog(ERROR, + elog(WARNING, "odd size page found at block %u of \"%s\"", blknum, file->path); else - elog(ERROR, "cannot read header of block %u of \"%s\": %s", + elog(WARNING, "cannot read header of block %u of \"%s\": %s", blknum, file->path, strerror(errno_tmp)); + return false; } COMP_FILE_CRC32(use_crc32c, crc, &header, read_len); if (header.block < blknum) - elog(ERROR, "backup is broken at file->path %s block %u", + { + elog(WARNING, "backup is broken at file->path %s block %u", file->path, blknum); + return false; + } blknum = header.block; @@ -1620,8 +1626,11 @@ check_file_pages(pgFile *file, XLogRecPtr stop_lsn, uint32 checksum_version, read_len = fread(compressed_page.data, 1, MAXALIGN(header.compressed_size), in); if (read_len != MAXALIGN(header.compressed_size)) - elog(ERROR, "cannot read block %u of \"%s\" read %zu of %d", + { + elog(WARNING, "cannot read block %u of \"%s\" read %zu of %d", blknum, file->path, read_len, header.compressed_size); + return false; + } COMP_FILE_CRC32(use_crc32c, crc, compressed_page.data, read_len); @@ -1648,11 +1657,13 @@ check_file_pages(pgFile *file, XLogRecPtr stop_lsn, uint32 checksum_version, is_valid = false; continue; } - elog(ERROR, "page of file \"%s\" uncompressed to %d bytes. != BLCKSZ", + elog(WARNING, "page of file \"%s\" uncompressed to %d bytes. 
!= BLCKSZ", file->path, uncompressed_size); + return false; } + if (validate_one_page(page.data, file, blknum, - stop_lsn, checksum_version) == PAGE_IS_FOUND_AND_NOT_VALID) + stop_lsn, checksum_version) == PAGE_IS_FOUND_AND_NOT_VALID) is_valid = false; } else diff --git a/src/dir.c b/src/dir.c index ae8cb20b..386f49bc 100644 --- a/src/dir.c +++ b/src/dir.c @@ -211,7 +211,7 @@ pgFileInit(const char *path) strcpy(file->path, path); /* enough buffer size guaranteed */ /* Get file name from the path */ - file_name = strrchr(file->path, '/'); + file_name = last_dir_separator(file->path); if (file_name == NULL) file->name = file->path; else diff --git a/src/merge.c b/src/merge.c index e0669586..b936469b 100644 --- a/src/merge.c +++ b/src/merge.c @@ -91,7 +91,8 @@ do_merge(time_t backup_id) } else { - Assert(dest_backup); + if (dest_backup == NULL) + elog(ERROR, "Target backup %s was not found", base36enc(backup_id)); if (backup->start_time != prev_parent) continue; @@ -335,6 +336,20 @@ delete_source_backup: } } + /* + * Rename FULL backup directory. + */ + elog(INFO, "Rename %s to %s", to_backup_id, from_backup_id); + if (rename(to_backup_path, from_backup_path) == -1) + elog(ERROR, "Could not rename directory \"%s\" to \"%s\": %s", + to_backup_path, from_backup_path, strerror(errno)); + + /* + * Merging finished, now we can safely update ID of the destination backup. + */ + to_backup->start_time = from_backup->start_time; + write_backup(to_backup); + /* Cleanup */ if (threads) { @@ -526,6 +541,12 @@ merge_files(void *arg) else copy_file(argument->from_root, argument->to_root, file); + /* + * We need to save compression algorithm type of the target backup to be + * able to restore in the future. + */ + file->compress_alg = to_backup->compress_alg; + if (file->write_size != BYTES_INVALID) elog(LOG, "Moved file \"%s\": " INT64_FORMAT " bytes", file->path, file->write_size); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 0dc649a2..546c1016 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -476,7 +476,6 @@ extern void pgBackupGetPath2(const pgBackup *backup, char *path, size_t len, const char *subdir1, const char *subdir2); extern int pgBackupCreateDir(pgBackup *backup); extern void pgBackupInit(pgBackup *backup); -extern void pgBackupCopy(pgBackup *dst, pgBackup *src); extern void pgBackupFree(void *backup); extern int pgBackupCompareId(const void *f1, const void *f2); extern int pgBackupCompareIdDesc(const void *f1, const void *f2); From 0b05cf0343a688178fafa33ad916c81875287234 Mon Sep 17 00:00:00 2001 From: Arthur Zakirov Date: Wed, 12 Dec 2018 17:27:31 +0300 Subject: [PATCH 10/18] Improve comment within on_interrupt() --- src/utils/pgut.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/utils/pgut.c b/src/utils/pgut.c index ccf832b9..e08b53ea 100644 --- a/src/utils/pgut.c +++ b/src/utils/pgut.c @@ -703,7 +703,10 @@ on_interrupt(void) /* Set interruped flag */ interrupted = true; - /* User promts password, call on_cleanup() byhand */ + /* + * User promts password, call on_cleanup() byhand. If we don't do that we + * will stuck forever until an user enters a password. 
+	 */
 	if (in_password)
 	{
 		on_cleanup();

From 5ad509a40efec5af6ce22c5cd8c08b2dee94147b Mon Sep 17 00:00:00 2001
From: Arthur Zakirov
Date: Wed, 12 Dec 2018 17:52:01 +0300
Subject: [PATCH 11/18] Fix comment introduced in 0b05cf0343

---
 src/utils/pgut.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/utils/pgut.c b/src/utils/pgut.c
index e08b53ea..a7e12e91 100644
--- a/src/utils/pgut.c
+++ b/src/utils/pgut.c
@@ -704,8 +704,8 @@ on_interrupt(void)
 	interrupted = true;
 
 	/*
-	 * User promts password, call on_cleanup() byhand. If we don't do that we
-	 * will stuck forever until an user enters a password.
+	 * The user is being prompted for a password, so call on_cleanup() by hand.
+	 * Unless we do that we will get stuck forever until the user enters a password.
 	 */
 	if (in_password)
 	{

From 9484b6de27c3768e7105a87f5c94e989611767fc Mon Sep 17 00:00:00 2001
From: Grigory Smolkin
Date: Wed, 12 Dec 2018 20:32:42 +0300
Subject: [PATCH 12/18] tests: test_page_create_db added

---
 tests/page.py | 118 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 118 insertions(+)

diff --git a/tests/page.py b/tests/page.py
index f0647082..d0ebbb9b 100644
--- a/tests/page.py
+++ b/tests/page.py
@@ -1,6 +1,7 @@
 import os
 import unittest
 from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
+from testgres import QueryException
 from datetime import datetime, timedelta
 import subprocess
 import gzip
@@ -1030,3 +1031,120 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
 
         # Clean after yourself
         self.del_test_dir(module_name, fname)
+
+    # @unittest.skip("skip")
+    def test_page_create_db(self):
+        """
+        Make node, take full backup, create database db1, take page backup,
+        restore the database and check its presence
+        """
+        self.maxDiff = None
+        fname = self.id().split('.')[3]
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        node = self.make_simple_node(
+            base_dir="{0}/{1}/node".format(module_name, fname),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={
+                'max_wal_size': '10GB',
+                'max_wal_senders': '2',
+                'checkpoint_timeout': '5min',
+                'autovacuum': 'off'
+            }
+        )
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        # FULL BACKUP
+        node.safe_psql(
+            "postgres",
+            "create table t_heap as select i as id, md5(i::text) as text, "
+            "md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
+
+        self.backup_node(
+            backup_dir, 'node', node)
+
+        # CREATE DATABASE DB1
+        node.safe_psql("postgres", "create database db1")
+        node.safe_psql(
+            "db1",
+            "create table t_heap as select i as id, md5(i::text) as text, "
+            "md5(i::text)::tsvector as tsvector from generate_series(0,1000) i")
+
+        # PAGE BACKUP
+        backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
+
+        if self.paranoia:
+            pgdata = self.pgdata_content(node.data_dir)
+
+        # RESTORE
+        node_restored = self.make_simple_node(
+            base_dir="{0}/{1}/node_restored".format(module_name, fname)
+        )
+
+        node_restored.cleanup()
+        self.restore_node(
+            backup_dir, 'node', node_restored,
+            backup_id=backup_id, options=["-j", "4"])
+
+        # COMPARE PHYSICAL CONTENT
+        if self.paranoia:
+            pgdata_restored = self.pgdata_content(node_restored.data_dir)
+            self.compare_pgdata(pgdata, pgdata_restored)
+
+        # START RESTORED NODE
+        node_restored.append_conf(
+            "postgresql.auto.conf", "port = {0}".format(node_restored.port))
+        node_restored.slow_start()
+
+        node_restored.safe_psql('db1', 'select 1')
+        node_restored.cleanup()
+
+        # DROP DATABASE DB1
+        node.safe_psql(
+            "postgres", "drop database db1")
+        # SECOND PAGE BACKUP
+        backup_id = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        if self.paranoia:
+            pgdata = self.pgdata_content(node.data_dir)
+
+        # RESTORE SECOND PAGE BACKUP
+        self.restore_node(
+            backup_dir, 'node', node_restored,
+            backup_id=backup_id, options=["-j", "4"]
+        )
+
+        # COMPARE PHYSICAL CONTENT
+        if self.paranoia:
+            pgdata_restored = self.pgdata_content(
+                node_restored.data_dir, ignore_ptrack=False)
+            self.compare_pgdata(pgdata, pgdata_restored)
+
+        # START RESTORED NODE
+        node_restored.append_conf(
+            "postgresql.auto.conf", "port = {0}".format(node_restored.port))
+        node_restored.slow_start()
+
+        try:
+            node_restored.safe_psql('db1', 'select 1')
+            # we should die here because exception is what we expect to happen
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because we are connecting to deleted database"
+                "\n Output: {0} \n CMD: {1}".format(
+                    repr(self.output), self.cmd)
+            )
+        except QueryException as e:
+            self.assertTrue(
+                'FATAL: database "db1" does not exist' in e.message,
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                    repr(e.message), self.cmd)
+            )
+
+        # Clean after yourself
+        self.del_test_dir(module_name, fname)

From eead6b6307b464b67e567516609baac7fce7560c Mon Sep 17 00:00:00 2001
From: Grigory Smolkin
Date: Thu, 13 Dec 2018 10:20:49 +0300
Subject: [PATCH 13/18] tests: pgdata_compare now detects missing or redundant
 directories

---
 tests/helpers/ptrack_helpers.py | 48 ++++++++++++++++++++++++++++++---
 1 file changed, 44 insertions(+), 4 deletions(-)

diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py
index d81d7288..0bc27abf 100644
--- a/tests/helpers/ptrack_helpers.py
+++ b/tests/helpers/ptrack_helpers.py
@@ -1047,7 +1047,7 @@ class ProbackupTest(object):
         except:
             pass
 
-    def pgdata_content(self, directory, ignore_ptrack=True):
+    def pgdata_content(self, pgdata, ignore_ptrack=True):
         """ return dict with directory content.
" " TAKE IT AFTER CHECKPOINT or BACKUP""" dirs_to_ignore = [ @@ -1064,9 +1064,10 @@ class ProbackupTest(object): # '_ptrack' # ) directory_dict = {} - directory_dict['pgdata'] = directory + directory_dict['pgdata'] = pgdata directory_dict['files'] = {} - for root, dirs, files in os.walk(directory, followlinks=True): + directory_dict['dirs'] = [] + for root, dirs, files in os.walk(pgdata, followlinks=True): dirs[:] = [d for d in dirs if d not in dirs_to_ignore] for file in files: if ( @@ -1076,7 +1077,7 @@ class ProbackupTest(object): continue file_fullpath = os.path.join(root, file) - file_relpath = os.path.relpath(file_fullpath, directory) + file_relpath = os.path.relpath(file_fullpath, pgdata) directory_dict['files'][file_relpath] = {'is_datafile': False} directory_dict['files'][file_relpath]['md5'] = hashlib.md5( open(file_fullpath, 'rb').read()).hexdigest() @@ -1089,12 +1090,51 @@ class ProbackupTest(object): file_fullpath, size_in_pages ) + for root, dirs, files in os.walk(pgdata, topdown=False, followlinks=True): + for directory in dirs: + directory_path = os.path.join(root, directory) + directory_relpath = os.path.relpath(directory_path, pgdata) + + found = False + for d in dirs_to_ignore: + if d in directory_relpath: + found = True + break + + # check if directory already here as part of larger directory + if not found: + for d in directory_dict['dirs']: + # print("OLD dir {0}".format(d)) + if directory_relpath in d: + found = True + break + + if not found: + directory_dict['dirs'].append(directory_relpath) + return directory_dict def compare_pgdata(self, original_pgdata, restored_pgdata): """ return dict with directory content. DO IT BEFORE RECOVERY""" fail = False error_message = 'Restored PGDATA is not equal to original!\n' + + # Compare directories + for directory in restored_pgdata['dirs']: + if directory not in original_pgdata['dirs']: + fail = True + error_message += '\nDirectory is not present' + error_message += ' in original PGDATA: {0}\n'.format( + os.path.join(restored_pgdata['pgdata'], directory)) + + for directory in original_pgdata['dirs']: + if directory not in restored_pgdata['dirs']: + fail = True + error_message += '\nDirectory dissappeared' + error_message += ' in restored PGDATA: {0}\n'.format( + os.path.join(restored_pgdata['pgdata'], directory)) + + for file in restored_pgdata['files']: # File is present in RESTORED PGDATA # but not present in ORIGINAL From a5cef9890829c90b4df15efee6b5e5cda6c9c782 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 13 Dec 2018 11:45:04 +0300 Subject: [PATCH 14/18] tests: test_merge_tablespaces and test_merge_tablespaces_1 added --- tests/helpers/ptrack_helpers.py | 2 +- tests/merge.py | 154 ++++++++++++++++++++++++++++++++ 2 files changed, 155 insertions(+), 1 deletion(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 0bc27abf..8ea90c24 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1123,7 +1123,7 @@ class ProbackupTest(object): for directory in restored_pgdata['dirs']: if directory not in original_pgdata['dirs']: fail = True - error_message += '\nDirectory is not present' + error_message += '\nDirectory was not present' error_message += ' in original PGDATA: {0}\n'.format( os.path.join(restored_pgdata['pgdata'], directory)) diff --git a/tests/merge.py b/tests/merge.py index e4209441..8ce1795e 100644 --- a/tests/merge.py +++ b/tests/merge.py @@ -3,6 +3,7 @@ import unittest import os from .helpers.ptrack_helpers import ProbackupTest, 
ProbackupException +import shutil module_name = "merge" @@ -471,9 +472,162 @@ class MergeTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_merge_tablespaces(self): + """ + Create tablespace with table + """ + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir="{0}/{1}/node".format(module_name, fname), + set_replication=True, initdb_params=['--data-checksums'], + pg_options={ + 'wal_level': 'replica', + 'max_wal_senders': '2', + 'autovacuum': 'off' + } + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.start() + + self.create_tblspace_in_node(node, 'somedata') + node.safe_psql( + "postgres", + "create table t_heap tablespace somedata as select i as id," + " md5(i::text) as text, md5(i::text)::tsvector as tsvector" + " from generate_series(0,100) i" + ) + # FULL backup + self.backup_node(backup_dir, 'node', node) + + # Create new tablespace + self.create_tblspace_in_node(node, 'somedata1') + + node.safe_psql( + "postgres", + "create table t_heap1 tablespace somedata1 as select i as id," + " md5(i::text) as text, md5(i::text)::tsvector as tsvector" + " from generate_series(0,100) i" + ) + + node.safe_psql( + "postgres", + "drop table t_heap" + ) + + # Drop old tablespace + node.safe_psql( + "postgres", + "drop tablespace somedata" + ) + + # PAGE backup + backup_id = self.backup_node(backup_dir, 'node', node, backup_type="page") + + pgdata = self.pgdata_content(node.data_dir) + + node.stop() + shutil.rmtree( + self.get_tblspace_path(node, 'somedata'), + ignore_errors=True) + shutil.rmtree( + self.get_tblspace_path(node, 'somedata1'), + ignore_errors=True) + node.cleanup() + + self.merge_backup(backup_dir, 'node', backup_id) + + self.restore_node( + backup_dir, 'node', node, options=["-j", "4"]) + + pgdata_restored = self.pgdata_content(node.data_dir) + + # this compare should fall because we lost some directories + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_merge_tablespaces_1(self): """ Some test here """ + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir="{0}/{1}/node".format(module_name, fname), + set_replication=True, initdb_params=['--data-checksums'], + pg_options={ + 'wal_level': 'replica', + 'max_wal_senders': '2', + 'autovacuum': 'off' + } + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.start() + + self.create_tblspace_in_node(node, 'somedata') + + # FULL backup + self.backup_node(backup_dir, 'node', node) + node.safe_psql( + "postgres", + "create table t_heap tablespace somedata as select i as id," + " md5(i::text) as text, md5(i::text)::tsvector as tsvector" + " from generate_series(0,100) i" + ) + + # CREATE NEW TABLESPACE + self.create_tblspace_in_node(node, 'somedata1') + + node.safe_psql( + "postgres", + "create table t_heap1 tablespace somedata1 as select i as id," + " md5(i::text) as text, md5(i::text)::tsvector as tsvector" + " from generate_series(0,100) i" + ) + + # PAGE backup + self.backup_node(backup_dir, 'node', node, backup_type="page") + + node.safe_psql( + "postgres", + "drop table t_heap" + ) + node.safe_psql( + "postgres", + "drop tablespace somedata" + ) + + # DELTA backup + backup_id = self.backup_node( + backup_dir, 'node', node, 
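Background for these tablespace tests: PostgreSQL implements a tablespace as a symlink under PGDATA/pg_tblspc pointing at an external directory, so a restore after MERGE must recreate both the link and the tree behind it, and a dropped tablespace must not come back. A POSIX sketch of inspecting such a link; the paths and OID here are illustrative:

    #include <limits.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char    target[PATH_MAX];
        ssize_t len = readlink("pgdata/pg_tblspc/16385", target, sizeof(target) - 1);

        if (len < 0)
        {
            perror("readlink");
            return 1;
        }
        target[len] = '\0';           /* readlink() does not NUL-terminate */
        printf("tablespace 16385 lives in %s\n", target);
        return 0;
    }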
backup_type="delta") + + pgdata = self.pgdata_content(node.data_dir) + + node.stop() + shutil.rmtree( + self.get_tblspace_path(node, 'somedata'), + ignore_errors=True) + shutil.rmtree( + self.get_tblspace_path(node, 'somedata1'), + ignore_errors=True) + node.cleanup() + + self.merge_backup(backup_dir, 'node', backup_id) + + self.restore_node( + backup_dir, 'node', node, + options=["-j", "4"]) + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # Clean after yourself + self.del_test_dir(module_name, fname) def test_merge_page_truncate(self): """ From e16bdc6660bb5666c485a897e4f444bcdf3d9aab Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Thu, 13 Dec 2018 13:21:57 +0300 Subject: [PATCH 15/18] tests: add some comments --- tests/merge.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tests/merge.py b/tests/merge.py index 8ce1795e..2dbe8ee0 100644 --- a/tests/merge.py +++ b/tests/merge.py @@ -473,7 +473,10 @@ class MergeTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_merge_tablespaces(self): """ - Create tablespace with table + Create tablespace with table, take FULL backup, + create another tablespace with another table and drop previous + tablespace, take page backup, merge it and restore + """ fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') @@ -550,7 +553,10 @@ class MergeTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_merge_tablespaces_1(self): """ - Some test here + Create tablespace with table, take FULL backup, + create another tablespace with another table, take page backup, + drop first tablespace and take delta backup, + merge it and restore """ fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') @@ -1202,3 +1208,4 @@ class MergeTest(ProbackupTest, unittest.TestCase): # FULL MERGING # 3. Need new test with corrupted FULL backup +# 4. different compression levels From eec11c074c839bd465780673c417ce7a3f539823 Mon Sep 17 00:00:00 2001 From: Arthur Zakirov Date: Thu, 13 Dec 2018 17:11:45 +0300 Subject: [PATCH 16/18] Fix comment in main() --- src/pg_probackup.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 04b5d486..ada27e3e 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -363,8 +363,9 @@ main(int argc, char *argv[]) } /* - * Read options from env variables or from config file, - * unless we're going to set them via set-config. + * We read options from command line, now we need to read them from + * configuration file since we got backup path and instance name. + * For some commands an instance option isn't required, see above. 
*/ if (instance_name) { From 13c4df812e202bd3c3306c698f3efc493e889a46 Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Fri, 14 Dec 2018 16:55:32 +0300 Subject: [PATCH 17/18] bugfix: undeclared compress_alg --- src/pg_probackup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index ada27e3e..10478970 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -582,7 +582,7 @@ compress_init(void) if (backup_subcmd == BACKUP_CMD || backup_subcmd == ARCHIVE_PUSH_CMD) { #ifndef HAVE_LIBZ - if (compress_alg == ZLIB_COMPRESS) + if (instance_config.compress_alg == ZLIB_COMPRESS) elog(ERROR, "This build does not support zlib compression"); else #endif From 53cb7c1dd4f08ecb1ce1b93aeab03aca1803449e Mon Sep 17 00:00:00 2001 From: Arthur Zakirov Date: Fri, 14 Dec 2018 18:41:23 +0300 Subject: [PATCH 18/18] Fix removing files which are not in from_backup file list --- src/merge.c | 51 +++++++++++++++++++++++++++++++++------------------ 1 file changed, 33 insertions(+), 18 deletions(-) diff --git a/src/merge.c b/src/merge.c index b936469b..545a0505 100644 --- a/src/merge.c +++ b/src/merge.c @@ -208,10 +208,7 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup) */ pgBackupGetPath(to_backup, control_file, lengthof(control_file), DATABASE_FILE_LIST); - to_files = dir_read_file_list(from_database_path, /* Use from_database_path - * so root path will be - * equal with 'files' */ - control_file); + to_files = dir_read_file_list(NULL, control_file); /* To delete from leaf, sort in reversed order */ parray_qsort(to_files, pgFileComparePathDesc); /* @@ -219,7 +216,7 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup) */ pgBackupGetPath(from_backup, control_file, lengthof(control_file), DATABASE_FILE_LIST); - files = dir_read_file_list(from_database_path, control_file); + files = dir_read_file_list(NULL, control_file); /* sort by size for load balancing */ parray_qsort(files, pgFileCompareSize); @@ -331,8 +328,18 @@ delete_source_backup: if (parray_bsearch(files, file, pgFileComparePathDesc) == NULL) { + char to_file_path[MAXPGPATH]; + char *prev_path; + + /* We need full path, file object has relative path */ + join_path_components(to_file_path, to_database_path, file->path); + prev_path = file->path; + file->path = to_file_path; + pgFileDelete(file); elog(VERBOSE, "Deleted \"%s\"", file->path); + + file->path = prev_path; } } @@ -378,13 +385,14 @@ merge_files(void *arg) pgBackup *from_backup = argument->from_backup; int i, num_files = parray_num(argument->files); - int to_root_len = strlen(argument->to_root); for (i = 0; i < num_files; i++) { pgFile *file = (pgFile *) parray_get(argument->files, i); pgFile *to_file; pgFile **res_file; + char from_file_path[MAXPGPATH]; + char *prev_file_path; if (!pg_atomic_test_set_flag(&file->lock)) continue; @@ -429,19 +437,23 @@ merge_files(void *arg) continue; } + /* We need to make full path, file object has relative path */ + join_path_components(from_file_path, argument->from_root, file->path); + prev_file_path = file->path; + file->path = from_file_path; + /* * Move the file. We need to decompress it and compress again if * necessary. 
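Patch 17 above is a small study in why #ifdef'd code rots: the bare compress_alg identifier sat inside an #ifndef HAVE_LIBZ block, which is compiled only in builds without zlib, so most builds never parsed the broken line. A reduced illustration:

    #include <stdio.h>

    struct { int compress_alg; } instance_config;

    #define ZLIB_COMPRESS 1

    static void compress_init(void)
    {
    #ifndef HAVE_LIBZ
        /* Before the fix this referred to a bare compress_alg, which does
         * not exist; builds with HAVE_LIBZ defined simply skipped the line. */
        if (instance_config.compress_alg == ZLIB_COMPRESS)
            fprintf(stderr, "This build does not support zlib compression\n");
    #endif
    }

    int main(void)
    {
        instance_config.compress_alg = ZLIB_COMPRESS;
        compress_init();
        return 0;
    }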
*/ - elog(VERBOSE, "Moving file \"%s\", is_datafile %d, is_cfs %d", + elog(VERBOSE, "Merging file \"%s\", is_datafile %d, is_cfs %d", file->path, file->is_database, file->is_cfs); if (file->is_datafile && !file->is_cfs) { - char to_path_tmp[MAXPGPATH]; /* Path of target file */ + char to_file_path[MAXPGPATH]; /* Path of target file */ - join_path_components(to_path_tmp, argument->to_root, - file->path + to_root_len + 1); + join_path_components(to_file_path, argument->to_root, prev_file_path); /* * We need more complicate algorithm if target file should be @@ -453,7 +465,7 @@ merge_files(void *arg) char tmp_file_path[MAXPGPATH]; char *prev_path; - snprintf(tmp_file_path, MAXPGPATH, "%s_tmp", to_path_tmp); + snprintf(tmp_file_path, MAXPGPATH, "%s_tmp", to_file_path); /* Start the magic */ @@ -479,7 +491,7 @@ merge_files(void *arg) * need the file in directory to_root. */ prev_path = to_file->path; - to_file->path = to_path_tmp; + to_file->path = to_file_path; /* Decompress target file into temporary one */ restore_data_file(tmp_file_path, to_file, false, false, parse_program_version(to_backup->program_version)); @@ -494,7 +506,7 @@ merge_files(void *arg) false, parse_program_version(from_backup->program_version)); - elog(VERBOSE, "Compress file and save it to the directory \"%s\"", + elog(VERBOSE, "Compress file and save it into the directory \"%s\"", argument->to_root); /* Again we need to change path */ @@ -504,7 +516,7 @@ merge_files(void *arg) file->size = pgFileSize(file->path); /* Now we can compress the file */ backup_data_file(NULL, /* We shouldn't need 'arguments' here */ - to_path_tmp, file, + to_file_path, file, to_backup->start_lsn, to_backup->backup_mode, to_backup->compress_alg, @@ -523,7 +535,7 @@ merge_files(void *arg) else { /* We can merge in-place here */ - restore_data_file(to_path_tmp, file, + restore_data_file(to_file_path, file, from_backup->backup_mode == BACKUP_MODE_DIFF_DELTA, true, parse_program_version(from_backup->program_version)); @@ -532,8 +544,8 @@ merge_files(void *arg) * We need to calculate write_size, restore_data_file() doesn't * do that. */ - file->write_size = pgFileSize(to_path_tmp); - file->crc = pgFileGetCRC(to_path_tmp, true, true, NULL); + file->write_size = pgFileSize(to_file_path); + file->crc = pgFileGetCRC(to_file_path, true, true, NULL); } } else if (strcmp(file->name, "pg_control") == 0) @@ -548,8 +560,11 @@ merge_files(void *arg) file->compress_alg = to_backup->compress_alg; if (file->write_size != BYTES_INVALID) - elog(LOG, "Moved file \"%s\": " INT64_FORMAT " bytes", + elog(LOG, "Merged file \"%s\": " INT64_FORMAT " bytes", file->path, file->write_size); + + /* Restore relative path */ + file->path = prev_file_path; } /* Data files merging is successful */
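Throughout merge_files() above, recompression goes through a sibling *_tmp file and the result is only swapped in once it is complete. That write-then-rename pattern, shown standalone with illustrative names:

    #include <stdio.h>
    #include <string.h>

    /* Replace path's contents so that a crash leaves either the old file or
     * the new one, never a torn mix. */
    static int replace_file(const char *path, const char *data)
    {
        char  tmp_path[4096];
        FILE *out;

        snprintf(tmp_path, sizeof(tmp_path), "%s_tmp", path);

        out = fopen(tmp_path, "w");
        if (out == NULL)
            return -1;
        if (fwrite(data, 1, strlen(data), out) != strlen(data))
        {
            fclose(out);
            remove(tmp_path);
            return -1;
        }
        fclose(out);

        /* rename() replaces the destination atomically on POSIX systems. */
        return rename(tmp_path, path);
    }

    int main(void)
    {
        return replace_file("merged_block_file", "new contents\n") == 0 ? 0 : 1;
    }

The same idea gives MERGE a recovery point: until the final rename (and the backup-directory rename added in patch 09), the previous state is still intact on disk.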