From 867e9e583560f6a2e0181b89edd8eb4670c31569 Mon Sep 17 00:00:00 2001
From: Grigory Smolkin
Date: Wed, 27 Mar 2019 18:15:15 +0300
Subject: [PATCH 1/8] tests: rename validate_test module to validate

---
 tests/__init__.py                       | 4 ++--
 tests/{validate_test.py => validate.py} | 1 +
 2 files changed, 3 insertions(+), 2 deletions(-)
 rename tests/{validate_test.py => validate.py} (99%)

diff --git a/tests/__init__.py b/tests/__init__.py
index 0c2d18d3..b1fa5f5d 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -1,7 +1,7 @@
 import unittest

 from . import init_test, merge, option_test, show_test, compatibility, \
-    backup_test, delete_test, delta, restore, validate_test, \
+    backup_test, delete_test, delta, restore, validate, \
     retention, pgpro560, pgpro589, pgpro2068, false_positive, replica, \
     compression, page, ptrack, archive, exclude, cfs_backup, cfs_restore, \
     cfs_validate_backup, auth_test, time_stamp, snapfs, logging, \
@@ -36,7 +36,7 @@ def load_tests(loader, tests, pattern):
     suite.addTests(loader.loadTestsFromModule(retention))
     suite.addTests(loader.loadTestsFromModule(show_test))
     suite.addTests(loader.loadTestsFromModule(snapfs))
-    suite.addTests(loader.loadTestsFromModule(validate_test))
+    suite.addTests(loader.loadTestsFromModule(validate))
    suite.addTests(loader.loadTestsFromModule(pgpro560))
     suite.addTests(loader.loadTestsFromModule(pgpro589))
     suite.addTests(loader.loadTestsFromModule(pgpro2068))

diff --git a/tests/validate_test.py b/tests/validate.py
similarity index 99%
rename from tests/validate_test.py
rename to tests/validate.py
index 13b87a0b..3b4e26fb 100644
--- a/tests/validate_test.py
+++ b/tests/validate.py
@@ -146,6 +146,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
             con.commit()
             target_xid = res[0][0]
         self.switch_wal_segment(node)
+        time.sleep(5)

         self.assertIn(
             "INFO: Backup validation completed successfully",

From 068218c54411242b14f175f6c33eb671ccc37dcd Mon Sep 17 00:00:00 2001
From: Grigory Smolkin
Date: Wed, 27 Mar 2019 18:16:53 +0300
Subject: [PATCH 2/8] tests: added merge.MergeTest.test_merge_backup_from_future
 and restore.RestoreTest.test_restore_backup_from_future

---
 tests/merge.py   | 64 ++++++++++++++++++++++++++++++++++++++++++++++-
 tests/restore.py | 58 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 121 insertions(+), 1 deletion(-)

diff --git a/tests/merge.py b/tests/merge.py
index 0124daac..3c34ec4b 100644
--- a/tests/merge.py
+++ b/tests/merge.py
@@ -4,6 +4,7 @@ import unittest
 import os
 from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
 import shutil
+from datetime import datetime, timedelta

 module_name = "merge"
@@ -1695,7 +1696,68 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         self.assertEqual(
             'CORRUPT', self.show_pb(backup_dir, 'node')[0]['status'])

-        # self.del_test_dir(module_name, fname)
+        self.del_test_dir(module_name, fname)
+
+    # @unittest.skip("skip")
+    def test_merge_backup_from_future(self):
+        """
+        take FULL backup, take PAGE backup from future,
+        try to merge page with FULL
+        """
+        fname = self.id().split('.')[3]
+        node = self.make_simple_node(
+            base_dir=os.path.join(module_name, fname, 'node'),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={
+                'wal_level': 'replica',
+                'max_wal_senders': '2',
+                'autovacuum': 'off'})
+
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        # Take FULL
+        self.backup_node(backup_dir, 'node', node)
+
+        node.pgbench_init(scale=3)
+
+        # Take PAGE from future
+        backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
+
+        with open(
+                os.path.join(
+                    backup_dir, 'backups', 'node',
+                    backup_id, "backup.control"), "a") as conf:
+            conf.write("start-time='{:%Y-%m-%d %H:%M:%S}'\n".format(
+                datetime.now() + timedelta(days=3)))
+
+        # rename directory
+        new_id = self.show_pb(backup_dir, 'node')[1]['id']
+
+        os.rename(
+            os.path.join(backup_dir, 'backups', 'node', backup_id),
+            os.path.join(backup_dir, 'backups', 'node', new_id))
+
+        pgbench = node.pgbench(options=['-T', '3', '-c', '2', '--no-vacuum'])
+        pgbench.wait()
+
+        backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
+        pgdata = self.pgdata_content(node.data_dir)
+
+        node.cleanup()
+        self.merge_backup(backup_dir, 'node', backup_id=backup_id)
+
+        self.restore_node(backup_dir, 'node', node, backup_id=backup_id)
+
+        pgdata_restored = self.pgdata_content(node.data_dir)
+        self.compare_pgdata(pgdata, pgdata_restored)
+
+        # Clean after yourself
+        self.del_test_dir(module_name, fname)

 # 1. always use parent link when merging (intermediates may be from different chain)
 # 2. page backup we are merging with may disappear after failed merge,

diff --git a/tests/restore.py b/tests/restore.py
index 4519fe07..3a5ec616 100644
--- a/tests/restore.py
+++ b/tests/restore.py
@@ -5,6 +5,7 @@ import subprocess
 from datetime import datetime
 import sys
 from time import sleep
+from datetime import datetime, timedelta

 module_name = 'restore'
@@ -1724,3 +1725,60 @@ class RestoreTest(ProbackupTest, unittest.TestCase):

         # Clean after yourself
         self.del_test_dir(module_name, fname)
+
+    # @unittest.skip("skip")
+    def test_restore_backup_from_future(self):
+        """take FULL backup, take PAGE backup from future, try to restore it"""
+        fname = self.id().split('.')[3]
+        node = self.make_simple_node(
+            base_dir=os.path.join(module_name, fname, 'node'),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={
+                'wal_level': 'replica',
+                'max_wal_senders': '2'})
+
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        # Take FULL
+        self.backup_node(backup_dir, 'node', node)
+
+        node.pgbench_init(scale=3)
+        #pgbench = node.pgbench(options=['-T', '20', '-c', '2'])
+        #pgbench.wait()
+
+        # Take PAGE from future
+        backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
+
+        with open(
+                os.path.join(
+                    backup_dir, 'backups', 'node',
+                    backup_id, "backup.control"), "a") as conf:
+            conf.write("start-time='{:%Y-%m-%d %H:%M:%S}'\n".format(
+                datetime.now() + timedelta(days=3)))
+
+        # rename directory
+        new_id = self.show_pb(backup_dir, 'node')[1]['id']
+
+        os.rename(
+            os.path.join(backup_dir, 'backups', 'node', backup_id),
+            os.path.join(backup_dir, 'backups', 'node', new_id))
+
+        pgbench = node.pgbench(options=['-T', '3', '-c', '2', '--no-vacuum'])
+        pgbench.wait()
+
+        backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
+        pgdata = self.pgdata_content(node.data_dir)
+
+        node.cleanup()
+        self.restore_node(backup_dir, 'node', node, backup_id=backup_id)
+
+        pgdata_restored = self.pgdata_content(node.data_dir)
+        self.compare_pgdata(pgdata, pgdata_restored)
+
+        # Clean after yourself
+        self.del_test_dir(module_name, fname)
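Both tests above manufacture a "backup from future" by appending a start-time several days ahead to the backup's backup.control file; because the catalog used to be ordered by start time, such a backup would sort ahead of backups that are actually its children. A minimal sketch of the forgery step, assuming backup.control is a key='value' file where a key appended later overrides the earlier one on re-read (the helper name is illustrative, not part of the test framework):

    import os
    from datetime import datetime, timedelta

    def push_backup_into_future(backup_dir, instance, backup_id, days=3):
        """Append a bogus start-time to backup.control, as the tests do."""
        control = os.path.join(backup_dir, 'backups', instance,
                               backup_id, 'backup.control')
        with open(control, 'a') as conf:
            # the appended key is read last and wins on the next catalog scan
            conf.write("start-time='{:%Y-%m-%d %H:%M:%S}'\n".format(
                datetime.now() + timedelta(days=days)))

The tests then rename the backup directory to the new ID reported by show_pb, take another PAGE backup, and verify that merge/restore still walk the chain correctly.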
From b2cb9cf940906e015360bf7d4ee47336d1471077 Mon Sep 17 00:00:00 2001
From: Grigory Smolkin
Date: Wed, 27 Mar 2019 22:29:19 +0300
Subject: [PATCH 3/8] PGPRO-2589: use parent_link instead of start-time
 sorting in merge, validate and restore

---
 src/catalog.c  | 10 ++++--
 src/merge.c    | 85 ++++++++++++++++++++++++++------------------------
 src/restore.c  | 41 ++++++++++--------------
 src/validate.c |  7 ++++-
 4 files changed, 75 insertions(+), 68 deletions(-)

diff --git a/src/catalog.c b/src/catalog.c
index 86b51c87..e1aa0afa 100644
--- a/src/catalog.c
+++ b/src/catalog.c
@@ -393,7 +393,7 @@ catalog_get_backup_list(time_t requested_backup_id)
 		if (curr->backup_mode == BACKUP_MODE_FULL)
 			continue;

-		for (j = i+1; j < parray_num(backups); j++)
+		for (j = 0; j < parray_num(backups); j++)
 		{
 			pgBackup   *ancestor = parray_get(backups, j);

@@ -1034,8 +1034,12 @@ find_parent_full_backup(pgBackup *current_backup)
 	}

 	if (base_full_backup->backup_mode != BACKUP_MODE_FULL)
-		elog(ERROR, "Failed to find FULL backup parent for %s",
-			 base36enc(current_backup->start_time));
+	{
+
+		elog(WARNING, "Backup %s is missing",
+			 base36enc(base_full_backup->parent_backup));
+		return NULL;
+	}

 	return base_full_backup;
 }

diff --git a/src/merge.c b/src/merge.c
index 5726e36c..e5861b39 100644
--- a/src/merge.c
+++ b/src/merge.c
@@ -53,12 +53,10 @@ void
 do_merge(time_t backup_id)
 {
 	parray	   *backups;
+	parray	   *merge_list = parray_new();
 	pgBackup   *dest_backup = NULL;
 	pgBackup   *full_backup = NULL;
-	time_t		prev_parent = INVALID_BACKUP_ID;
 	int			i;
-	int			dest_backup_idx = 0;
-	int			full_backup_idx = 0;

 	if (backup_id == INVALID_BACKUP_ID)
 		elog(ERROR, "required parameter is not specified: --backup-id");
@@ -71,73 +69,79 @@ do_merge(time_t backup_id)
 	/* Get list of all backups sorted in order of descending start time */
 	backups = catalog_get_backup_list(INVALID_BACKUP_ID);

-	/* Find destination and parent backups */
+	/* Find destination backup first */
 	for (i = 0; i < parray_num(backups); i++)
 	{
 		pgBackup   *backup = (pgBackup *) parray_get(backups, i);

-		if (backup->start_time > backup_id)
-			continue;
-		else if (backup->start_time == backup_id && !dest_backup)
+		/* found target */
+		if (backup->start_time == backup_id)
 		{
+			/* sanity */
 			if (backup->status != BACKUP_STATUS_OK &&
 				/* It is possible that previous merging was interrupted */
 				backup->status != BACKUP_STATUS_MERGING &&
 				backup->status != BACKUP_STATUS_DELETING)
-				elog(ERROR, "Backup %s has status: %s",
-					 base36enc(backup->start_time), status2str(backup->status));
+				elog(ERROR, "Backup %s has status: %s",
+						base36enc(backup->start_time), status2str(backup->status));

 			if (backup->backup_mode == BACKUP_MODE_FULL)
 				elog(ERROR, "Backup %s is full backup",
 					 base36enc(backup->start_time));

 			dest_backup = backup;
-			dest_backup_idx = i;
+			break;
 		}
-		else
-		{
-			if (dest_backup == NULL)
-				elog(ERROR, "Target backup %s was not found", base36enc(backup_id));
-
-			if (backup->start_time != prev_parent)
-				continue;
-
-			if (backup->status != BACKUP_STATUS_OK &&
-				/* It is possible that previous merging was interrupted */
-				backup->status != BACKUP_STATUS_MERGING)
-				elog(ERROR, "Backup %s has status: %s",
-					 base36enc(backup->start_time), status2str(backup->status));
-
-			/* If we already found dest_backup, look for full backup */
-			if (dest_backup && backup->backup_mode == BACKUP_MODE_FULL)
-			{
-				full_backup = backup;
-				full_backup_idx = i;
-
-				/* Found target and full backups, so break the loop */
-				break;
-			}
-		}
-
-		prev_parent = backup->parent_backup;
 	}

+	/* sanity */
 	if (dest_backup == NULL)
 		elog(ERROR, "Target backup %s was not found", base36enc(backup_id));
+
+	/* get full backup */
+	full_backup = find_parent_full_backup(dest_backup);
+
+	/* sanity */
 	if (full_backup == NULL)
 		elog(ERROR, "Parent full backup for the given backup %s was not found",
 			 base36enc(backup_id));

-	Assert(full_backup_idx != dest_backup_idx);
+	/* sanity */
+	if (full_backup->status != BACKUP_STATUS_OK &&
+		/* It is possible that previous merging was interrupted */
+		full_backup->status != BACKUP_STATUS_MERGING)
+		elog(ERROR, "Backup %s has status: %s",
+			 base36enc(full_backup->start_time), status2str(full_backup->status));

-	catalog_lock_backup_list(backups, full_backup_idx, dest_backup_idx);
+	//Assert(full_backup_idx != dest_backup_idx);
+
+	/* form merge list */
+	while(dest_backup->parent_backup_link)
+	{
+		/* sanity */
+		if (dest_backup->status != BACKUP_STATUS_OK &&
+			/* It is possible that previous merging was interrupted */
+			dest_backup->status != BACKUP_STATUS_MERGING &&
+			dest_backup->status != BACKUP_STATUS_DELETING)
+			elog(ERROR, "Backup %s has status: %s",
+				 base36enc(dest_backup->start_time), status2str(dest_backup->status));
+
+		parray_append(merge_list, dest_backup);
+		dest_backup = dest_backup->parent_backup_link;
+	}
+
+	/* Add FULL backup for easy locking */
+	parray_append(merge_list, full_backup);
+
+	/* Lock merge chain */
+	catalog_lock_backup_list(merge_list, parray_num(merge_list) - 1, 0);

 	/*
 	 * Found target and full backups, merge them and intermediate backups
 	 */
-	for (i = full_backup_idx; i > dest_backup_idx; i--)
+	for (i = parray_num(merge_list) - 2; i >= 0; i--)
 	{
-		pgBackup   *from_backup = (pgBackup *) parray_get(backups, i - 1);
+		pgBackup   *from_backup = (pgBackup *) parray_get(merge_list, i);

 		merge_backups(full_backup, from_backup);
 	}
@@ -149,6 +153,7 @@ do_merge(time_t backup_id)
 	/* cleanup */
 	parray_walk(backups, pgBackupFree);
 	parray_free(backups);
+	parray_free(merge_list);

 	elog(INFO, "Merge of backup %s completed", base36enc(backup_id));
 }

diff --git a/src/restore.c b/src/restore.c
index c8477774..38fa11ed 100644
--- a/src/restore.c
+++ b/src/restore.c
@@ -55,9 +55,6 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
 	pgBackup   *dest_backup = NULL;
 	pgBackup   *base_full_backup = NULL;
 	pgBackup   *corrupted_backup = NULL;
-	int			dest_backup_index = 0;
-	int			base_full_backup_index = 0;
-	int			corrupted_backup_index = 0;
 	char	   *action = is_restore ? "Restore":"Validate";
 	parray	   *parent_chain = NULL;

@@ -179,8 +176,6 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
 	if (dest_backup == NULL)
 		elog(ERROR, "Backup satisfying target options is not found.");

-	dest_backup_index = get_backup_index_number(backups, dest_backup);
-
 	/* If we already found dest_backup, look for full backup. */
 	if (dest_backup->backup_mode == BACKUP_MODE_FULL)
 		base_full_backup = dest_backup;
@@ -201,7 +196,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
 			missing_backup_start_time = tmp_backup->parent_backup;
 			missing_backup_id = base36enc_dup(tmp_backup->parent_backup);

-			for (j = get_backup_index_number(backups, tmp_backup); j >= 0; j--)
+			for (j = 0; j < parray_num(backups); j++)
 			{
 				pgBackup   *backup = (pgBackup *) parray_get(backups, j);

@@ -235,7 +230,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
 			/* parent_backup_id contain human-readable backup ID of oldest invalid backup */
 			parent_backup_id = base36enc_dup(tmp_backup->start_time);

-			for (j = get_backup_index_number(backups, tmp_backup) - 1; j >= 0; j--)
+			for (j = 0; j < parray_num(backups); j++)
 			{
 				pgBackup   *backup = (pgBackup *) parray_get(backups, j);

@@ -261,6 +256,11 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
 			}
 		}
 		tmp_backup = find_parent_full_backup(dest_backup);
+
+		/* sanity */
+		if (!tmp_backup)
+			elog(ERROR, "Parent full backup for the given backup %s was not found",
+				 base36enc(dest_backup->start_time));
 	}

 	/*
@@ -276,8 +276,6 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
 	if (base_full_backup == NULL)
 		elog(ERROR, "Full backup satisfying target options is not found.");

-	base_full_backup_index = get_backup_index_number(backups, base_full_backup);
-
 	/*
 	 * Ensure that directories provided in tablespace mapping are valid
 	 * i.e. empty or not exist.
@@ -297,17 +295,16 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
 	/* Take every backup that is a child of base_backup AND parent of dest_backup
 	 * including base_backup and dest_backup
 	 */
-	for (i = base_full_backup_index; i >= dest_backup_index; i--)
-	{
-		tmp_backup = (pgBackup *) parray_get(backups, i);
-
-		if (is_parent(base_full_backup->start_time, tmp_backup, true) &&
-			is_parent(tmp_backup->start_time, dest_backup, true))
-		{
-			parray_append(parent_chain, tmp_backup);
-		}
+	tmp_backup = dest_backup;
+	while(tmp_backup->parent_backup_link)
+	{
+		parray_append(parent_chain, tmp_backup);
+		tmp_backup = tmp_backup->parent_backup_link;
 	}

+	parray_append(parent_chain, base_full_backup);
+
 	/* for validation or restore with enabled validation */
 	if (!is_restore || !rt->restore_no_validate)
 	{
@@ -317,7 +314,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
 		/*
 		 * Validate backups from base_full_backup to dest_backup.
 		 */
-		for (i = 0; i < parray_num(parent_chain); i++)
+		for (i = parray_num(parent_chain) - 1; i >= 0; i--)
 		{
 			tmp_backup = (pgBackup *) parray_get(parent_chain, i);

@@ -344,10 +341,6 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
 			if (tmp_backup->status != BACKUP_STATUS_OK)
 			{
 				corrupted_backup = tmp_backup;
-				/* we need corrupted backup index from 'backups' not parent_chain
-				 * so we can properly orphanize all its descendants
-				 */
-				corrupted_backup_index = get_backup_index_number(backups, corrupted_backup);
 				break;
 			}
 			/* We do not validate WAL files of intermediate backups
@@ -373,7 +366,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
 			char	   *corrupted_backup_id;

 			corrupted_backup_id = base36enc_dup(corrupted_backup->start_time);

-			for (j = corrupted_backup_index - 1; j >= 0; j--)
+			for (j = 0; j < parray_num(backups); j++)
 			{
 				pgBackup   *backup = (pgBackup *) parray_get(backups, j);

@@ -418,7 +411,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
 	 */
 	if (is_restore)
 	{
-		for (i = 0; i < parray_num(parent_chain); i++)
+		for (i = parray_num(parent_chain) - 1; i >= 0; i--)
 		{
 			pgBackup   *backup = (pgBackup *) parray_get(parent_chain, i);

diff --git a/src/validate.c b/src/validate.c
index 7d5e94f4..040ea793 100644
--- a/src/validate.c
+++ b/src/validate.c
@@ -452,6 +452,11 @@ do_validate_instance(void)
 				continue;
 			}
 			base_full_backup = find_parent_full_backup(current_backup);
+
+			/* sanity */
+			if (!base_full_backup)
+				elog(ERROR, "Parent full backup for the given backup %s was not found",
+					 base36enc(current_backup->start_time));
 		}
 		/* chain is whole, all parents are valid at first glance,
 		 * current backup validation can proceed
@@ -568,7 +573,7 @@ do_validate_instance(void)

 					if (backup->status == BACKUP_STATUS_OK)
 					{
-						//tmp_backup = find_parent_full_backup(dest_backup);
+
 						/* Revalidation successful, validate corresponding WAL files */
 						validate_wal(backup, arclog_path, 0, 0, 0,
 									 current_backup->tli,
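The common thread of this patch: merge, restore and validate no longer locate a backup's ancestors by position in the start-time-sorted array; they follow the parent_backup_link pointers that catalog_get_backup_list() fills in. A rough Python rendering of the chain construction (attribute names mirror the C struct fields; this is an illustration, not the shipped code):

    def build_parent_chain(dest_backup, full_backup):
        """Collect dest_backup..full_backup, youngest first, via parent links."""
        chain = []
        backup = dest_backup
        while backup.parent_backup_link is not None:
            chain.append(backup)
            backup = backup.parent_backup_link
        chain.append(full_backup)  # the FULL backup terminates the chain
        return chain

Because the chain is stored youngest-first, the FULL end sits at the highest index, which is why the merge and validate loops above now run from parray_num(...) - 1 (or - 2) down to 0.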
From 01207a708643f292b48236542679bd232f043d24 Mon Sep 17 00:00:00 2001
From: Arthur Zakirov
Date: Thu, 28 Mar 2019 13:42:58 +0300
Subject: [PATCH 4/8] Throw an error if pg_probackup.conf doesn't exist

---
 src/catalog.c             |  6 +++---
 src/configure.c           | 22 +++++++++++++++++++---
 src/init.c                |  2 +-
 src/pg_probackup.c        | 10 +++++++---
 src/pg_probackup.h        |  2 +-
 src/utils/configuration.c |  4 ++--
 src/utils/configuration.h |  2 +-
 src/validate.c            |  8 +++++++-
 8 files changed, 41 insertions(+), 15 deletions(-)

diff --git a/src/catalog.c b/src/catalog.c
index 86b51c87..4c8ba3d4 100644
--- a/src/catalog.c
+++ b/src/catalog.c
@@ -603,7 +603,7 @@ write_backup(pgBackup *backup)
 	int			errno_temp;

 	pgBackupGetPath(backup, path, lengthof(path), BACKUP_CONTROL_FILE);
-	snprintf(path_temp, sizeof(path_temp), "%s.partial", path);
+	snprintf(path_temp, sizeof(path_temp), "%s.tmp", path);

 	fp = fopen(path_temp, "wt");
 	if (fp == NULL)
@@ -644,7 +644,7 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root,
 	int			errno_temp;

 	pgBackupGetPath(backup, path, lengthof(path), DATABASE_FILE_LIST);
-	snprintf(path_temp, sizeof(path_temp), "%s.partial", path);
+	snprintf(path_temp, sizeof(path_temp), "%s.tmp", path);

 	fp = fopen(path_temp, "wt");
 	if (fp == NULL)
@@ -728,7 +728,7 @@ readBackupControlFile(const char *path)
 		return NULL;
 	}

-	parsed_options = config_read_opt(path, options, WARNING, true);
+	parsed_options = config_read_opt(path, options, WARNING, true, true);

 	if (parsed_options == 0)
 	{

diff --git a/src/configure.c b/src/configure.c
index aea78244..3a62831e 100644
--- a/src/configure.c
+++ b/src/configure.c
@@ -9,6 +9,8 @@

 #include "pg_probackup.h"

+#include <unistd.h>
+
 #include "utils/configuration.h"
 #include "utils/json.h"

@@ -213,16 +215,22 @@ do_show_config(void)
  * values into the file.
  */
 void
-do_set_config(void)
+do_set_config(bool missing_ok)
 {
 	char		path[MAXPGPATH];
+	char		path_temp[MAXPGPATH];
 	FILE	   *fp;
 	int			i;

 	join_path_components(path, backup_instance_path, BACKUP_CATALOG_CONF_FILE);
-	fp = fopen(path, "wt");
+	snprintf(path_temp, sizeof(path_temp), "%s.tmp", path);
+
+	if (!missing_ok && !fileExists(path))
+		elog(ERROR, "Configuration file \"%s\" doesn't exist", path);
+
+	fp = fopen(path_temp, "wt");
 	if (fp == NULL)
-		elog(ERROR, "cannot create %s: %s",
+		elog(ERROR, "Cannot create configuration file \"%s\": %s",
 			 BACKUP_CATALOG_CONF_FILE, strerror(errno));

 	current_group = NULL;
@@ -253,6 +261,14 @@ do_set_config(void)
 	}

 	fclose(fp);
+
+	if (rename(path_temp, path) < 0)
+	{
+		int			errno_temp = errno;
+
+		unlink(path_temp);
+		elog(ERROR, "Cannot rename configuration file \"%s\" to \"%s\": %s",
+			 path_temp, path, strerror(errno_temp));
+	}
 }

 void

diff --git a/src/init.c b/src/init.c
index fb9b7bbb..4fe1168c 100644
--- a/src/init.c
+++ b/src/init.c
@@ -104,7 +104,7 @@ do_add_instance(void)
 	config_set_opt(instance_options, &instance_config.xlog_seg_size,
 				   SOURCE_FILE);
 	/* pgdata was set through command line */
-	do_set_config();
+	do_set_config(true);

 	elog(INFO, "Instance '%s' successfully inited", instance_name);
 	return 0;

diff --git a/src/pg_probackup.c b/src/pg_probackup.c
index 4f19f89e..ee3cefde 100644
--- a/src/pg_probackup.c
+++ b/src/pg_probackup.c
@@ -382,8 +382,12 @@ main(int argc, char *argv[])
 		config_get_opt_env(instance_options);

 		/* Read options from configuration file */
-		join_path_components(path, backup_instance_path, BACKUP_CATALOG_CONF_FILE);
-		config_read_opt(path, instance_options, ERROR, true);
+		if (backup_subcmd != ADD_INSTANCE_CMD)
+		{
+			join_path_components(path, backup_instance_path,
+								 BACKUP_CATALOG_CONF_FILE);
+			config_read_opt(path, instance_options, ERROR, true, false);
+		}
 	}

 	/* Initialize logger */
@@ -525,7 +529,7 @@ main(int argc, char *argv[])
 			do_show_config();
 			break;
 		case SET_CONFIG_CMD:
-			do_set_config();
+			do_set_config(false);
 			break;
 		case NO_CMD:
 			/* Should not happen */

diff --git a/src/pg_probackup.h b/src/pg_probackup.h
index 2cceaaed..e84b5e26 100644
--- a/src/pg_probackup.h
+++ b/src/pg_probackup.h
@@ -442,7 +442,7 @@ extern int do_archive_get(char *wal_file_path, char *wal_file_name);

 /* in configure.c */
 extern void do_show_config(void);
-extern void do_set_config(void);
+extern void do_set_config(bool missing_ok);
 extern void init_config(InstanceConfig *config);

 /* in show.c */

diff --git a/src/utils/configuration.c b/src/utils/configuration.c
index eabd35eb..ab2c91b6 100644
--- a/src/utils/configuration.c
+++ b/src/utils/configuration.c
@@ -521,7 +521,7 @@ config_get_opt(int argc, char **argv, ConfigOption cmd_options[],
 */
 int
 config_read_opt(const char *path, ConfigOption options[], int elevel,
-				bool strict)
+				bool strict, bool missing_ok)
 {
 	FILE	   *fp;
 	char		buf[1024];
@@ -532,7 +532,7 @@ config_read_opt(const char *path, ConfigOption options[], int elevel,
 	if (!options)
 		return parsed_options;

-	if ((fp = pgut_fopen(path, "rt", true)) == NULL)
+	if ((fp = pgut_fopen(path, "rt", missing_ok)) == NULL)
 		return parsed_options;

 	while (fgets(buf, lengthof(buf), fp))

diff --git a/src/utils/configuration.h b/src/utils/configuration.h
index 9602f1d6..96e20047 100644
--- a/src/utils/configuration.h
+++ b/src/utils/configuration.h
@@ -78,7 +78,7 @@ struct ConfigOption
 extern int	config_get_opt(int argc, char **argv, ConfigOption cmd_options[],
 						   ConfigOption options[]);
 extern int	config_read_opt(const char *path, ConfigOption options[], int elevel,
-							bool strict);
+							bool strict, bool missing_ok);
 extern void	config_get_opt_env(ConfigOption options[]);
 extern void	config_set_opt(ConfigOption options[], void *var,
 						   OptionSource source);

diff --git a/src/validate.c b/src/validate.c
index 7d5e94f4..b4c74786 100644
--- a/src/validate.c
+++ b/src/validate.c
@@ -337,7 +337,13 @@ do_validate_all(void)
 			sprintf(arclog_path, "%s/%s/%s", backup_path, "wal", instance_name);
 			join_path_components(conf_path, backup_instance_path,
 								 BACKUP_CATALOG_CONF_FILE);
-			config_read_opt(conf_path, instance_options, ERROR, false);
+			if (config_read_opt(conf_path, instance_options, ERROR, false,
+								true) == 0)
+			{
+				elog(WARNING, "Configuration file \"%s\" is empty", conf_path);
+				corrupted_backup_found = true;
+				continue;
+			}

 			do_validate_instance();
 		}

From 65dd94d664a7ea7de10d3f41446637866907eb2a Mon Sep 17 00:00:00 2001
From: Arthur Zakirov
Date: Thu, 28 Mar 2019 16:58:14 +0300
Subject: [PATCH 5/8] PGPRO-2589: Binary search for catalog_get_backup_list()

---
 src/catalog.c | 32 ++++++++++++++------------------
 1 file changed, 14 insertions(+), 18 deletions(-)

diff --git a/src/catalog.c b/src/catalog.c
index e1aa0afa..a25e016b 100644
--- a/src/catalog.c
+++ b/src/catalog.c
@@ -386,25 +386,18 @@ catalog_get_backup_list(time_t requested_backup_id)
 	/* Link incremental backups with their ancestors. */
 	for (i = 0; i < parray_num(backups); i++)
 	{
-		pgBackup *curr = parray_get(backups, i);
-
-		int j;
+		pgBackup   *curr = parray_get(backups, i);
+		pgBackup  **ancestor;
+		pgBackup	key;

 		if (curr->backup_mode == BACKUP_MODE_FULL)
 			continue;

-		for (j = 0; j < parray_num(backups); j++)
-		{
-			pgBackup *ancestor = parray_get(backups, j);
-
-			if (ancestor->start_time == curr->parent_backup)
-			{
-				curr->parent_backup_link = ancestor;
-				/* elog(INFO, "curr %s, ancestor %s j=%d", base36enc_dup(curr->start_time),
-				   base36enc_dup(ancestor->start_time), j); */
-				break;
-			}
-		}
+		key.start_time = curr->parent_backup;
+		ancestor = (pgBackup **) parray_bsearch(backups, &key,
+												pgBackupCompareIdDesc);
+		if (ancestor)
+			curr->parent_backup_link = *ancestor;
 	}

 	return backups;
@@ -1035,9 +1028,12 @@ find_parent_full_backup(pgBackup *current_backup)

 	if (base_full_backup->backup_mode != BACKUP_MODE_FULL)
 	{
-
-		elog(WARNING, "Backup %s is missing",
-			 base36enc(base_full_backup->parent_backup));
+		if (base_full_backup->parent_backup)
+			elog(WARNING, "Backup %s is missing",
+				 base36enc(base_full_backup->parent_backup));
+		else
+			elog(WARNING, "Failed to find parent FULL backup for %s",
+				 base36enc(current_backup->start_time));
 		return NULL;
 	}
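Two behaviours change in patch 4: set-config now refuses to create pg_probackup.conf when it does not already exist (missing_ok=false), and every rewrite of the file goes through a .tmp sibling that is rename()d into place, so an interrupted write can no longer leave a truncated config behind. The same write-then-rename idiom in Python, under the assumption of a POSIX filesystem where rename over an existing path is atomic (render_options is a stand-in for serializing the options):

    import os

    def set_config(path, render_options, missing_ok=False):
        """Rewrite a config file atomically via <path>.tmp."""
        if not missing_ok and not os.path.exists(path):
            raise FileNotFoundError(
                'Configuration file "%s" doesn\'t exist' % path)
        path_temp = path + '.tmp'
        with open(path_temp, 'w') as fp:
            fp.write(render_options())
        try:
            os.rename(path_temp, path)  # atomic replace on POSIX
        except OSError:
            os.unlink(path_temp)        # never leave the temp file behind
            raise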
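Patch 5 turns the O(n^2) ancestor linking into a binary search: catalog_get_backup_list() keeps the array sorted by start time descending, so the parent can be found with parray_bsearch() and the same comparator. An equivalent using Python's bisect, with the descending order handled by negating the key (object shapes are illustrative, not the shipped code):

    import bisect

    def link_ancestors(backups):
        """backups: list of backup objects sorted by start_time descending."""
        keys = [-b.start_time for b in backups]  # ascending view for bisect
        for curr in backups:
            if curr.backup_mode == 'FULL':
                continue
            pos = bisect.bisect_left(keys, -curr.parent_backup)
            if pos < len(backups) and backups[pos].start_time == curr.parent_backup:
                curr.parent_backup_link = backups[pos]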
From e82b5daefcd2bceef118ef79e2d06194279ad912 Mon Sep 17 00:00:00 2001
From: Grigory Smolkin
Date: Thu, 28 Mar 2019 17:10:28 +0300
Subject: [PATCH 6/8] tests: rename module delete_test to delete; added
 delete.DeleteTest.test_delete_interleaved_incremental_chains and
 delete.DeleteTest.test_delete_multiple_descendants

---
 tests/__init__.py                   |   4 +-
 tests/{delete_test.py => delete.py} | 299 ++++++++++++++++++++++++++++
 2 files changed, 301 insertions(+), 2 deletions(-)
 rename tests/{delete_test.py => delete.py} (52%)

diff --git a/tests/__init__.py b/tests/__init__.py
index b1fa5f5d..ad7f8a16 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -1,7 +1,7 @@
 import unittest

 from . import init_test, merge, option_test, show_test, compatibility, \
-    backup_test, delete_test, delta, restore, validate, \
+    backup_test, delete, delta, restore, validate, \
     retention, pgpro560, pgpro589, pgpro2068, false_positive, replica, \
     compression, page, ptrack, archive, exclude, cfs_backup, cfs_restore, \
     cfs_validate_backup, auth_test, time_stamp, snapfs, logging, \
@@ -19,7 +19,7 @@ def load_tests(loader, tests, pattern):
 #    suite.addTests(loader.loadTestsFromModule(cfs_validate_backup))
 #    suite.addTests(loader.loadTestsFromModule(logging))
     suite.addTests(loader.loadTestsFromModule(compression))
-    suite.addTests(loader.loadTestsFromModule(delete_test))
+    suite.addTests(loader.loadTestsFromModule(delete))
     suite.addTests(loader.loadTestsFromModule(delta))
     suite.addTests(loader.loadTestsFromModule(exclude))
     suite.addTests(loader.loadTestsFromModule(false_positive))

diff --git a/tests/delete_test.py b/tests/delete.py
similarity index 52%
rename from tests/delete_test.py
rename to tests/delete.py
index 6c817f76..3c573a85 100644
--- a/tests/delete_test.py
+++ b/tests/delete.py
@@ -250,3 +250,302 @@ class DeleteTest(ProbackupTest, unittest.TestCase):

         # Clean after yourself
         self.del_test_dir(module_name, fname)
+
+    # @unittest.skip("skip")
+    def test_delete_interleaved_incremental_chains(self):
+        """complicated case of interleaved backup chains"""
+        fname = self.id().split('.')[3]
+        node = self.make_simple_node(
+            base_dir=os.path.join(module_name, fname, 'node'),
+            initdb_params=['--data-checksums'])
+
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        # Take FULL BACKUPs
+
+        backup_id_a = self.backup_node(backup_dir, 'node', node)
+        backup_id_b = self.backup_node(backup_dir, 'node', node)
+
+        # Change FULL B backup status to ERROR
+        self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
+
+        # FULLb ERROR
+        # FULLa OK
+        # Take PAGEa1 backup
+        page_id_a1 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        # PAGEa1 OK
+        # FULLb ERROR
+        # FULLa OK
+        # Change FULL B backup status to OK
+        self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
+
+        # Change PAGEa1 backup status to ERROR
+        self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR')
+
+        # PAGEa1 ERROR
+        # FULLb OK
+        # FULLa OK
+        page_id_b1 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        # PAGEb1 OK
+        # PAGEa1 ERROR
+        # FULLb OK
+        # FULLa OK
+        # Now we start to play with first generation of PAGE backups
+        # Change PAGEb1 status to ERROR
+        self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
+
+        # Change PAGEa1 status to OK
+        self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK')
+
+        # PAGEb1 ERROR
+        # PAGEa1 OK
+        # FULLb OK
+        # FULLa OK
+        page_id_a2 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        # PAGEa2 OK
+        # PAGEb1 ERROR
+        # PAGEa1 OK
+        # FULLb OK
+        # FULLa OK
+        # Change PAGEa2 status to ERROR
+        self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR')
+
+        # Change PAGEb1 status to OK
+        self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
+
+        # PAGEa2 ERROR
+        # PAGEb1 OK
+        # PAGEa1 OK
+        # FULLb OK
+        # FULLa OK
+        page_id_b2 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        # Change PAGEa2 status to OK
+        self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK')
+
+        # PAGEb2 OK
+        # PAGEa2 OK
+        # PAGEb1 OK
+        # PAGEa1 OK
+        # FULLb OK
+        # FULLa OK
+
+        self.backup_node(backup_dir, 'node', node)
+        self.backup_node(backup_dir, 'node', node, backup_type='page')
+
+        # PAGEc1 OK
+        # FULLc OK
+        # PAGEb2 OK
+        # PAGEa2 OK
+        # PAGEb1 OK
+        # PAGEa1 OK
+        # FULLb OK
+        # FULLa OK
+
+        # Delete FULLb
+        self.delete_pb(
+            backup_dir, 'node', backup_id_b)
+
+        self.assertEqual(len(self.show_pb(backup_dir, 'node')), 5)
+
+        print(self.show_pb(
+            backup_dir, 'node', as_json=False, as_text=True))
+
+        # Clean after yourself
+        self.del_test_dir(module_name, fname)
+
+    # @unittest.skip("skip")
+    def test_delete_multiple_descendants(self):
+        """
+        PAGEb3
+          |       PAGEa3
+        PAGEb2   /
+          |     PAGEa2  /
+        PAGEb1   \     /
+          |       PAGEa1
+        FULLb      |
+        FULLa  should be deleted
+        """
+        fname = self.id().split('.')[3]
+        node = self.make_simple_node(
+            base_dir=os.path.join(module_name, fname, 'node'),
+            initdb_params=['--data-checksums'])
+
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        node.pgbench_init(scale=3)
+
+        # Take FULL BACKUPs
+        backup_id_a = self.backup_node(backup_dir, 'node', node)
+        pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
+        pgbench.wait()
+
+        backup_id_b = self.backup_node(backup_dir, 'node', node)
+        pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
+        pgbench.wait()
+
+        # Change FULLb backup status to ERROR
+        self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
+
+        page_id_a1 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
+        pgbench.wait()
+
+        # Change FULLb backup status to OK
+        self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
+
+        # Change PAGEa1 backup status to ERROR
+        self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR')
+
+        # PAGEa1 ERROR
+        # FULLb OK
+        # FULLa OK
+
+        page_id_b1 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        # PAGEb1 OK
+        # PAGEa1 ERROR
+        # FULLb OK
+        # FULLa OK
+
+        pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
+        pgbench.wait()
+
+        # Change PAGEa1 backup status to OK
+        self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK')
+
+        # Change PAGEb1 backup status to ERROR
+        self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
+
+        # PAGEb1 ERROR
+        # PAGEa1 OK
+        # FULLb OK
+        # FULLa OK
+
+        page_id_a2 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
+        pgbench.wait()
+
+        # PAGEa2 OK
+        # PAGEb1 ERROR
+        # PAGEa1 OK
+        # FULLb OK
+        # FULLa OK
+
+        # Change PAGEb1 backup status to OK
+        self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
+
+        # Change PAGEa2 backup status to ERROR
+        self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR')
+
+        # PAGEa2 ERROR
+        # PAGEb1 OK
+        # PAGEa1 OK
+        # FULLb OK
+        # FULLa OK
+
+        page_id_b2 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
+        pgbench.wait()
+
+        # PAGEb2 OK
+        # PAGEa2 ERROR
+        # PAGEb1 OK
+        # PAGEa1 OK
+        # FULLb OK
+        # FULLa OK
+
+        # Change PAGEb2 and PAGEb1 status to ERROR
+        self.change_backup_status(backup_dir, 'node', page_id_b2, 'ERROR')
+        self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
+
+        # PAGEb2 ERROR
+        # PAGEa2 ERROR
+        # PAGEb1 ERROR
+        # PAGEa1 OK
+        # FULLb OK
+        # FULLa OK
+
+        page_id_a3 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+        pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
+        pgbench.wait()
+
+        # PAGEa3 OK
+        # PAGEb2 ERROR
+        # PAGEa2 ERROR
+        # PAGEb1 ERROR
+        # PAGEa1 OK
+        # FULLb OK
+        # FULLa OK
+
+        # Change PAGEa3 status to ERROR
+        self.change_backup_status(backup_dir, 'node', page_id_a3, 'ERROR')
+
+        # Change PAGEb2 status to OK
+        self.change_backup_status(backup_dir, 'node', page_id_b2, 'OK')
+
+        page_id_b3 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        # PAGEb3 OK
+        # PAGEa3 ERROR
+        # PAGEb2 OK
+        # PAGEa2 ERROR
+        # PAGEb1 ERROR
+        # PAGEa1 OK
+        # FULLb OK
+        # FULLa OK
+
+        # Change PAGEa3, PAGEa2 and PAGEb1 status to OK
+        self.change_backup_status(backup_dir, 'node', page_id_a3, 'OK')
+        self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK')
+        self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
+
+        # PAGEb3 OK
+        # PAGEa3 OK
+        # PAGEb2 OK
+        # PAGEa2 OK
+        # PAGEb1 OK
+        # PAGEa1 OK
+        # FULLb OK
+        # FULLa OK
+
+        # Check that page_id_a3 and page_id_a2 are both direct descendants of page_id_a1
+        self.assertEqual(
+            self.show_pb(backup_dir, 'node', backup_id=page_id_a3)['parent-backup-id'],
+            page_id_a1)
+
+        self.assertEqual(
+            self.show_pb(backup_dir, 'node', backup_id=page_id_a2)['parent-backup-id'],
+            page_id_a1)
+
+        # Delete FULLa
+        self.delete_pb(backup_dir, 'node', backup_id_a)
+
+        self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4)
+
+        # Clean after yourself
+        self.del_test_dir(module_name, fname)
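Both new tests revolve around one question: which backups count as descendants of a given backup once two chains interleave by start time? The intended semantics, used for deletion in the next patch, is reachability through parent links rather than any start-time interval — roughly the following check (a toy version of the is_parent(..., false) call that patch 7 relies on; object shapes are illustrative):

    def is_descendant(ancestor_id, backup):
        """True if `backup` is reachable from the backup with id `ancestor_id`."""
        parent = backup.parent_backup_link
        while parent is not None:
            if parent.start_time == ancestor_id:
                return True
            parent = parent.parent_backup_link
        return False

That is what test_delete_multiple_descendants asserts with the 'parent-backup-id' checks: PAGEa2 and PAGEa3 both hang off PAGEa1, so deleting FULLa must take the whole a-chain and leave the four b-side backups alone.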
From d48f4024a8d775a2a3ca7884ffddfd9ae420816b Mon Sep 17 00:00:00 2001
From: Grigory Smolkin
Date: Thu, 28 Mar 2019 17:19:33 +0300
Subject: [PATCH 7/8] PGPRO-2589: use parent_link when deleting backup chain
 via delete -i BACKUP_ID

---
 src/delete.c    | 95 ++++++++++++++++++++++---------------------------
 tests/delete.py | 20 -----------
 2 files changed, 42 insertions(+), 73 deletions(-)

diff --git a/src/delete.c b/src/delete.c
index 287e7377..2b2f0d61 100644
--- a/src/delete.c
+++ b/src/delete.c
@@ -24,71 +24,57 @@ do_delete(time_t backup_id)
 	parray	   *backup_list,
 			   *delete_list;
 	pgBackup   *target_backup = NULL;
-	time_t		parent_id = 0;
 	XLogRecPtr	oldest_lsn = InvalidXLogRecPtr;
 	TimeLineID	oldest_tli = 0;

 	/* Get complete list of backups */
 	backup_list = catalog_get_backup_list(INVALID_BACKUP_ID);

-	if (backup_id != 0)
+	delete_list = parray_new();
+
+	/* Find backup to be deleted and make increment backups array to be deleted */
+	for (i = 0; i < parray_num(backup_list); i++)
 	{
-		delete_list = parray_new();
+		pgBackup   *backup = (pgBackup *) parray_get(backup_list, i);

-		/* Find backup to be deleted and make increment backups array to be deleted */
-		for (i = (int) parray_num(backup_list) - 1; i >= 0; i--)
+		if (backup->start_time == backup_id)
 		{
-			pgBackup   *backup = (pgBackup *) parray_get(backup_list, (size_t) i);
-
-			if (backup->start_time == backup_id)
-			{
-				parray_append(delete_list, backup);
-
-				/*
-				 * Do not remove next backups, if target backup was finished
-				 * incorrectly.
-				 */
-				if (backup->status == BACKUP_STATUS_ERROR)
-					break;
-
-				/* Save backup id to retreive increment backups */
-				parent_id = backup->start_time;
-				target_backup = backup;
-			}
-			else if (target_backup)
-			{
-				if (backup->backup_mode != BACKUP_MODE_FULL &&
-					backup->parent_backup == parent_id)
-				{
-					/* Append to delete list increment backup */
-					parray_append(delete_list, backup);
-					/* Save backup id to retreive increment backups */
-					parent_id = backup->start_time;
-				}
-				else
-					break;
-			}
+			target_backup = backup;
+			break;
 		}
-
-		if (parray_num(delete_list) == 0)
-			elog(ERROR, "no backup found, cannot delete");
-
-		catalog_lock_backup_list(delete_list, parray_num(delete_list) - 1, 0);
-
-		/* Delete backups from the end of list */
-		for (i = (int) parray_num(delete_list) - 1; i >= 0; i--)
-		{
-			pgBackup   *backup = (pgBackup *) parray_get(delete_list, (size_t) i);
-
-			if (interrupted)
-				elog(ERROR, "interrupted during delete backup");
-
-			delete_backup_files(backup);
-		}
-
-		parray_free(delete_list);
 	}

+	/* sanity */
+	if (!target_backup)
+		elog(ERROR, "Failed to find backup %s, cannot delete", base36enc(backup_id));
+
+	/* form delete list */
+	for (i = 0; i < parray_num(backup_list); i++)
+	{
+		pgBackup   *backup = (pgBackup *) parray_get(backup_list, i);
+
+		/* check if backup is descendant of delete target */
+		if (is_parent(target_backup->start_time, backup, false))
+			parray_append(delete_list, backup);
+	}
+	parray_append(delete_list, target_backup);
+
+	/* Lock marked for delete backups */
+	catalog_lock_backup_list(delete_list, parray_num(delete_list) - 1, 0);
+
+	/* Delete backups from the end of list */
+	for (i = (int) parray_num(delete_list) - 1; i >= 0; i--)
+	{
+		pgBackup   *backup = (pgBackup *) parray_get(delete_list, (size_t) i);
+
+		if (interrupted)
+			elog(ERROR, "interrupted during delete backup");
+
+		delete_backup_files(backup);
+	}
+
+	parray_free(delete_list);
+
 	/* Clean WAL segments */
 	if (delete_wal)
 	{
@@ -303,6 +289,9 @@ delete_backup_files(pgBackup *backup)
 			elog(INFO, "Progress: (%zd/%zd). Process file \"%s\"",
 				 i + 1, num_files, file->path);

+		if (interrupted)
+			elog(ERROR, "interrupted during delete backup");
+
 		pgFileDelete(file);
 	}

diff --git a/tests/delete.py b/tests/delete.py
index 3c573a85..71919c86 100644
--- a/tests/delete.py
+++ b/tests/delete.py
@@ -388,16 +388,10 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
         self.set_archiving(backup_dir, 'node', node)
         node.slow_start()

-        node.pgbench_init(scale=3)
-
         # Take FULL BACKUPs
         backup_id_a = self.backup_node(backup_dir, 'node', node)
-        pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
-        pgbench.wait()

         backup_id_b = self.backup_node(backup_dir, 'node', node)
-        pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
-        pgbench.wait()

         # Change FULLb backup status to ERROR
         self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
@@ -405,9 +399,6 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
         page_id_a1 = self.backup_node(
             backup_dir, 'node', node, backup_type='page')

-        pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
-        pgbench.wait()
-
         # Change FULLb backup status to OK
         self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
@@ -426,9 +417,6 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
         # FULLb OK
         # FULLa OK

-        pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
-        pgbench.wait()
-
         # Change PAGEa1 backup status to OK
         self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK')
@@ -443,9 +431,6 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
         page_id_a2 = self.backup_node(
             backup_dir, 'node', node, backup_type='page')

-        pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
-        pgbench.wait()
-
         # PAGEa2 OK
         # PAGEb1 ERROR
         # PAGEa1 OK
         # FULLb OK
         # FULLa OK
@@ -467,9 +452,6 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
         page_id_b2 = self.backup_node(
             backup_dir, 'node', node, backup_type='page')

-        pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
-        pgbench.wait()
-
         # PAGEb2 OK
         # PAGEa2 ERROR
         # PAGEb1 OK
@@ -490,8 +472,6 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
         page_id_a3 = self.backup_node(
             backup_dir, 'node', node, backup_type='page')
-        pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
-        pgbench.wait()

         # PAGEa3 OK
         # PAGEb2 ERROR
From 0ff607914bcbb1b7898ac07486a5cadbecbeeb48 Mon Sep 17 00:00:00 2001
From: Grigory Smolkin
Date: Thu, 28 Mar 2019 18:08:49 +0300
Subject: [PATCH 8/8] tests: minor fixes to merge module

---
 tests/merge.py | 53 ++++++++++++++++----------------------------------
 1 file changed, 17 insertions(+), 36 deletions(-)

diff --git a/tests/merge.py b/tests/merge.py
index 3c34ec4b..03cd49d6 100644
--- a/tests/merge.py
+++ b/tests/merge.py
@@ -1203,11 +1203,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
             base_dir=os.path.join(module_name, fname, 'node'),
-            set_replication=True, initdb_params=['--data-checksums'],
-            pg_options={
-                'wal_level': 'replica'
-            }
-        )
+            set_replication=True, initdb_params=['--data-checksums'])

         self.init_pb(backup_dir)
         self.add_instance(backup_dir, 'node', node)
@@ -1258,20 +1254,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         backup_id_deleted = self.show_pb(backup_dir, "node")[1]["id"]

         # Try to continue failed MERGE
-        try:
-            self.merge_backup(backup_dir, "node", backup_id)
-            self.assertEqual(
-                1, 0,
-                "Expecting Error because of backup corruption.\n "
-                "Output: {0} \n CMD: {1}".format(
-                    repr(self.output), self.cmd))
-        except ProbackupException as e:
-            self.assertTrue(
-                "ERROR: Backup {0} has status: DELETING".format(
-                    backup_id_deleted) in e.message,
-                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
-                    repr(e.message), self.cmd))
-
+        self.merge_backup(backup_dir, "node", backup_id)

         # Clean after yourself
         self.del_test_dir(module_name, fname)
@@ -1617,7 +1600,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         node = self.make_simple_node(
             base_dir=os.path.join(module_name, fname, 'node'),
             set_replication=True,
-            initdb_params=['--data-checksums'])
+            initdb_params=['--data-checksums'],
+            pg_options={'autovacuum': 'off'})

         self.init_pb(backup_dir)
         self.add_instance(backup_dir, 'node', node)
@@ -1639,6 +1623,10 @@ class MergeTest(ProbackupTest, unittest.TestCase):
             'postgres',
             "select pg_relation_filepath('pgbench_accounts')").rstrip()

+        node.safe_psql(
+            'postgres',
+            "VACUUM pgbench_accounts")
+
         vm_path = path + '_vm'

         # DELTA backup
@@ -1674,27 +1662,20 @@ class MergeTest(ProbackupTest, unittest.TestCase):
             backup_dir, 'backups', 'node',
             full_id, 'database', vm_path)

-        print(file_to_remove)
-
         os.remove(file_to_remove)

         # Try to continue failed MERGE
-        try:
-            self.merge_backup(backup_dir, "node", backup_id)
-            self.assertEqual(
-                1, 0,
-                "Expecting Error because of backup corruption.\n "
-                "Output: {0} \n CMD: {1}".format(
-                    repr(self.output), self.cmd))
-        except ProbackupException as e:
-            self.assertTrue(
-                "ERROR: Merging of backup {0} failed".format(
-                    backup_id) in e.message,
-                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
-                    repr(e.message), self.cmd))
+        self.merge_backup(backup_dir, "node", backup_id)

         self.assertEqual(
-            'CORRUPT', self.show_pb(backup_dir, 'node')[0]['status'])
+            'OK', self.show_pb(backup_dir, 'node')[0]['status'])
+
+        node.cleanup()
+
+        self.restore_node(backup_dir, 'node', node)
+
+        pgdata_restored = self.pgdata_content(node.data_dir)
+        self.compare_pgdata(pgdata, pgdata_restored)

         self.del_test_dir(module_name, fname)
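The reworked assertions encode the behaviour change of this series: continuing a MERGE that previously failed (a vm file removed, or an interruption during DELETING) is now expected to simply succeed, so the try/except blocks around merge_backup() are gone and the instance must end up OK rather than CORRUPT. The retry the tests now exercise is just:

    # inside a ProbackupTest case, after an earlier merge attempt failed:
    self.merge_backup(backup_dir, "node", backup_id)   # plain retry, no exception expected
    self.assertEqual('OK', self.show_pb(backup_dir, 'node')[0]['status'])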