From 0f212e0455cf39e27a631fca0193a45fd22521dd Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Mon, 22 May 2017 14:17:43 +0300 Subject: [PATCH] TESTS: false_positive added, pgpro-560 added, validate redesign --- tests/__init__.py | 13 +- tests/backup_test.py | 50 +++---- tests/delete_test.py | 29 ++-- tests/expected/option_help.out | 14 +- tests/false_positive.py | 154 +++++++++++++++++++ tests/init_test.py | 2 + tests/option_test.py | 2 + tests/pgpro560.py | 78 ++++++++++ tests/pgpro561.py | 62 -------- tests/pgpro589.py | 71 ++++++--- tests/pgpro688.py | 45 ------ tests/pgpro702.py | 39 ----- tests/ptrack_clean.py | 4 +- tests/ptrack_cluster.py | 3 +- tests/ptrack_helpers.py | 19 ++- tests/ptrack_move_to_tablespace.py | 2 + tests/ptrack_recovery.py | 2 + tests/ptrack_vacuum.py | 3 +- tests/ptrack_vacuum_bits_frozen.py | 2 + tests/ptrack_vacuum_bits_visibility.py | 2 + tests/ptrack_vacuum_full.py | 14 +- tests/ptrack_vacuum_truncate.py | 2 + tests/replica.py | 25 ++-- tests/restore_test.py | 193 +++++++++--------------- tests/retention_test.py | 3 +- tests/show_test.py | 2 + tests/validate_test.py | 200 ++++++++++++------------- 27 files changed, 550 insertions(+), 485 deletions(-) create mode 100644 tests/false_positive.py create mode 100644 tests/pgpro560.py delete mode 100644 tests/pgpro561.py delete mode 100644 tests/pgpro688.py delete mode 100644 tests/pgpro702.py diff --git a/tests/__init__.py b/tests/__init__.py index 39c9f625..37da466a 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -5,16 +5,16 @@ from . import init_test, option_test, show_test, \ retention_test, ptrack_clean, ptrack_cluster, \ ptrack_move_to_tablespace, ptrack_recovery, ptrack_vacuum, \ ptrack_vacuum_bits_frozen, ptrack_vacuum_bits_visibility, \ - ptrack_vacuum_full, ptrack_vacuum_truncate, common_archive_test, \ - pgpro561, pgpro688, pgpro702, pgpro589 + ptrack_vacuum_full, ptrack_vacuum_truncate, pgpro560, pgpro589, \ + false_positive, replica def load_tests(loader, tests, pattern): suite = unittest.TestSuite() - suite.addTests(loader.loadTestsFromModule(pgpro561)) + suite.addTests(loader.loadTestsFromModule(replica)) + suite.addTests(loader.loadTestsFromModule(pgpro560)) suite.addTests(loader.loadTestsFromModule(pgpro589)) - suite.addTests(loader.loadTestsFromModule(pgpro688)) - suite.addTests(loader.loadTestsFromModule(pgpro702)) + suite.addTests(loader.loadTestsFromModule(false_positive)) suite.addTests(loader.loadTestsFromModule(init_test)) suite.addTests(loader.loadTestsFromModule(option_test)) suite.addTests(loader.loadTestsFromModule(show_test)) @@ -34,3 +34,6 @@ def load_tests(loader, tests, pattern): suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_truncate)) return suite + + +# ExpectedFailures are bugs, which should be fixed diff --git a/tests/backup_test.py b/tests/backup_test.py index bd8fbe1e..bc853223 100644 --- a/tests/backup_test.py +++ b/tests/backup_test.py @@ -10,10 +10,12 @@ class BackupTest(ProbackupTest, unittest.TestCase): def __init__(self, *args, **kwargs): super(BackupTest, self).__init__(*args, **kwargs) -# @classmethod -# def tearDownClass(cls): -# stop_all() -# @unittest.skip("123") + @classmethod + def tearDownClass(cls): + stop_all() + + # @unittest.skip("skip") + # @unittest.expectedFailure def test_backup_modes_archive(self): """standart backup modes with ARCHIVE WAL method""" fname = self.id().split('.')[3] @@ -26,8 +28,10 @@ class BackupTest(ProbackupTest, unittest.TestCase): self.assertEqual(self.init_pb(node), six.b("")) # full backup mode - with 
open(path.join(node.logs_dir, "backup_full.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, options=["--verbose"])) + #with open(path.join(node.logs_dir, "backup_full.log"), "wb") as backup_log: + # backup_log.write(self.backup_pb(node, options=["--verbose"])) + + self.backup_pb(node) show_backup = self.show_pb(node)[0] full_backup_id = show_backup['ID'] @@ -46,8 +50,7 @@ class BackupTest(ProbackupTest, unittest.TestCase): self.assertEqual(excluded, True) # page backup mode - with open(path.join(node.logs_dir, "backup_page.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, backup_type="page", options=["--verbose"])) + self.backup_pb(node, backup_type="page") # print self.show_pb(node) show_backup = self.show_pb(node)[1] @@ -60,8 +63,7 @@ class BackupTest(ProbackupTest, unittest.TestCase): self.show_pb(node, id=show_backup['ID'])["parent-backup-id"]) # ptrack backup mode - with open(path.join(node.logs_dir, "backup_ptrack.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, backup_type="ptrack", options=["--verbose"])) + self.backup_pb(node, backup_type="ptrack") show_backup = self.show_pb(node)[2] self.assertEqual(show_backup['Status'], six.b("OK")) @@ -69,7 +71,6 @@ class BackupTest(ProbackupTest, unittest.TestCase): node.stop() -# @unittest.skip("123") def test_smooth_checkpoint(self): """full backup with smooth checkpoint""" fname = self.id().split('.')[3] @@ -81,14 +82,12 @@ class BackupTest(ProbackupTest, unittest.TestCase): node.start() self.assertEqual(self.init_pb(node), six.b("")) - with open(path.join(node.logs_dir, "backup.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, options=["--verbose", "-C"])) + self.backup_pb(node, options=["-C"]) self.assertEqual(self.show_pb(node)[0]['Status'], six.b("OK")) node.stop() -# @unittest.skip("123") def test_page_backup_without_full(self): """page-level backup without validated full backup""" fname = self.id().split('.')[3] @@ -101,14 +100,13 @@ class BackupTest(ProbackupTest, unittest.TestCase): self.assertEqual(self.init_pb(node), six.b("")) try: - self.backup_pb(node, backup_type="page", options=["--verbose"]) + self.backup_pb(node, backup_type="page") except ProbackupException, e: pass self.assertEqual(self.show_pb(node)[0]['Status'], six.b("ERROR")) node.stop() -# @unittest.skip("123") def test_ptrack_threads(self): """ptrack multi thread backup mode""" fname = self.id().split('.')[3] @@ -120,19 +118,17 @@ class BackupTest(ProbackupTest, unittest.TestCase): node.start() self.assertEqual(self.init_pb(node), six.b("")) - with open(path.join(node.logs_dir, "backup_full.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose", "-j", "4"])) + self.backup_pb(node, backup_type="full", options=["-j", "4"]) self.assertEqual(self.show_pb(node)[0]['Status'], six.b("OK")) with open(path.join(node.logs_dir, "backup_ptrack.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, backup_type="ptrack", options=["--verbose", "-j", "4"])) + backup_log.write(self.backup_pb(node, backup_type="ptrack", options=["-j", "4"])) self.assertEqual(self.show_pb(node)[0]['Status'], six.b("OK")) node.stop() -# @unittest.skip("123") def test_ptrack_threads_stream(self): """ptrack multi thread backup mode and stream""" fname = self.id().split('.')[3] @@ -144,21 +140,11 @@ class BackupTest(ProbackupTest, unittest.TestCase): node.start() self.assertEqual(self.init_pb(node), six.b("")) - with open(path.join(node.logs_dir, "backup_full.log"), 
"wb") as backup_log: - backup_log.write(self.backup_pb( - node, - backup_type="full", - options=["--verbose", "-j", "4", "--stream"] - )) + self.backup_pb(node, backup_type="full", options=["-j", "4", "--stream"]) self.assertEqual(self.show_pb(node)[0]['Status'], six.b("OK")) - with open(path.join(node.logs_dir, "backup_ptrack.log"), "wb") as backup_log: - backup_log.write(self.backup_pb( - node, - backup_type="ptrack", - options=["--verbose", "-j", "4", "--stream"] - )) + self.backup_pb(node, backup_type="ptrack", options=["-j", "4", "--stream"]) self.assertEqual(self.show_pb(node)[1]['Status'], six.b("OK")) node.stop() diff --git a/tests/delete_test.py b/tests/delete_test.py index e95aa827..7555321c 100644 --- a/tests/delete_test.py +++ b/tests/delete_test.py @@ -11,10 +11,12 @@ class DeleteTest(ProbackupTest, unittest.TestCase): def __init__(self, *args, **kwargs): super(DeleteTest, self).__init__(*args, **kwargs) -# @classmethod -# def tearDownClass(cls): -# stop_all() -# @unittest.skip("123") + @classmethod + def tearDownClass(cls): + stop_all() + + # @unittest.skip("skip") + # @unittest.expectedFailure def test_delete_full_backups(self): """delete full backups""" fname = self.id().split('.')[3] @@ -28,22 +30,19 @@ class DeleteTest(ProbackupTest, unittest.TestCase): node.pgbench_init() # full backup mode - with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, options=["--verbose"])) + self.backup_pb(node) pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT) pgbench.wait() pgbench.stdout.close() - with open(path.join(node.logs_dir, "backup_2.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, options=["--verbose"])) + self.backup_pb(node) pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT) pgbench.wait() pgbench.stdout.close() - with open(path.join(node.logs_dir, "backup_3.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, options=["--verbose"])) + self.backup_pb(node) show_backups = self.show_pb(node) id_1 = show_backups[0]['ID'] @@ -55,7 +54,6 @@ class DeleteTest(ProbackupTest, unittest.TestCase): node.stop() -# @unittest.skip("123") def test_delete_increment(self): """delete increment and all after him""" fname = self.id().split('.')[3] @@ -68,16 +66,13 @@ class DeleteTest(ProbackupTest, unittest.TestCase): self.assertEqual(self.init_pb(node), six.b("")) # full backup mode - with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, options=["--verbose"])) + self.backup_pb(node) # page backup mode - with open(path.join(node.logs_dir, "backup_2.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, backup_type="page", options=["--verbose"])) + self.backup_pb(node, backup_type="page") # page backup mode - with open(path.join(node.logs_dir, "backup_3.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, backup_type="page", options=["--verbose"])) + self.backup_pb(node, backup_type="page") # full backup mode self.backup_pb(node) diff --git a/tests/expected/option_help.out b/tests/expected/option_help.out index 53b46eb4..ec24820f 100644 --- a/tests/expected/option_help.out +++ b/tests/expected/option_help.out @@ -9,7 +9,13 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. 
pg_probackup set-config -B backup-dir [-d dbname] [-h host] [-p port] [-U username] - [--retention-redundancy=retention-redundancy]] + [--log-level=log-level] + [--log-filename=log-filename] + [--error-log-filename=error-log-filename] + [--log-directory=log-directory] + [--log-rotation-size=log-rotation-size] + [--log-rotation-age=log-rotation-age] + [--retention-redundancy=retention-redundancy] [--retention-window=retention-window] pg_probackup show-config -B backup-dir @@ -17,16 +23,16 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. pg_probackup backup -B backup-path -b backup-mode [-D pgdata-dir] [-C] [--stream [-S slot-name]] [--backup-pg-log] [-j num-threads] [--archive-timeout=archive-timeout] - [--progress] [-q] [-v] [--delete-expired] + [--progress] [--delete-expired] [-d dbname] [-h host] [-p port] [-U username] pg_probackup restore -B backup-dir - [-D pgdata-dir] [-i backup-id] [--progress] [-q] [-v] + [-D pgdata-dir] [-i backup-id] [--progress] [--time=time|--xid=xid [--inclusive=boolean]] [--timeline=timeline] [-T OLDDIR=NEWDIR] pg_probackup validate -B backup-dir - [-D pgdata-dir] [-i backup-id] [--progress] [-q] [-v] + [-D pgdata-dir] [-i backup-id] [--progress] [--time=time|--xid=xid [--inclusive=boolean]] [--timeline=timeline] diff --git a/tests/false_positive.py b/tests/false_positive.py new file mode 100644 index 00000000..79b28b56 --- /dev/null +++ b/tests/false_positive.py @@ -0,0 +1,154 @@ +import unittest +import os +import six +from .ptrack_helpers import ProbackupTest, ProbackupException +from datetime import datetime, timedelta +from testgres import stop_all +import subprocess +from sys import exit + + +class FalsePositive(ProbackupTest, unittest.TestCase): + + def __init__(self, *args, **kwargs): + super(FalsePositive, self).__init__(*args, **kwargs) + + @classmethod + def tearDownClass(cls): + stop_all() + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_pgpro561(self): + """ + make node with archiving, make stream backup, restore it to node1, + check that archiving is not successful on node1 + """ + fname = self.id().split('.')[3] + master = self.make_simple_node(base_dir="tmp_dirs/false_positive/{0}/master".format(fname), + set_archiving=True, + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={'wal_level': 'replica', 'max_wal_senders': '2'} + ) + master.start() + + self.assertEqual(self.init_pb(master), six.b("")) + id = self.backup_pb(master, backup_type='full', options=["--stream"]) + + node1 = self.make_simple_node(base_dir="tmp_dirs/false_positive/{0}/node1".format(fname)) + node1.cleanup() + + master.psql( + "postgres", + "create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i") + + self.backup_pb(master, backup_type='page', options=["--stream"]) + self.restore_pb(backup_dir=self.backup_dir(master), data_dir=node1.data_dir) + node1.append_conf('postgresql.auto.conf', 'port = {0}'.format(node1.port)) + node1.start({"-t": "600"}) + + timeline_master = master.get_control_data()["Latest checkpoint's TimeLineID"] + timeline_node1 = node1.get_control_data()["Latest checkpoint's TimeLineID"] + self.assertEqual(timeline_master, timeline_node1, "Timelines on Master and Node1 should be equal. 
This is unexpected")
+
+        archive_command_master = master.safe_psql("postgres", "show archive_command")
+        archive_command_node1 = node1.safe_psql("postgres", "show archive_command")
+        self.assertEqual(archive_command_master, archive_command_node1, "Archive command on Master and Node should be equal. This is unexpected")
+
+        res = node1.safe_psql("postgres", "select last_failed_wal from pg_stat_get_archiver() where last_failed_wal is not NULL")
+        # self.assertEqual(res, six.b(""), 'Restored Node1 failed to archive segment {0} due to having the same archive command as Master'.format(res.rstrip()))
+        if res == six.b(""):
+            self.assertEqual(1, 0, 'Error is expected due to Master and Node1 having a common archive and archive_command')
+
+        master.stop()
+        node1.stop()
+
+    def pgpro688(self):
+        """
+        make node with archiving, make backup,
+        get Recovery Time, validate to Recovery Time
+        Waiting for PGPRO-688
+        """
+        fname = self.id().split('.')[3]
+        node = self.make_simple_node(base_dir="tmp_dirs/false_positive/{0}".format(fname),
+            set_archiving=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
+            )
+        node.start()
+
+        self.assertEqual(self.init_pb(node), six.b(""))
+        id = self.backup_pb(node, backup_type='full')
+        recovery_time = self.show_pb(node, id=id)['recovery-time']
+
+        # Uncommenting this section will make this test a True Positive
+        #node.psql("postgres", "select pg_create_restore_point('123')")
+        #node.psql("postgres", "select txid_current()")
+        #node.psql("postgres", "select pg_switch_xlog()")
+        ####
+
+        try:
+            self.validate_pb(node, options=["--time='{0}'".format(recovery_time)])
+            self.assertEqual(1, 0, 'Error is expected because we should not be able to safely validate "Recovery Time" without a WAL record with a timestamp')
+        except ProbackupException, e:
+            self.assertTrue('WARNING: recovery can be done up to time {0}'.format(recovery_time) in e.message)
+
+        node.stop()
+
+    def pgpro702_688(self):
+        """
+        make node without archiving, make stream backup,
+        get Recovery Time, validate to Recovery Time
+        """
+        fname = self.id().split('.')[3]
+        node = self.make_simple_node(base_dir="tmp_dirs/false_positive/{0}".format(fname),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
+            )
+        node.start()
+
+        self.assertEqual(self.init_pb(node), six.b(""))
+        id = self.backup_pb(node, backup_type='full', options=["--stream"])
+        recovery_time = self.show_pb(node, id=id)['recovery-time']
+
+        self.assertIn(six.b("INFO: backup validation completed successfully on"),
+            self.validate_pb(node, options=["--time='{0}'".format(recovery_time)]))
+
+    def test_validate_wal_lost_segment(self):
+        """Lose a WAL segment located between backups. ExpectedFailure. This is a BUG."""
+        fname = self.id().split('.')[3]
+        node = self.make_simple_node(base_dir="tmp_dirs/false_positive/{0}".format(fname),
+            set_archiving=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica'}
+            )
+
+        node.start()
+        self.assertEqual(self.init_pb(node), six.b(""))
+        self.backup_pb(node, backup_type='full')
+
+        # make some wals
+        node.pgbench_init(scale=2)
+        pgbench = node.pgbench(
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            options=["-c", "4", "-T", "10"]
+        )
+        pgbench.wait()
+        pgbench.stdout.close()
+
+        # delete last wal segment
+        wals_dir = os.path.join(self.backup_dir(node), "wal")
+        wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')]
+        wals = map(int, wals)
+        os.remove(os.path.join(self.backup_dir(node), "wal", '0000000' + str(max(wals))))
+
+
+        ##### Holy smokes, Batman! We just lost a wal segment and know nothing about it
+        ##### We need archive-push ASAP
+        self.backup_pb(node, backup_type='full')
+        self.assertTrue('validation completed successfully' in self.validate_pb(node))
+        ########
+        node.stop()
diff --git a/tests/init_test.py b/tests/init_test.py
index 2961a28f..b9541d3c 100644
--- a/tests/init_test.py
+++ b/tests/init_test.py
@@ -12,6 +12,8 @@ class InitTest(ProbackupTest, unittest.TestCase):
     def __init__(self, *args, **kwargs):
         super(InitTest, self).__init__(*args, **kwargs)
 
+    # @unittest.skip("skip")
+    # @unittest.expectedFailure
     def test_success_1(self):
         """Success normal init"""
         fname = self.id().split(".")[3]
diff --git a/tests/option_test.py b/tests/option_test.py
index 57c5e977..2217949e 100644
--- a/tests/option_test.py
+++ b/tests/option_test.py
@@ -14,6 +14,8 @@ class OptionTest(ProbackupTest, unittest.TestCase):
     def tearDownClass(cls):
         stop_all()
 
+    # @unittest.skip("skip")
+    # @unittest.expectedFailure
     def test_help_1(self):
         """help options"""
         fname = self.id().split(".")[3]
diff --git a/tests/pgpro560.py b/tests/pgpro560.py
new file mode 100644
index 00000000..67275e5e
--- /dev/null
+++ b/tests/pgpro560.py
@@ -0,0 +1,78 @@
+import unittest
+import os
+import six
+from .ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack
+from datetime import datetime, timedelta
+from testgres import stop_all
+import subprocess
+from sys import exit
+
+
+class CheckSystemID(ProbackupTest, unittest.TestCase):
+
+    def __init__(self, *args, **kwargs):
+        super(CheckSystemID, self).__init__(*args, **kwargs)
+
+    @classmethod
+    def tearDownClass(cls):
+        stop_all()
+
+    # @unittest.skip("skip")
+    # @unittest.expectedFailure
+    def test_pgpro560_control_file_loss(self):
+        """
+        https://jira.postgrespro.ru/browse/PGPRO-560
+        make node with stream support, delete control file
+        make backup
+        check that backup failed
+        """
+        fname = self.id().split('.')[3]
+        node = self.make_simple_node(base_dir="tmp_dirs/pgpro560/{0}/node".format(fname),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica'}
+            )
+        node.start()
+
+        self.assertEqual(self.init_pb(node), six.b(""))
+        file = os.path.join(node.base_dir, 'data', 'global', 'pg_control')
+        os.remove(file)
+
+        try:
+            self.backup_pb(node, backup_type='full', options=['--stream'])
+            self.assertEqual(1, 0, 'Error is expected because of control file loss')
+        except ProbackupException, e:
+            self.assertTrue(
+                'ERROR: could not open file' in e.message and 'pg_control' in e.message,
+                'Expected error is about control file loss')
+
+    def test_pgpro560_systemid_mismatch(self):
+        """
+
https://jira.postgrespro.ru/browse/PGPRO-560 + make node1 and node2 + feed to backup PGDATA from node1 and PGPORT from node2 + check that backup failed + """ + fname = self.id().split('.')[3] + node1 = self.make_simple_node(base_dir="tmp_dirs/pgpro560/{0}/node1".format(fname), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={'wal_level': 'replica'} + ) + node1.start() + node2 = self.make_simple_node(base_dir="tmp_dirs/pgpro560/{0}/node2".format(fname), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={'wal_level': 'replica'} + ) + node2.start() + self.assertEqual(self.init_pb(node1), six.b("")) + + try: + self.backup_pb(node1, data_dir=node2.data_dir, backup_type='full', options=['--stream']) + assertEqual(1, 0, 'Error is expected because of SYSTEM ID mismatch') + except ProbackupException, e: + self.assertTrue( + 'ERROR: Backup data directory was initialized for system id' and + 'but target backup directory system id is' in e.message, + 'Expected error is about SYSTEM ID mismatch') diff --git a/tests/pgpro561.py b/tests/pgpro561.py deleted file mode 100644 index 0d94d1a4..00000000 --- a/tests/pgpro561.py +++ /dev/null @@ -1,62 +0,0 @@ -import unittest -import os -import six -from .ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack -from datetime import datetime, timedelta -from testgres import stop_all -import subprocess -from sys import exit - - -class CommonArchiveDir(ProbackupTest, unittest.TestCase): - - def __init__(self, *args, **kwargs): - super(CommonArchiveDir, self).__init__(*args, **kwargs) - -# @classmethod -# def tearDownClass(cls): -# stop_all() - - def test_pgpro561(self): - """ - EXPECTED TO FAIL - make node with archiving, make stream backup, restore it to node1, - check that archiving is not successful on node1 - """ - fname = self.id().split('.')[3] - master = self.make_simple_node(base_dir="tmp_dirs/pgpro561/{0}/master".format(fname), - set_archiving=True, - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'wal_level': 'replica', 'max_wal_senders': '2'} - ) - master.start() - - self.assertEqual(self.init_pb(master), six.b("")) - id = self.backup_pb(master, backup_type='full', options=["--stream"]) - - node1 = self.make_simple_node(base_dir="tmp_dirs/pgpro561/{0}/node1".format(fname)) - node1.cleanup() - - master.psql( - "postgres", - "create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i") - - self.backup_pb(master, backup_type='page', options=["--stream"]) - self.restore_pb(backup_dir=self.backup_dir(master), data_dir=node1.data_dir) - node1.append_conf('postgresql.auto.conf', 'port = {0}'.format(node1.port)) - node1.start({"-t": "600"}) - - timeline_master = master.get_control_data()["Latest checkpoint's TimeLineID"] - timeline_node1 = node1.get_control_data()["Latest checkpoint's TimeLineID"] - self.assertEqual(timeline_master, timeline_node1, "Timelines on Master and Node1 should be equal. This is unexpected") - - archive_command_master = master.safe_psql("postgres", "show archive_command") - archive_command_node1 = node1.safe_psql("postgres", "show archive_command") - self.assertEqual(archive_command_master, archive_command_node1, "Archive command on Master and Node should be equal. 
This is unexpected")
-
-        res = node1.safe_psql("postgres", "select last_failed_wal from pg_stat_get_archiver() where last_failed_wal is not NULL")
-        self.assertEqual(res, six.b(""), 'Restored Node1 failed to archive segment {0} due to having the same archive command as Master'.format(res.rstrip()))
-
-        master.stop()
-        node1.stop()
diff --git a/tests/pgpro589.py b/tests/pgpro589.py
index 9f15643f..5ad21e58 100644
--- a/tests/pgpro589.py
+++ b/tests/pgpro589.py
@@ -8,21 +8,22 @@ import subprocess
 from sys import exit
 
 
-class LsnCheck(ProbackupTest, unittest.TestCase):
+class ArchiveCheck(ProbackupTest, unittest.TestCase):
 
     def __init__(self, *args, **kwargs):
-        super(LsnCheck, self).__init__(*args, **kwargs)
+        super(ArchiveCheck, self).__init__(*args, **kwargs)
 
-# @classmethod
-# def tearDownClass(cls):
-# stop_all()
-# @unittest.expectedFailure
-    def test_pgpro589(self):
+    @classmethod
+    def tearDownClass(cls):
+        stop_all()
+
+    # @unittest.skip("skip")
+    # @unittest.expectedFailure
+    def test_archive_mode(self):
         """
         https://jira.postgrespro.ru/browse/PGPRO-589
         make node without archive support, make backup which should fail
-        check that no files where copied to backup catalogue
-        EXPECTED TO FAIL
+        check ERROR text
         """
         fname = self.id().split('.')[3]
         node = self.make_simple_node(base_dir="tmp_dirs/pgpro589/{0}/node".format(fname),
@@ -43,18 +44,54 @@ class LsnCheck(ProbackupTest, unittest.TestCase):
         path = node.safe_psql("postgres", "select pg_relation_filepath('pgbench_accounts')").rstrip()
         self.assertEqual(self.init_pb(node), six.b(""))
 
-        proc = self.backup_pb(
-            node, backup_type='full', options=['--archive-timeout=1'], async=True)
-        content = proc.stderr.read()
-        self.assertEqual(True, 'wait for LSN' in repr(content),
-            'No Wait for LSN')
-        self.assertEqual(True, 'could not be archived' in repr(content),
-            'No Fail Archiving Message')
+        try:
+            self.backup_pb(node, backup_type='full', options=['--archive-timeout=10'])
+            self.assertEqual(1, 0, 'Error is expected because of disabled archive_mode')
+        except ProbackupException, e:
+            self.assertEqual(e.message, 'ERROR: Archiving must be enabled for archive backup\n')
+
+    def test_pgpro589(self):
+        """
+        https://jira.postgrespro.ru/browse/PGPRO-589
+        make node without archive support, make backup which should fail
+        check that backup status equals ERROR
+        check that no files were copied to backup catalogue
+        """
+        fname = self.id().split('.')[3]
+        node = self.make_simple_node(base_dir="tmp_dirs/pgpro589/{0}/node".format(fname),
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica'}
+            )
+        node.append_conf("postgresql.auto.conf", "archive_mode = on")
+        node.append_conf("postgresql.auto.conf", "wal_level = archive")
+        node.append_conf("postgresql.auto.conf", "archive_command = 'exit 0'")
+        node.start()
+
+        node.pgbench_init(scale=5)
+        pgbench = node.pgbench(
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            options=["-c", "4", "-T", "10"]
+        )
+        pgbench.wait()
+        pgbench.stdout.close()
+
+        path = node.safe_psql("postgres", "select pg_relation_filepath('pgbench_accounts')").rstrip()
+        self.assertEqual(self.init_pb(node), six.b(""))
+
+        try:
+            self.backup_pb(
+                node, backup_type='full', options=['--archive-timeout=10'])
+            self.assertEqual(1, 0, 'Error is expected because of missing archive wal segment with start_backup() LSN')
+        except ProbackupException, e:
+            self.assertTrue('INFO: wait for LSN' in e.message, "Expecting 'INFO: wait for LSN'")
+            self.assertTrue('ERROR: switched WAL segment' in e.message and 'could not be archived' in e.message,
+ "Expecting 'ERROR: switched WAL segment could not be archived'") id = self.show_pb(node)[0]['ID'] self.assertEqual('ERROR', self.show_pb(node, id=id)['status'], 'Backup should have ERROR status') #print self.backup_dir(node) file = os.path.join(self.backup_dir(node), 'backups', id, 'database', path) - self.assertEqual(False, os.path.isfile(file), + self.assertFalse(os.path.isfile(file), '\n Start LSN was not found in archive but datafiles where copied to backup catalogue.\n For example: {0}\n It is not optimal'.format(file)) diff --git a/tests/pgpro688.py b/tests/pgpro688.py deleted file mode 100644 index 539c5e37..00000000 --- a/tests/pgpro688.py +++ /dev/null @@ -1,45 +0,0 @@ -import unittest -import os -import six -from .ptrack_helpers import ProbackupTest, ProbackupException -from datetime import datetime, timedelta -from testgres import stop_all -import subprocess -from sys import exit - - -class ValidateTime(ProbackupTest, unittest.TestCase): - - def __init__(self, *args, **kwargs): - super(ValidateTime, self).__init__(*args, **kwargs) - -# @classmethod -# def tearDownClass(cls): -# stop_all() - - def test_validate_recovery_time(self): - """ - make node with archiving, make backup, - get Recovery Time, validate to Recovery Time - EXPECT VALIDATE TO FAIL - Waiting PGPRO-688 - """ - fname = self.id().split('.')[3] - node = self.make_simple_node(base_dir="tmp_dirs/pgpro668/{0}".format(fname), - set_archiving=True, - initdb_params=['--data-checksums'], - pg_options={'wal_level': 'replica', 'max_wal_senders': '2'} - ) - node.start() - - self.assertEqual(self.init_pb(node), six.b("")) - id = self.backup_pb(node, backup_type='full') - recovery_time = self.show_pb(node, id=id)['recovery-time'] - - # Optional - #node.psql("postgres", "select pg_create_restore_point('123')") - #node.psql("postgres", "select txid_current()") - #node.psql("postgres", "select pg_switch_xlog()") - self.assertIn(six.b("INFO: backup validation completed successfully on"), - self.validate_pb(node, options=["--time='{0}'".format(recovery_time)])) - #### diff --git a/tests/pgpro702.py b/tests/pgpro702.py deleted file mode 100644 index 60361dd3..00000000 --- a/tests/pgpro702.py +++ /dev/null @@ -1,39 +0,0 @@ -import unittest -import os -import six -from .ptrack_helpers import ProbackupTest, ProbackupException -from datetime import datetime, timedelta -from testgres import stop_all -import subprocess -from sys import exit - - -class ValidateTime(ProbackupTest, unittest.TestCase): - - def __init__(self, *args, **kwargs): - super(ValidateTime, self).__init__(*args, **kwargs) - -# @classmethod -# def tearDownClass(cls): -# stop_all() - - def test_validate_recovery_time(self): - """ - make node without archiving, make stream backup, - get Recovery Time, validate to Recovery Time - EXPECT VALIDATE TO FAIL - """ - fname = self.id().split('.')[3] - node = self.make_simple_node(base_dir="tmp_dirs/pgpro702/{0}".format(fname), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'wal_level': 'replica', 'max_wal_senders': '2'} - ) - node.start() - - self.assertEqual(self.init_pb(node), six.b("")) - id = self.backup_pb(node, backup_type='full', options=["--stream"]) - recovery_time = self.show_pb(node, id=id)['recovery-time'] - - self.assertIn(six.b("INFO: backup validation completed successfully on"), - self.validate_pb(node, options=["--time='{0}'".format(recovery_time)])) diff --git a/tests/ptrack_clean.py b/tests/ptrack_clean.py index 2a458c75..94f57007 100644 --- a/tests/ptrack_clean.py +++ 
b/tests/ptrack_clean.py @@ -9,10 +9,10 @@ class SimpleTest(ProbackupTest, unittest.TestCase): super(SimpleTest, self).__init__(*args, **kwargs) def teardown(self): - # clean_all() stop_all() -# @unittest.skip("123") + # @unittest.skip("skip") + # @unittest.expectedFailure def test_ptrack_clean(self): fname = self.id().split('.')[3] node = self.make_simple_node(base_dir='tmp_dirs/ptrack/{0}'.format(fname), diff --git a/tests/ptrack_cluster.py b/tests/ptrack_cluster.py index 5f2707c5..933074f1 100644 --- a/tests/ptrack_cluster.py +++ b/tests/ptrack_cluster.py @@ -12,7 +12,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase): # clean_all() stop_all() -# @unittest.skip("123") + # @unittest.skip("skip") + # @unittest.expectedFailure def test_ptrack_cluster_btree(self): fname = self.id().split('.')[3] node = self.make_simple_node(base_dir="tmp_dirs/ptrack/{0}".format(fname), diff --git a/tests/ptrack_helpers.py b/tests/ptrack_helpers.py index 4c7184b7..f887154b 100644 --- a/tests/ptrack_helpers.py +++ b/tests/ptrack_helpers.py @@ -161,6 +161,7 @@ class ProbackupTest(object): node.init(initdb_params=initdb_params) # Sane default parameters, not a shit with fsync = off from testgres + node.append_conf("postgresql.auto.conf", "{0} = {1}".format('shared_buffers', '10MB')) node.append_conf("postgresql.auto.conf", "{0} = {1}".format('fsync', 'on')) node.append_conf("postgresql.auto.conf", "{0} = {1}".format('wal_level', 'minimal')) @@ -294,7 +295,7 @@ class ProbackupTest(object): def run_pb(self, command, async=False): try: - # print [self.probackup_path] + command + #print [self.probackup_path] + command if async is True: return subprocess.Popen( [self.probackup_path] + command, @@ -315,7 +316,9 @@ class ProbackupTest(object): return output else: # return backup ID - return output.split()[2] + for line in output.splitlines(): + if 'INFO: Backup' and 'completed' in line: + return line.split()[2] else: return output except subprocess.CalledProcessError as e: @@ -391,13 +394,17 @@ class ProbackupTest(object): body = body[::-1] # split string in list with string for every header element header_split = re.split(" +", header) - # CRUNCH, remove last item, because it`s empty, like that '' - header_split.pop() + # Remove empty items + for i in header_split: + if i == '': + header_split.remove(i) for backup_record in body: # split string in list with string for every backup record element backup_record_split = re.split(" +", backup_record) - # CRUNCH, remove last item, because it`s empty, like that '' - backup_record_split.pop() + # Remove empty items + for i in backup_record_split: + if i == '': + backup_record_split.remove(i) if len(header_split) != len(backup_record_split): print warning.format( header=header, body=body, diff --git a/tests/ptrack_move_to_tablespace.py b/tests/ptrack_move_to_tablespace.py index ff9d56ea..d590b4ed 100644 --- a/tests/ptrack_move_to_tablespace.py +++ b/tests/ptrack_move_to_tablespace.py @@ -15,6 +15,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase): # clean_all() stop_all() + # @unittest.skip("skip") + # @unittest.expectedFailure def test_ptrack_recovery(self): fname = self.id().split(".")[3] node = self.make_simple_node(base_dir="tmp_dirs/ptrack/{0}".format(fname), diff --git a/tests/ptrack_recovery.py b/tests/ptrack_recovery.py index 0457ac89..d9b6302d 100644 --- a/tests/ptrack_recovery.py +++ b/tests/ptrack_recovery.py @@ -15,6 +15,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase): # clean_all() stop_all() + # @unittest.skip("skip") + # 
@unittest.expectedFailure def test_ptrack_recovery(self): fname = self.id().split(".")[3] node = self.make_simple_node(base_dir="tmp_dirs/ptrack/{0}".format(fname), diff --git a/tests/ptrack_vacuum.py b/tests/ptrack_vacuum.py index 214db8ad..8d05bc6f 100644 --- a/tests/ptrack_vacuum.py +++ b/tests/ptrack_vacuum.py @@ -12,7 +12,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase): # clean_all() stop_all() -# @unittest.skip("123") + # @unittest.skip("skip") + # @unittest.expectedFailure def test_ptrack_vacuum(self): fname = self.id().split('.')[3] node = self.make_simple_node(base_dir='tmp_dirs/ptrack/{0}'.format(fname), diff --git a/tests/ptrack_vacuum_bits_frozen.py b/tests/ptrack_vacuum_bits_frozen.py index 47b60b66..190d4b46 100644 --- a/tests/ptrack_vacuum_bits_frozen.py +++ b/tests/ptrack_vacuum_bits_frozen.py @@ -13,6 +13,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase): # clean_all() stop_all() + # @unittest.skip("skip") + # @unittest.expectedFailure def test_ptrack_vacuum_bits_frozen(self): fname = self.id().split('.')[3] node = self.make_simple_node(base_dir='tmp_dirs/ptrack/{0}'.format(fname), diff --git a/tests/ptrack_vacuum_bits_visibility.py b/tests/ptrack_vacuum_bits_visibility.py index c3cbf33a..161772d5 100644 --- a/tests/ptrack_vacuum_bits_visibility.py +++ b/tests/ptrack_vacuum_bits_visibility.py @@ -13,6 +13,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase): # clean_all() stop_all() + # @unittest.skip("skip") + # @unittest.expectedFailure def test_ptrack_vacuum_bits_visibility(self): fname = self.id().split('.')[3] node = self.make_simple_node(base_dir='tmp_dirs/ptrack/{0}'.format(fname), diff --git a/tests/ptrack_vacuum_full.py b/tests/ptrack_vacuum_full.py index ade1fd30..23bd5cd1 100644 --- a/tests/ptrack_vacuum_full.py +++ b/tests/ptrack_vacuum_full.py @@ -5,17 +5,6 @@ from testgres import get_new_node, stop_all from os import path, open, lseek, read, close, O_RDONLY from .ptrack_helpers import ProbackupTest, idx_ptrack -# res = node.execute('postgres', 'show fsync') -# print res[0][0] -# res = node.execute('postgres', 'show wal_level') -# print res[0][0] -# a = ProbackupTest -# res = node.execute('postgres', 'select 1')` -# self.assertEqual(len(res), 1) -# self.assertEqual(res[0][0], 1) -# node.stop() -# a = self.backup_dir(node) - class SimpleTest(ProbackupTest, unittest.TestCase): def __init__(self, *args, **kwargs): @@ -25,9 +14,10 @@ class SimpleTest(ProbackupTest, unittest.TestCase): # clean_all() stop_all() + # @unittest.skip("skip") + # @unittest.expectedFailure def test_ptrack_vacuum_full(self): fname = self.id().split('.')[3] - print '{0} started'.format(fname) node = self.make_simple_node(base_dir='tmp_dirs/ptrack/{0}'.format(fname), set_replication=True, initdb_params=['--data-checksums', '-A trust'], diff --git a/tests/ptrack_vacuum_truncate.py b/tests/ptrack_vacuum_truncate.py index fe0db358..d40dfc8a 100644 --- a/tests/ptrack_vacuum_truncate.py +++ b/tests/ptrack_vacuum_truncate.py @@ -13,6 +13,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase): # clean_all() stop_all() + # @unittest.skip("skip") + # @unittest.expectedFailure def test_ptrack_vacuum_truncate(self): fname = self.id().split('.')[3] node = self.make_simple_node(base_dir='tmp_dirs/ptrack/{0}'.format(fname), diff --git a/tests/replica.py b/tests/replica.py index b261a24f..c6583e75 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -11,19 +11,21 @@ from sys import exit class ReplicaTest(ProbackupTest, unittest.TestCase): def __init__(self, *args, **kwargs): - 
super(SomeTest, self).__init__(*args, **kwargs) + super(ReplicaTest, self).__init__(*args, **kwargs) @classmethod def tearDownClass(cls): stop_all() + # @unittest.skip("skip") + # @unittest.expectedFailure def test_make_simple_replica(self): """ make node with archiving, make stream backup, get Recovery Time, try to make pitr to Recovery Time """ fname = self.id().split('.')[3] - master = self.make_simple_node(base_dir="tmp_dirs/pgpro561/{0}/master".format(fname), + master = self.make_simple_node(base_dir="tmp_dirs/replica/{0}/master".format(fname), set_archiving=True, set_replication=True, initdb_params=['--data-checksums'], @@ -31,30 +33,20 @@ class ReplicaTest(ProbackupTest, unittest.TestCase): ) master.start() - slave = self.make_simple_node(base_dir="tmp_dirs/pgpro561/{0}/replica".format(fname), - set_archiving=True, - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={'wal_level': 'replica', 'max_wal_senders': '2'} - ) + slave = self.make_simple_node(base_dir="tmp_dirs/replica/{0}/slave".format(fname)) slave_port = slave.port slave.cleanup() self.assertEqual(self.init_pb(master), six.b("")) - self.backup_pb(master, backup_type='full') + self.backup_pb(master, backup_type='full', options=['--stream']) master.psql( "postgres", "create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i") - # for i in idx_ptrack: - # if idx_ptrack[i]['type'] == 'heap': - # continue - # master.psql("postgres", "create index {0} on {1} using {2}({3})".format( - # i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column'])) before = master.execute("postgres", "SELECT * FROM t_heap") - id = self.backup_pb(master, backup_type='page') + id = self.backup_pb(master, backup_type='page', options=['--stream']) self.restore_pb(backup_dir=self.backup_dir(master), data_dir=slave.data_dir) slave.append_conf('postgresql.auto.conf', 'port = {0}'.format(slave.port)) slave.append_conf('postgresql.auto.conf', 'hot_standby = on') @@ -66,3 +58,6 @@ class ReplicaTest(ProbackupTest, unittest.TestCase): after = slave.execute("postgres", "SELECT * FROM t_heap") self.assertEqual(before, after) + + self.assertEqual(self.init_pb(slave), six.b("")) + self.backup_pb(slave, backup_type='full', options=['--stream']) diff --git a/tests/restore_test.py b/tests/restore_test.py index f4e4e2b1..594d3083 100644 --- a/tests/restore_test.py +++ b/tests/restore_test.py @@ -18,7 +18,8 @@ class RestoreTest(ProbackupTest, unittest.TestCase): def tearDownClass(cls): stop_all() -# @unittest.skip("123") + # @unittest.skip("skip") + # @unittest.expectedFailure def test_restore_full_to_latest(self): """recovery to latest from full backup""" fname = self.id().split('.')[3] @@ -34,17 +35,14 @@ class RestoreTest(ProbackupTest, unittest.TestCase): pgbench.wait() pgbench.stdout.close() before = node.execute("postgres", "SELECT * FROM pgbench_branches") - with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, options=["--verbose"])) + self.backup_pb(node) node.stop({"-m": "immediate"}) node.cleanup() # 1 - Test recovery from latest -# TODO WAITING FIX FOR RESTORE -# self.assertIn(six.b("INFO: restore complete"), - self.restore_pb(node, options=["-j", "4", "--verbose"]) -# ) + self.assertTrue(six.b("INFO: Restore of backup") and + six.b("completed.") in self.restore_pb(node, options=["-j", "4"])) # 2 - Test that recovery.conf was created recovery_conf = path.join(node.data_dir, 
"recovery.conf") @@ -70,24 +68,21 @@ class RestoreTest(ProbackupTest, unittest.TestCase): node.pgbench_init(scale=2) with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, options=["--verbose"])) + backup_log.write(self.backup_pb(node)) pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT) pgbench.wait() pgbench.stdout.close() - with open(path.join(node.logs_dir, "backup_2.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, backup_type="page", options=["--verbose"])) + self.backup_pb(node, backup_type="page") before = node.execute("postgres", "SELECT * FROM pgbench_branches") node.stop({"-m": "immediate"}) node.cleanup() -# TODO WAITING FIX FOR RESTORE -# self.assertIn(six.b("INFO: restore complete"), - self.restore_pb(node, options=["-j", "4", "--verbose"]) -# ) + self.assertTrue(six.b("INFO: Restore of backup") and + six.b("completed.") in self.restore_pb(node, options=["-j", "4"])) node.start({"-t": "600"}) @@ -110,17 +105,14 @@ class RestoreTest(ProbackupTest, unittest.TestCase): before = node.execute("postgres", "SELECT * FROM pgbench_branches") - with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose"])) + self.backup_pb(node, backup_type="full") target_tli = int(node.get_control_data()[six.b("Latest checkpoint's TimeLineID")]) node.stop({"-m": "immediate"}) node.cleanup() -# TODO WAITING FIX FOR RESTORE -# self.assertIn(six.b("INFO: restore complete"), - self.restore_pb(node, options=["-j", "4", "--verbose"]) -# ) + self.assertTrue(six.b("INFO: Restore of backup") and + six.b("completed.") in self.restore_pb(node, options=["-j", "4"])) node.start({"-t": "600"}) @@ -128,17 +120,14 @@ class RestoreTest(ProbackupTest, unittest.TestCase): pgbench.wait() pgbench.stdout.close() - with open(path.join(node.logs_dir, "backup_2.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose"])) + self.backup_pb(node, backup_type="full") node.stop({"-m": "immediate"}) node.cleanup() -# TODO WAITING FIX FOR RESTORE -# self.assertIn(six.b("INFO: restore complete"), - self.restore_pb(node, - options=["-j", "4", "--verbose", "--timeline=%i" % target_tli]) -# ) + self.assertTrue(six.b("INFO: Restore of backup") and + six.b("completed.") in self.restore_pb(node, + options=["-j", "4", "--timeline=%i" % target_tli])) recovery_target_timeline = self.get_recovery_conf(node)["recovery_target_timeline"] self.assertEqual(int(recovery_target_timeline), target_tli) @@ -164,8 +153,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase): before = node.execute("postgres", "SELECT * FROM pgbench_branches") - with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose"])) + self.backup_pb(node, backup_type="full") target_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT) @@ -175,11 +163,9 @@ class RestoreTest(ProbackupTest, unittest.TestCase): node.stop({"-m": "immediate"}) node.cleanup() -# TODO WAITING FIX FOR RESTORE -# self.assertIn(six.b("INFO: restore complete"), - self.restore_pb(node, - options=["-j", "4", "--verbose", '--time="%s"' % target_time]) -# ) + self.assertTrue(six.b("INFO: Restore of backup") and + six.b("completed.") in self.restore_pb(node, + options=["-j", "4", '--time="%s"' % target_time])) 
node.start({"-t": "600"}) @@ -203,8 +189,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase): con.execute("CREATE TABLE tbl0005 (a text)") con.commit() - with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose"])) + self.backup_pb(node, backup_type="full") pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT) pgbench.wait() @@ -228,11 +213,9 @@ class RestoreTest(ProbackupTest, unittest.TestCase): node.stop({"-m": "fast"}) node.cleanup() -# TODO WAITING FIX FOR RESTORE -# self.assertIn(six.b("INFO: restore complete"), - self.restore_pb(node, - options=["-j", "4", "--verbose", '--xid=%s' % target_xid]) -# ) + self.assertTrue(six.b("INFO: Restore of backup") and + six.b("completed.") in self.restore_pb(node, + options=["-j", "4", '--xid=%s' % target_xid])) node.start({"-t": "600"}) @@ -261,25 +244,21 @@ class RestoreTest(ProbackupTest, unittest.TestCase): node.append_conf("postgresql.conf", "ptrack_enable = on") node.restart() - with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose"])) + self.backup_pb(node, backup_type="full") pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT) pgbench.wait() pgbench.stdout.close() - with open(path.join(node.logs_dir, "backup_2.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, backup_type="ptrack", options=["--verbose"])) + self.backup_pb(node, backup_type="ptrack") before = node.execute("postgres", "SELECT * FROM pgbench_branches") node.stop({"-m": "immediate"}) node.cleanup() -# TODO WAITING FIX FOR RESTORE -# self.assertIn(six.b("INFO: restore complete"), - self.restore_pb(node, options=["-j", "4", "--verbose"]) -# ) + self.assertTrue(six.b("INFO: Restore of backup") and + six.b("completed.") in self.restore_pb(node, options=["-j", "4"])) node.start({"-t": "600"}) @@ -308,32 +287,27 @@ class RestoreTest(ProbackupTest, unittest.TestCase): node.append_conf("postgresql.conf", "ptrack_enable = on") node.restart() - with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose"])) + self.backup_pb(node, backup_type="full") pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT) pgbench.wait() pgbench.stdout.close() - with open(path.join(node.logs_dir, "backup_2.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, backup_type="ptrack", options=["--verbose"])) + self.backup_pb(node, backup_type="ptrack") pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT) pgbench.wait() pgbench.stdout.close() - with open(path.join(node.logs_dir, "backup_3.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, backup_type="ptrack", options=["--verbose"])) + self.backup_pb(node, backup_type="ptrack") before = node.execute("postgres", "SELECT * FROM pgbench_branches") node.stop({"-m": "immediate"}) node.cleanup() -# TODO WAITING FIX FOR RESTORE -# self.assertIn(six.b("INFO: restore complete"), - self.restore_pb(node, options=["-j", "4", "--verbose"]) -# ) + self.assertTrue(six.b("INFO: Restore of backup") and + six.b("completed.") in self.restore_pb(node, options=["-j", "4"])) node.start({"-t": "600"}) @@ -359,25 +333,21 @@ class RestoreTest(ProbackupTest, unittest.TestCase): self.skipTest("ptrack not supported") return - with open(path.join(node.logs_dir, "backup_1.log"), 
"wb") as backup_log: - backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose", "--stream"])) + self.backup_pb(node, backup_type="full", options=["--stream"]) pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT) pgbench.wait() pgbench.stdout.close() - with open(path.join(node.logs_dir, "backup_2.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, backup_type="ptrack", options=["--verbose", "--stream"])) + self.backup_pb(node, backup_type="ptrack", options=["--stream"]) before = node.execute("postgres", "SELECT * FROM pgbench_branches") node.stop({"-m": "immediate"}) node.cleanup() -# TODO WAITING FIX FOR RESTORE -# self.assertIn(six.b("INFO: restore complete"), - self.restore_pb(node, options=["-j", "4", "--verbose"]) -# ) + self.assertTrue(six.b("INFO: Restore of backup") and + six.b("completed.") in self.restore_pb(node, options=["-j", "4"])) node.start({"-t": "600"}) @@ -406,8 +376,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase): return node.restart() - with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose"])) + self.backup_pb(node, backup_type="full") pgbench = node.pgbench( stdout=subprocess.PIPE, @@ -415,8 +384,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase): options=["-c", "4", "-T", "8"] ) - with open(path.join(node.logs_dir, "backup_2.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, backup_type="ptrack", options=["--verbose", "--stream"])) + self.backup_pb(node, backup_type="ptrack", options=["--stream"]) pgbench.wait() pgbench.stdout.close() @@ -430,10 +398,8 @@ class RestoreTest(ProbackupTest, unittest.TestCase): self.wrong_wal_clean(node, wal_segment_size) -# TODO WAITING FIX FOR RESTORE -# self.assertIn(six.b("INFO: restore complete"), - self.restore_pb(node, options=["-j", "4", "--verbose"]) -# ) + self.assertTrue(six.b("INFO: Restore of backup") and + six.b("completed.") in self.restore_pb(node, options=["-j", "4"])) node.start({"-t": "600"}) @@ -471,14 +437,12 @@ class RestoreTest(ProbackupTest, unittest.TestCase): options=["-c", "4", "-T", "8"] ) - with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose"])) + self.backup_pb(node, backup_type="full") pgbench.wait() pgbench.stdout.close() - with open(path.join(node.logs_dir, "backup_2.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, backup_type="ptrack", options=["--verbose", "--stream"])) + self.backup_pb(node, backup_type="ptrack", options=["--stream"]) bbalance = node.execute("postgres", "SELECT sum(bbalance) FROM pgbench_branches") delta = node.execute("postgres", "SELECT sum(delta) FROM pgbench_history") @@ -489,10 +453,8 @@ class RestoreTest(ProbackupTest, unittest.TestCase): node.cleanup() self.wrong_wal_clean(node, wal_segment_size) -# TODO WAITING FIX FOR RESTORE -# self.assertIn(six.b("INFO: restore complete"), - self.restore_pb(node, options=["-j", "4", "--verbose"]) -# ) + self.assertTrue(six.b("INFO: Restore of backup") and + six.b("completed.") in self.restore_pb(node, options=["-j", "4"])) node.start({"-t": "600"}) @@ -518,8 +480,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase): con.execute("CREATE TABLE tbl0005 (a text)") con.commit() - with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log: - backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose"])) + 
self.backup_pb(node, backup_type="full")

        pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        pgbench.wait()
@@ -543,15 +504,12 @@ class RestoreTest(ProbackupTest, unittest.TestCase):

         node.stop({"-m": "fast"})
         node.cleanup()
-# TODO WAITING FIX FOR RESTORE
-#        self.assertIn(six.b("INFO: restore complete"),
-        self.restore_pb(node,
+        self.assertIn(six.b("INFO: Restore of backup"),
+            self.restore_pb(node,
                 options=[
                     "-j", "4",
-                    "--verbose",
                     '--xid=%s' % target_xid,
-                    "--inclusive=false"])
-#            )
+                    "--inclusive=false"]))

         node.start({"-t": "600"})

@@ -590,33 +548,27 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
         node.stop()
         try:
             self.restore_pb(node)
-            # we should die here because exception is what we expect to happen
-            exit(1)
+            self.assertEqual(1, 0, 'Error is expected because restore destination is not empty')
         except ProbackupException, e:
             self.assertEqual(
                 e.message,
-                'ERROR: restore destination is not empty: "{0}"\n'.format(node.data_dir)
-                )
+                'ERROR: restore destination is not empty: "{0}"\n'.format(node.data_dir))

         # 2 - Try to restore to existing tablespace directory
         shutil.rmtree(node.data_dir)
         try:
             self.restore_pb(node)
-            # we should die here because exception is what we expect to happen
-            exit(1)
+            self.assertEqual(1, 0, 'Error is expected because restore tablespace destination is not empty')
         except ProbackupException, e:
             self.assertEqual(
                 e.message,
-                'ERROR: restore tablespace destination is not empty: "{0}"\n'.format(tblspc_path)
-                )
+                'ERROR: restore tablespace destination is not empty: "{0}"\n'.format(tblspc_path))

         # 3 - Restore using tablespace-mapping
         tblspc_path_new = path.join(node.base_dir, "tblspc_new")
-# TODO WAITING FIX FOR RESTORE
-#        self.assertIn(six.b("INFO: restore complete."),
-        self.restore_pb(node,
-            options=["-T", "%s=%s" % (tblspc_path, tblspc_path_new)])
-#            )
+        self.assertIn(six.b("INFO: Restore of backup"),
+            self.restore_pb(node,
+                options=["-T", "%s=%s" % (tblspc_path, tblspc_path_new)]))

         node.start()
         id = node.execute("postgres", "SELECT id FROM test")
@@ -636,11 +588,10 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
         node.stop()
         node.cleanup()
         tblspc_path_page = path.join(node.base_dir, "tblspc_page")
-# TODO WAITING FIX FOR RESTORE
-#        self.assertIn(six.b("INFO: restore complete."),
-        self.restore_pb(node,
-            options=["-T", "%s=%s" % (tblspc_path_new, tblspc_path_page)])
-#            )
+
+        self.assertIn(six.b("INFO: Restore of backup"),
+            self.restore_pb(node,
+                options=["-T", "%s=%s" % (tblspc_path_new, tblspc_path_page)]))

         node.start()
         id = node.execute("postgres", "SELECT id FROM test OFFSET 1")
@@ -696,13 +647,10 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
         node.cleanup()

         tblspc_path_new = path.join(node.base_dir, "tblspc_new")
-#        exit(1)
-# TODO WAITING FIX FOR RESTORE
-#        self.assertIn(six.b("INFO: restore complete."),
-        self.restore_pb(node,
-            options=["-T", "%s=%s" % (tblspc_path, tblspc_path_new)])
-#            )
-        # Check tables
+
+        self.assertIn(six.b("INFO: Restore of backup"),
+            self.restore_pb(node,
+                options=["-T", "%s=%s" % (tblspc_path, tblspc_path_new)]))

         node.start()
         count = node.execute("postgres", "SELECT count(*) FROM tbl")
@@ -732,7 +680,10 @@ class RestoreTest(ProbackupTest, unittest.TestCase):

         node.pg_ctl('stop', {'-m': 'immediate', '-D': '{0}'.format(node.data_dir)})
         node.cleanup()
-        self.restore_pb(node, options=['--time="{0}"'.format(recovery_time)])
+        self.assertIn(six.b("INFO: Restore of backup"),
+            self.restore_pb(
+                node, options=['--time="{0}"'.format(recovery_time)]))
+
         node.start({"-t": "600"})
         self.assertEqual(True, node.status())
         node.stop()
@@ -757,9 +708,11 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
         node.pg_ctl('stop', {'-m': 'immediate', '-D': '{0}'.format(node.data_dir)})
         node.cleanup()
         recovery_time = self.show_pb(node, id=id)['recovery-time']
-        self.restore_pb(node,
-            options=["-j", "4", '--time="{0}"'.format(recovery_time)]
-            )
+
+        self.assertIn(six.b("INFO: Restore of backup"),
+            self.restore_pb(node,
+                options=["-j", "4", '--time="{0}"'.format(recovery_time)]))
+
         node.start({"-t": "600"})

         res = node.psql("postgres", 'select * from t_heap')
diff --git a/tests/retention_test.py b/tests/retention_test.py
index 265ed8da..6ef8d171 100644
--- a/tests/retention_test.py
+++ b/tests/retention_test.py
@@ -15,7 +15,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
     def tearDownClass(cls):
         stop_all()

-# @unittest.skip("123")
+    # @unittest.skip("skip")
+    # @unittest.expectedFailure
     def test_retention_redundancy_1(self):
         """purge backups using redundancy-based retention policy"""
         fname = self.id().split('.')[3]
diff --git a/tests/show_test.py b/tests/show_test.py
index 87bba77e..7db51879 100644
--- a/tests/show_test.py
+++ b/tests/show_test.py
@@ -15,6 +15,8 @@ class OptionTest(ProbackupTest, unittest.TestCase):
     def tearDownClass(cls):
         stop_all()

+    # @unittest.skip("skip")
+    # @unittest.expectedFailure
     def show_test_1(self):
         """Status DONE and OK"""
         fname = self.id().split('.')[3]
diff --git a/tests/validate_test.py b/tests/validate_test.py
index 1c67b2fc..70fd35d5 100644
--- a/tests/validate_test.py
+++ b/tests/validate_test.py
@@ -13,43 +13,13 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
     def __init__(self, *args, **kwargs):
         super(ValidateTest, self).__init__(*args, **kwargs)

-#    @classmethod
-#    def tearDownClass(cls):
-#        try:
-#            stop_all()
-#        except:
-#            pass
+    @classmethod
+    def tearDownClass(cls):
+        stop_all()

-# @unittest.skip("123")
-    def test_validate_time(self):
-        """recovery to latest from full backup. Expect to Fail"""
-        fname = self.id().split('.')[3]
-        node = self.make_simple_node(base_dir="tmp_dirs/validate/{0}".format(fname),
-            set_archiving=True,
-            initdb_params=['--data-checksums'],
-            pg_options={'wal_level': 'replica'}
-            )
-        node.start()
-
-        node.pgbench_init(scale=2)
-        pgbench = node.pgbench(
-            stdout=subprocess.PIPE,
-            stderr=subprocess.STDOUT,
-            options=["-c", "4", "-T", "10"]
-            )
-        pgbench.wait()
-        pgbench.stdout.close()
-
-        self.assertEqual(self.init_pb(node), six.b(""))
-        id = self.backup_pb(node)
-        recovery_time = self.show_pb(node, id=id)['recovery-time']
-
-        self.assertIn(six.b("INFO: backup validation completed successfully on"),
-            self.validate_pb(node, options=["--time='{0}'".format(recovery_time)]))
-        node.stop()
-
-# @unittest.skip("123")
-    def test_validate_wal_1(self):
+    # @unittest.skip("skip")
+    # @unittest.expectedFailure
+    def test_validate_wal_unreal_values(self):
         """recovery to latest from full backup"""
         fname = self.id().split('.')[3]
         node = self.make_simple_node(base_dir="tmp_dirs/validate/{0}".format(fname),
@@ -65,8 +35,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
             con.execute("CREATE TABLE tbl0005 (a text)")
             con.commit()

-        with open(os.path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
-            backup_log.write(self.backup_pb(node, options=["--verbose"]))
+        self.backup_pb(node)

         node.pgbench_init(scale=2)
         pgbench = node.pgbench(
@@ -78,7 +47,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
         pgbench.wait()
         pgbench.stdout.close()

-        id_backup = self.show_pb(node)[0]['ID']
+        backup_id = self.show_pb(node)[0]['ID']
         target_time = self.show_pb(node)[0]['Recovery time']
         after_backup_time = datetime.now()
@@ -91,24 +60,19 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
             self.validate_pb(node, options=["--time='{:%Y-%m-%d %H:%M:%S}'".format(
                 after_backup_time - timedelta(days=2))])
             # we should die here because exception is what we expect to happen
-            exit(1)
+            self.assertEqual(1, 0, "Error in validation is expected because of validation of unreal time")
         except ProbackupException, e:
             self.assertEqual(
                 e.message,
-                'ERROR: Full backup satisfying target options is not found.\n'
-                )
+                'ERROR: Full backup satisfying target options is not found.\n')

         # Validate to unreal time #2
         try:
             self.validate_pb(node, options=["--time='{:%Y-%m-%d %H:%M:%S}'".format(
                 after_backup_time + timedelta(days=2))])
-            # we should die here because exception is what we expect to happen
-            exit(1)
+            self.assertEqual(1, 0, "Error in validation is expected because of validation of unreal time")
         except ProbackupException, e:
-            self.assertEqual(
-                True,
-                'ERROR: not enough WAL records to time' in e.message
-                )
+            self.assertEqual(True, 'ERROR: not enough WAL records to time' in e.message)

         # Validate to real xid
         target_xid = None
@@ -124,19 +88,32 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
         # Validate to unreal xid
         try:
             self.validate_pb(node, options=["--xid=%d" % (int(target_xid) + 1000)])
-            # we should die here because exception is what we expect to happen
-            exit(1)
+            self.assertEqual(1, 0, "Error in validation is expected because of validation of unreal xid")
         except ProbackupException, e:
-            self.assertEqual(
-                True,
-                'ERROR: not enough WAL records to xid' in e.message
-                )
+            self.assertEqual(True, 'ERROR: not enough WAL records to xid' in e.message)

         # Validate with backup ID
         self.assertIn(six.b("INFO: backup validation completed successfully on"),
-            self.validate_pb(node, id_backup))
+            self.validate_pb(node, backup_id))

-        # Validate broken WAL
+    def test_validate_broken_wal_1(self):
+        """make backup, corrupt WAL in archive, run validate, expect it to fail and mark the backup CORRUPT"""
+        fname = self.id().split('.')[3]
+        node = self.make_simple_node(base_dir="tmp_dirs/validate/{0}".format(fname),
+            set_archiving=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica'}
+            )
+
+        node.start()
+        self.assertEqual(self.init_pb(node), six.b(""))
+        with node.connect("postgres") as con:
+            con.execute("CREATE TABLE tbl0005 (a text)")
+            con.commit()
+
+        backup_id = self.backup_pb(node)
+
+        # Corrupt WAL
         wals_dir = os.path.join(self.backup_dir(node), "wal")
         wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')]
         wals.sort()
@@ -146,29 +123,60 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
             f.write(six.b("blablabla"))
             f.close

-        try:
-            self.validate_pb(node, id_backup, options=['--xid=%s' % target_xid])
-            # we should die here because exception is what we expect to happen
-            exit(1)
-        except ProbackupException, e:
-            self.assertEqual(
-                True,
-                'Possible WAL CORRUPTION' in e.message
-                )
-
+        # Simple validate
         try:
             self.validate_pb(node)
-            # we should die here because exception is what we expect to happen
-            exit(1)
+            self.assertEqual(1, 0, "Expecting Error because of wal corruption. THIS IS BAD")
         except ProbackupException, e:
-            self.assertEqual(
-                True,
-                'Possible WAL CORRUPTION' in e.message
-                )
+            self.assertEqual(True, 'Possible WAL CORRUPTION' in e.message)
+
+        self.assertEqual('CORRUPT', self.show_pb(node, id=backup_id)['status'], 'Backup STATUS should be "CORRUPT"')

         node.stop()

-# @unittest.skip("123")
+    def test_validate_broken_wal_2(self):
+        """make backup, corrupt WAL in archive, run validate to xid, expect it to fail and mark the backup CORRUPT"""
+        fname = self.id().split('.')[3]
+        node = self.make_simple_node(base_dir="tmp_dirs/validate/{0}".format(fname),
+            set_archiving=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica'}
+            )
+
+        node.start()
+        self.assertEqual(self.init_pb(node), six.b(""))
+        with node.connect("postgres") as con:
+            con.execute("CREATE TABLE tbl0005 (a text)")
+            con.commit()
+
+        backup_id = self.backup_pb(node)
+        target_xid = None
+        with node.connect("postgres") as con:
+            res = con.execute("INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)")
+            con.commit()
+            target_xid = res[0][0]
+        node.execute("postgres", "SELECT pg_switch_xlog()")
+
+        # Corrupt WAL
+        wals_dir = os.path.join(self.backup_dir(node), "wal")
+        wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')]
+        wals.sort()
+        for wal in wals:
+            f = open(os.path.join(wals_dir, wal), "rb+")
+            f.seek(256)
+            f.write(six.b("blablabla"))
+            f.close()
+
+        # Simple validate
+        try:
+            self.validate_pb(node, backup_id, options=['--xid=%s' % target_xid])
+            self.assertEqual(1, 0, "Expecting Error because of wal corruption. THIS IS BAD")
+        except ProbackupException, e:
+            self.assertEqual(True, 'Possible WAL CORRUPTION' in e.message)
+
+        self.assertEqual('CORRUPT', self.show_pb(node, id=backup_id)['status'], 'Backup STATUS should be "CORRUPT"')
+        node.stop()
+
     def test_validate_wal_lost_segment_1(self):
         """Loose segment which belong to some backup"""
         fname = self.id().split('.')[3]
@@ -188,25 +196,23 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
             )
         pgbench.wait()
         pgbench.stdout.close()
-        self.backup_pb(node, backup_type='full')
+        backup_id = self.backup_pb(node, backup_type='full')

+        # Delete wal segment
         wals_dir = os.path.join(self.backup_dir(node), "wal")
         wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')]
         os.remove(os.path.join(self.backup_dir(node), "wal", wals[1]))

         try:
             self.validate_pb(node)
-            # we should die here because exception is what we expect to happen
-            exit(1)
+            self.assertEqual(1, 0, "Expecting Error because of wal segment disappearance")
         except ProbackupException, e:
-            self.assertEqual(
-                True,
-                'is absent' in e.message
-                )
+            self.assertEqual(True, 'is absent' in e.message)
+
+        self.assertEqual('CORRUPT', self.show_pb(node, id=backup_id)['status'], 'Backup STATUS should be "CORRUPT"')

         node.stop()

-# @unittest.skip("123")
     def test_validate_wal_lost_segment_2(self):
-        """Loose segment located between backups. Expect to fail """
+        """Lose a segment located between backups"""
         fname = self.id().split('.')[3]
         node = self.make_simple_node(base_dir="tmp_dirs/validate/{0}".format(fname),
             set_archiving=True,
@@ -216,6 +222,9 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
         node.start()
         self.assertEqual(self.init_pb(node), six.b(""))

+        self.backup_pb(node, backup_type='full')
+
+        # make some wals
         node.pgbench_init(scale=2)
         pgbench = node.pgbench(
             stdout=subprocess.PIPE,
@@ -224,38 +233,17 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
             )
         pgbench.wait()
         pgbench.stdout.close()
-        self.backup_pb(node, backup_type='full')
-
-        # need to do that to find segment between(!) backups
-        node.psql("postgres", "CREATE TABLE t1(a int)")
-        node.psql("postgres", "SELECT pg_switch_xlog()")
-        node.psql("postgres", "CREATE TABLE t2(a int)")
-        node.psql("postgres", "SELECT pg_switch_xlog()")

+        # delete last wal segment
         wals_dir = os.path.join(self.backup_dir(node), "wal")
         wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')]
         wals = map(int, wals)
-
-        # delete last wal segment
-        #print os.path.join(self.backup_dir(node), "wal", '0000000' + str(max(wals)))
         os.remove(os.path.join(self.backup_dir(node), "wal", '0000000' + str(max(wals))))

         # Need more accurate error message about loosing wal segment between backups
         try:
             self.backup_pb(node, backup_type='page')
-            # we should die here because exception is what we expect to happen
-            exit(1)
+            self.assertEqual(1, 0, "Expecting Error in PAGE backup because of wal segment disappearance")
         except ProbackupException, e:
-            self.assertEqual(
-                True,
-                'could not read WAL record' in e.message
-                )
-        self.delete_pb(node, id=self.show_pb(node)[1]['ID'])
-
-        ##### Hole Smokes, Batman! We just lost a wal segment and know nothing about it
-        ##### We need archive-push ASAP
-        self.backup_pb(node, backup_type='full')
-        self.assertEqual(False, 'validation completed successfully' in self.validate_pb(node))
-        ########
-
+            self.assertEqual(True, 'could not read WAL record' in e.message)
         node.stop()