	ptrack, validate, retention fixes
@@ -8,7 +8,6 @@ from . import init_test, option_test, show_test, \
    ptrack_vacuum_full, ptrack_vacuum_truncate



def load_tests(loader, tests, pattern):
    suite = unittest.TestSuite()
    suite.addTests(loader.loadTestsFromModule(init_test))
@@ -17,8 +16,8 @@ def load_tests(loader, tests, pattern):
    suite.addTests(loader.loadTestsFromModule(backup_test))
    suite.addTests(loader.loadTestsFromModule(delete_test))
    suite.addTests(loader.loadTestsFromModule(restore_test))
#    suite.addTests(loader.loadTestsFromModule(validate_test))
#    suite.addTests(loader.loadTestsFromModule(retention_test))
    suite.addTests(loader.loadTestsFromModule(validate_test))
    suite.addTests(loader.loadTestsFromModule(retention_test))
    suite.addTests(loader.loadTestsFromModule(ptrack_clean))
    suite.addTests(loader.loadTestsFromModule(ptrack_cluster))
    suite.addTests(loader.loadTestsFromModule(ptrack_move_to_tablespace))

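The hunk above re-enables the validate and retention modules in the package's load_tests hook. For reference, a minimal sketch of unittest's load_tests protocol with two stand-in module imports (module names as in the diff; the flat imports are an assumption for a self-contained example):

import unittest
import validate_test, retention_test  # stand-ins for the package's test modules

def load_tests(loader, tests, pattern):
    # unittest calls this hook during discovery of the package;
    # whatever suite it returns replaces the default discovery result.
    suite = unittest.TestSuite()
    suite.addTests(loader.loadTestsFromModule(validate_test))
    suite.addTests(loader.loadTestsFromModule(retention_test))
    return suite
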
@@ -133,11 +133,10 @@ class OptionTest(ProbackupTest, unittest.TestCase):
        with open(path.join(self.backup_dir(node), "pg_probackup.conf"), "a") as conf:
            conf.write("retention-redundancy=1\n")

#       TODO AFTER PGPRO-505
#        self.assertEqual(
#            self.retention_show(node, ["--redundancy", "2"]),
#            six.b("# retention policy\nREDUNDANCY=2\n")
#        )
        self.assertEqual(
            self.show_config(node)['retention-redundancy'],
            six.b('1')
        )

        # User cannot send --system-identifier parameter via command line
        try:

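An aside on the six.b('1') expectation above: the helper returns raw command output, which six keeps comparable across Python 2 and 3. A quick standalone illustration (not part of the test suite):

import six

# six.b() yields a byte string on both major Python versions:
# str on Python 2, bytes on Python 3.
assert six.b('1') == '1'.encode('ascii')
assert isinstance(six.b('retention-redundancy'), six.binary_type)
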
@@ -44,7 +44,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
            # get path to heap and index files
            idx_ptrack[i]['path'] = self.get_fork_path(node, i)
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['size'])
            self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])

@@ -61,7 +61,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
            # update path to heap and index files in case they`ve changed
            idx_ptrack[i]['path'] = self.get_fork_path(node, i)
            # # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['size'])
            # check that ptrack bits are cleaned
            self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
@@ -80,7 +80,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
            # update path to heap and index files in case they`ve changed
            idx_ptrack[i]['path'] = self.get_fork_path(node, i)
            # # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['size'])
            # check that ptrack bits are cleaned
            self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])

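These hunks only rename the helper, but the assertion they feed is worth spelling out. A minimal sketch of what check_ptrack_clean can look like, assuming "clean" means every tracked page bit is 0 after a backup (illustrative only, not pg_probackup's implementation):

def check_ptrack_clean(idx_dict, size):
    # After a successful backup, ptrack bits for all `size` pages
    # of the fork should be reset to 0 ("page unchanged").
    for page_num in range(size):
        if idx_dict['ptrack'][page_num] != 0:
            raise AssertionError(
                'ptrack bit set for page {0} of {1}'.format(
                    page_num, idx_dict['path']))
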
@@ -62,7 +62,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
            idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])

            # compare pages and check ptrack sanity
@@ -121,7 +121,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
            idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])

            # compare pages and check ptrack sanity
@@ -180,7 +180,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
            idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])

            # compare pages and check ptrack sanity
@@ -239,7 +239,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
            idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])

            # compare pages and check ptrack sanity
@@ -298,7 +298,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
            idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])

            # compare pages and check ptrack sanity

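All five hunks in this file touch the same pattern: hash each page of the fork, re-read the ptrack bitmap, then cross-check. A condensed sketch of the one direction that must always hold, assuming a set bit means "page changed since the last backup" (the real check_ptrack_sanity also handles resizes and per-index quirks):

def pages_vs_ptrack(old_pages, new_pages, ptrack_bits):
    # Any page whose md5 differs from the previous snapshot must have
    # its ptrack bit set, or incremental backups would miss it.
    for page_num in range(len(new_pages)):
        grew = page_num >= len(old_pages)
        changed = grew or old_pages[page_num] != new_pages[page_num]
        if changed and not ptrack_bits[page_num]:
            raise AssertionError(
                'page {0} changed but its ptrack bit is 0'.format(page_num))
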
@@ -172,6 +172,8 @@ class ProbackupTest(object):

        return node

#    def print_started(self, fname):
#        print

    def make_simple_node(self, base_dir=None, set_replication=False,
                        set_archiving=False, initdb_params=[], pg_options={}):
@@ -234,23 +236,21 @@ class ProbackupTest(object):
        os.close(file)
        return md5_per_page

    def get_ptrack_bits_per_for_fork(self, file, size):
    def get_ptrack_bits_per_page_for_fork(self, file, size):
        ptrack_bits_for_fork = []
        byte_size = os.path.getsize(file + '_ptrack')
        byte_size_minus_header = byte_size - 24
        file = os.open(file + '_ptrack', os.O_RDONLY)
        os.lseek(file, 24, 0)
        lot_of_bytes = os.read(file, byte_size_minus_header)
        ptrack_bits_per_for_fork = []
        for byte in lot_of_bytes:
            byte_inverted = bin(ord(byte))[2:].rjust(8, '0')[::-1]
#            byte_to_bits = (byte >> x) & 1 for x in range(7, -1, -1)
            for bit in byte_inverted:
                while len(ptrack_bits_per_for_fork) != size:
                    ptrack_bits_per_for_fork.append(int(bit))
#        print 'Size: {}'.format(size)
#        print ptrack_bits_per_for_fork
                if len(ptrack_bits_for_fork) < size:
                    ptrack_bits_for_fork.append(int(bit))
        os.close(file)
        return ptrack_bits_per_for_fork
        return ptrack_bits_for_fork

    def check_ptrack_sanity(self, idx_dict):
        success = True
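The real fix in this hunk is the inner loop: the old `while len(...) != size` filled the result with copies of the first bit it saw and never recorded the rest, while the new `if len(...) < size` appends each bit once. A self-contained sketch of the corrected extraction, under the same assumptions as the helper (24-byte ptrack header, least-significant-bit-first order within a byte, Python 2 semantics where iterating a byte string yields 1-char strings):

import os

PTRACK_HEADER_SIZE = 24  # assumption carried over from the helper above

def read_ptrack_bits(path, size):
    bits = []
    fd = os.open(path + '_ptrack', os.O_RDONLY)
    try:
        os.lseek(fd, PTRACK_HEADER_SIZE, os.SEEK_SET)
        payload = os.read(
            fd, os.path.getsize(path + '_ptrack') - PTRACK_HEADER_SIZE)
    finally:
        os.close(fd)
    for byte in payload:
        # Least significant bit first, matching the [::-1] reversal above.
        for shift in range(8):
            if len(bits) < size:
                bits.append((ord(byte) >> shift) & 1)
    return bits
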
@@ -284,7 +284,7 @@ class ProbackupTest(object):
                        PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum])
                    print idx_dict
                    if PageNum == 0 and idx_dict['type'] == 'spgist':
                        print 'SPGIST is a special showflake, so don`t freat about losing ptrack for blknum 0'
                        print 'SPGIST is a special snowflake, so don`t fret about losing ptrack for blknum 0'
                        continue
                    success = False
            else:
@@ -468,19 +468,26 @@ class ProbackupTest(object):
        # print(cmd_list)
        return self.run_pb(cmd_list + options)

    def retention_purge_pb(self, node, options=[]):
    def delete_expired(self, node, options=[]):
        cmd_list = [
            "retention", "purge",
            "delete", "--expired",
            "-B", self.backup_dir(node),
        ]
        return self.run_pb(cmd_list + options)

    def retention_show(self, node, options=[]):
    def show_config(self, node):
        out_dict = {}
        cmd_list = [
            "config",
            "show-config",
            "-B", self.backup_dir(node),
        ]
        return self.run_pb(cmd_list + options)
        res = self.run_pb(cmd_list).splitlines()
        for line in res:
            if not line.startswith('#'):
                name, var = line.partition(" = ")[::2]
                out_dict[name] = var
        return out_dict


    def get_recovery_conf(self, node):
        out_dict = {}

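The new show_config helper replaces the removed retention_show: it shells out to show-config and splits each non-comment line with str.partition. Since partition returns (head, separator, tail), the [::2] slice keeps just head and tail. A standalone illustration on a made-up config snippet:

sample_output = (
    "# this comment line is skipped\n"
    "retention-redundancy = 1\n"
    "retention-window = 7\n"
)

out_dict = {}
for line in sample_output.splitlines():
    if not line.startswith('#'):
        # partition() -> (head, sep, tail); [::2] keeps head and tail
        name, var = line.partition(" = ")[::2]
        out_dict[name] = var

print(out_dict['retention-window'])  # -> 7
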
@@ -48,7 +48,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
            # get path to heap and index files
            idx_ptrack[i]['path'] = self.get_fork_path(node, i)
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['size'])
            # check that ptrack has correct bits after recovery
            self.check_ptrack_recovery(idx_ptrack[i])

@@ -50,7 +50,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):

        for i in idx_ptrack:
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['size'])
            # check that ptrack has correct bits after recovery
            self.check_ptrack_recovery(idx_ptrack[i])

@@ -1,21 +1,8 @@
import unittest
from sys import exit
from testgres import get_new_node, stop_all
#import os
from os import path, open, lseek, read, close, O_RDONLY
from .ptrack_helpers import ProbackupTest, idx_ptrack

#        res = node.execute('postgres', 'show fsync')
#        print res[0][0]
#        res = node.execute('postgres', 'show wal_level')
#        print res[0][0]
#        a = ProbackupTest
#        res = node.execute('postgres', 'select 1')`
#        self.assertEqual(len(res), 1)
#        self.assertEqual(res[0][0], 1)
#        node.stop()
#        a = self.backup_dir(node)


class SimpleTest(ProbackupTest, unittest.TestCase):
    def __init__(self, *args, **kwargs):
@@ -63,7 +50,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
        self.init_pb(node)
        self.backup_pb(node, backup_type='full', options=['-j100', '--stream'])
        for i in idx_ptrack:
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
            self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])

@@ -81,7 +68,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
            idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])

            # compare pages and check ptrack sanity

@@ -59,7 +59,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
            idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])

            # compare pages and check ptrack sanity

@@ -59,7 +59,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
            idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])

            # compare pages and check ptrack sanity

@@ -26,8 +26,9 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
        stop_all()

    def test_ptrack_vacuum_full(self):
        print 'test_ptrack_vacuum_full started'
        node = self.make_simple_node(base_dir="tmp_dirs/ptrack/test_ptrack_vacuum_full",
        fname = self.id().split('.')[3]
        print '{0} started'.format(fname)
        node = self.make_simple_node(base_dir='tmp_dirs/ptrack/{0}'.format(fname),
            set_replication=True,
            initdb_params=['--data-checksums', '-A trust'],
            pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
@@ -73,7 +74,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
            idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])

            # compare pages and check ptrack sanity, the most important part

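The test above now derives its name and working directory from unittest itself: self.id() returns the dotted id of the running test (e.g. tests.some_module.SimpleTest.test_ptrack_vacuum_full), so split('.')[3] picks out the method name, assuming this suite's fixed package depth. A runnable sketch:

import unittest

class SimpleTest(unittest.TestCase):
    def test_demo(self):
        # id() -> '<package>.<module>.SimpleTest.test_demo'
        fname = self.id().split('.')[-1]  # [-1] always picks the method name
        self.assertEqual(fname, 'test_demo')

if __name__ == '__main__':
    unittest.main()
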
@@ -61,7 +61,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
            idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])

            # compare pages and check ptrack sanity

@@ -14,10 +14,11 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super(RestoreTest, self).__init__(*args, **kwargs)

#    @classmethod
#    def tearDownClass(cls):
#        stop_all()
    @classmethod
    def tearDownClass(cls):
        stop_all()

#    @unittest.skip("123")
    def test_restore_full_to_latest(self):
        """recovery to latest from full backup"""
        fname = self.id().split('.')[3]
@@ -366,12 +367,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
            self.skipTest("ptrack not supported")
            return

#        node.append_conf("pg_hba.conf", "local replication all trust")
#        node.append_conf("pg_hba.conf", "host replication all 127.0.0.1/32 trust")
#        node.append_conf("postgresql.conf", "ptrack_enable = on")
#        node.append_conf("postgresql.conf", "max_wal_senders = 1")
#        node.restart()

        with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
            backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose", "--stream"]))

@@ -400,11 +395,12 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
        node.stop()

    def test_restore_full_ptrack_under_load(self):
        """recovery to latest from full + page backups with loads when ptrack backup do"""
        """recovery to latest from full + ptrack backups with loads when ptrack backup do"""
        fname = self.id().split('.')[3]
        print '{0} started'.format(fname)
        node = self.make_simple_node(base_dir="tmp_dirs/restore/{0}".format(fname),
            set_archiving=True,
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'ptrack_enable': 'on', 'max_wal_senders': '2'}
            )
@@ -417,11 +413,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
            node.stop()
            self.skipTest("ptrack not supported")
            return

        #node.append_conf("pg_hba.conf", "local replication all trust")
        #node.append_conf("pg_hba.conf", "host replication all 127.0.0.1/32 trust")
        #node.append_conf("postgresql.conf", "ptrack_enable = on")
        #node.append_conf("postgresql.conf", "max_wal_senders = 1")
        node.restart()

        with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
@@ -439,8 +430,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
        pgbench.wait()
        pgbench.stdout.close()

        node.execute("postgres", "SELECT pg_switch_xlog()")

        bbalance = node.execute("postgres", "SELECT sum(bbalance) FROM pgbench_branches")
        delta = node.execute("postgres", "SELECT sum(delta) FROM pgbench_history")

@@ -470,6 +459,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
        print '{0} started'.format(fname)
        node = self.make_simple_node(base_dir="tmp_dirs/restore/{0}".format(fname),
            set_archiving=True,
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'ptrack_enable': 'on', 'max_wal_senders': '2'}
            )
@@ -483,10 +473,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
            self.skipTest("ptrack not supported")
            return

        #node.append_conf("pg_hba.conf", "local replication all trust")
        #node.append_conf("pg_hba.conf", "host replication all 127.0.0.1/32 trust")
        #node.append_conf("postgresql.conf", "ptrack_enable = on")
        #node.append_conf("postgresql.conf", "max_wal_senders = 1")
        node.restart()

        pgbench = node.pgbench(
@@ -504,8 +490,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
        with open(path.join(node.logs_dir, "backup_2.log"), "wb") as backup_log:
            backup_log.write(self.backup_pb(node, backup_type="ptrack", options=["--verbose", "--stream"]))

        node.execute("postgres", "SELECT pg_switch_xlog()")

        bbalance = node.execute("postgres", "SELECT sum(bbalance) FROM pgbench_branches")
        delta = node.execute("postgres", "SELECT sum(delta) FROM pgbench_history")

@@ -638,9 +622,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
                'ERROR: restore tablespace destination is not empty: "{0}"\n'.format(tblspc_path)
                )

#        self.assertIn(six.b("ERROR: restore tablespace destination is not empty"),
#            self.restore_pb(node))

        # 3 - Restore using tablespace-mapping
        tblspc_path_new = path.join(node.base_dir, "tblspc_new")
#        TODO WAITING FIX FOR RESTORE
@@ -662,7 +643,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):

        show_pb = self.show_pb(node)
        self.assertEqual(show_pb[1]['Status'], six.b("OK"))
        self.assertEqual(show_pb[2]['Status'], six.b("OK"))#
        self.assertEqual(show_pb[2]['Status'], six.b("OK"))

        node.stop()
        node.cleanup()
@@ -677,7 +658,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
        id = node.execute("postgres", "SELECT id FROM test OFFSET 1")
        self.assertEqual(id[0][0], 2)

        #node.stop()
        node.stop()

    def test_restore_with_tablespace_mapping_2(self):
        """recovery using tablespace-mapping option and page backup"""
@@ -728,7 +709,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
        node.cleanup()

        tblspc_path_new = path.join(node.base_dir, "tblspc_new")
        print tblspc_path_new
#        exit(1)
#        TODO WAITING FIX FOR RESTORE
#        self.assertIn(six.b("INFO: restore complete."),

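Worth noting in these restore hunks: the forced pg_switch_xlog() calls are gone (presumably unnecessary because these backups run with --stream, which carries its own WAL), and the pgbench aggregates are captured before shutdown. A sketch of the implied end-to-end check, under the assumption that the test re-runs the same queries after restore (names and queries as in the diff):

def capture_totals(node):
    # the two pgbench aggregates the restore must preserve
    bbalance = node.execute(
        "postgres", "SELECT sum(bbalance) FROM pgbench_branches")
    delta = node.execute(
        "postgres", "SELECT sum(delta) FROM pgbench_history")
    return bbalance, delta

# assumed usage around a restore:
#   before = capture_totals(node)
#   node.stop(); <restore from backup>; node.start()
#   assert capture_totals(node) == before
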
@@ -2,88 +2,104 @@ import unittest
import os
from datetime import datetime, timedelta
from os import path, listdir
from .pb_lib import ProbackupTest
from .ptrack_helpers import ProbackupTest
from testgres import stop_all


class RetentionTest(ProbackupTest, unittest.TestCase):

	def __init__(self, *args, **kwargs):
		super(RetentionTest, self).__init__(*args, **kwargs)
    def __init__(self, *args, **kwargs):
        super(RetentionTest, self).__init__(*args, **kwargs)

	@classmethod
	def tearDownClass(cls):
		stop_all()
    @classmethod
    def tearDownClass(cls):
        stop_all()

	def test_retention_redundancy_1(self):
		"""purge backups using redundancy-based retention policy"""
		node = self.make_bnode(base_dir="tmp_dirs/retention/retention_redundancy_1")
		node.start()
#    @unittest.skip("123")
    def test_retention_redundancy_1(self):
        """purge backups using redundancy-based retention policy"""
        fname = self.id().split('.')[3]
        print '{0} started'.format(fname)
        node = self.make_simple_node(base_dir="tmp_dirs/retention/{0}".format(fname),
            set_archiving=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica'}
            )

		self.init_pb(node)
		with open(path.join(self.backup_dir(node), "pg_probackup.conf"), "a") as conf:
			conf.write("REDUNDANCY=1\n")
        node.start()

		# Make backups to be purged
		self.backup_pb(node)
		self.backup_pb(node, backup_type="page")
		# Make backups to be keeped
		self.backup_pb(node)
		self.backup_pb(node, backup_type="page")
        self.init_pb(node)
        with open(path.join(self.backup_dir(node), "pg_probackup.conf"), "a") as conf:
            conf.write("retention-redundancy = 1\n")

		self.assertEqual(len(self.show_pb(node)), 4)
        # Make backups to be purged
        self.backup_pb(node)
        self.backup_pb(node, backup_type="page")
        # Make backups to be keeped
        self.backup_pb(node)
        self.backup_pb(node, backup_type="page")

		# Purge backups
		log = self.retention_purge_pb(node)
		self.assertEqual(len(self.show_pb(node)), 2)
        self.assertEqual(len(self.show_pb(node)), 4)

		# Check that WAL segments were deleted
		min_wal = None
		max_wal = None
		for line in log.splitlines():
			if line.startswith(b"INFO: removed min WAL segment"):
				min_wal = line[31:-1]
			elif line.startswith(b"INFO: removed max WAL segment"):
				max_wal = line[31:-1]
		for wal_name in listdir(path.join(self.backup_dir(node), "wal")):
			if not wal_name.endswith(".backup"):
				wal_name_b = wal_name.encode('ascii')
				self.assertEqual(wal_name_b[8:] > min_wal[8:], True)
				self.assertEqual(wal_name_b[8:] > max_wal[8:], True)
        # Purge backups
        log = self.delete_expired(node)
        self.assertEqual(len(self.show_pb(node)), 2)

		node.stop()
        # Check that WAL segments were deleted
        min_wal = None
        max_wal = None
        for line in log.splitlines():
            if line.startswith(b"INFO: removed min WAL segment"):
                min_wal = line[31:-1]
            elif line.startswith(b"INFO: removed max WAL segment"):
                max_wal = line[31:-1]
        for wal_name in listdir(path.join(self.backup_dir(node), "wal")):
            if not wal_name.endswith(".backup"):
                wal_name_b = wal_name.encode('ascii')
                self.assertEqual(wal_name_b[8:] > min_wal[8:], True)
                self.assertEqual(wal_name_b[8:] > max_wal[8:], True)

	def test_retention_window_2(self):
		"""purge backups using window-based retention policy"""
		node = self.make_bnode(base_dir="tmp_dirs/retention/retention_window_2")
		node.start()
        node.stop()

		self.init_pb(node)
		with open(path.join(self.backup_dir(node), "pg_probackup.conf"), "a") as conf:
			conf.write("REDUNDANCY=1\n")
			conf.write("WINDOW=1\n")
#    @unittest.skip("123")
    def test_retention_window_2(self):
        """purge backups using window-based retention policy"""
        fname = self.id().split('.')[3]
        print '{0} started'.format(fname)
        node = self.make_simple_node(base_dir="tmp_dirs/retention/{0}".format(fname),
            set_archiving=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica'}
            )

		# Make backups to be purged
		self.backup_pb(node)
		self.backup_pb(node, backup_type="page")
		# Make backup to be keeped
		self.backup_pb(node)
        node.start()

		backups = path.join(self.backup_dir(node), "backups")
		days_delta = 5
		for backup in listdir(backups):
			with open(path.join(backups, backup, "backup.conf"), "a") as conf:
				conf.write("RECOVERY_TIME='{:%Y-%m-%d %H:%M:%S}'\n".format(
					datetime.now() - timedelta(days=days_delta)))
				days_delta -= 1
        self.init_pb(node)
        with open(path.join(self.backup_dir(node), "pg_probackup.conf"), "a") as conf:
            conf.write("retention-redundancy = 1\n")
            conf.write("retention-window = 1\n")

		# Make backup to be keeped
		self.backup_pb(node, backup_type="page")
        # Make backups to be purged
        self.backup_pb(node)
        self.backup_pb(node, backup_type="page")
        # Make backup to be keeped
        self.backup_pb(node)

		self.assertEqual(len(self.show_pb(node)), 4)
        backups = path.join(self.backup_dir(node), "backups")
        days_delta = 5
        for backup in listdir(backups):
            with open(path.join(backups, backup, "backup.control"), "a") as conf:
                conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format(
                    datetime.now() - timedelta(days=days_delta)))
                days_delta -= 1

		# Purge backups
		self.retention_purge_pb(node)
		self.assertEqual(len(self.show_pb(node)), 2)
        # Make backup to be keeped
        self.backup_pb(node, backup_type="page")

		node.stop()
        self.assertEqual(len(self.show_pb(node)), 4)

        # Purge backups
        self.delete_expired(node)
        self.assertEqual(len(self.show_pb(node)), 2)

        node.stop()

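The window test above backdates each backup's recovery_time in backup.control, then expects delete --expired to keep only backups younger than retention-window days. A minimal sketch of that selection rule (illustrative; the real policy also honors retention-redundancy and keeps backups required by newer increments):

from datetime import datetime, timedelta

def expired_backups(recovery_times, window_days):
    # A backup is expired when its recovery_time falls outside the window.
    cutoff = datetime.now() - timedelta(days=window_days)
    return [ts for ts in recovery_times if ts < cutoff]

times = [datetime.now() - timedelta(days=d) for d in (5, 4, 3, 0)]
# the three backdated backups fall outside a 1-day window
print(len(expired_backups(times, 1)))  # -> 3
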
@@ -1,7 +1,7 @@
import unittest
from os import path, listdir
import os
import six
from .pb_lib import ProbackupTest
from .ptrack_helpers import ProbackupTest, ProbackupException
from datetime import datetime, timedelta
from testgres import stop_all
import subprocess
@@ -9,86 +9,227 @@ import subprocess

class ValidateTest(ProbackupTest, unittest.TestCase):

	def __init__(self, *args, **kwargs):
		super(ValidateTest, self).__init__(*args, **kwargs)
    def __init__(self, *args, **kwargs):
        super(ValidateTest, self).__init__(*args, **kwargs)

	@classmethod
	def tearDownClass(cls):
		try:
			stop_all()
		except:
			pass
#    @classmethod
#    def tearDownClass(cls):
#        try:
#            stop_all()
#        except:
#            pass

	def test_validate_wal_1(self):
		"""recovery to latest from full backup"""
		node = self.make_bnode(base_dir="tmp_dirs/validate/wal_1")
		node.start()
		self.assertEqual(self.init_pb(node), six.b(""))
		node.pgbench_init(scale=2)
		with node.connect("postgres") as con:
			con.execute("CREATE TABLE tbl0005 (a text)")
			con.commit()
#    @unittest.skip("123")
    def test_validate_wal_1(self):
        """recovery to latest from full backup"""
        fname = self.id().split('.')[3]
        print '\n {0} started'.format(fname)
        node = self.make_simple_node(base_dir="tmp_dirs/validate/{0}".format(fname),
            set_archiving=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica'}
            )

		with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
			backup_log.write(self.backup_pb(node, options=["--verbose"]))
        node.start()
        self.assertEqual(self.init_pb(node), six.b(""))
        node.pgbench_init(scale=2)
        with node.connect("postgres") as con:
            con.execute("CREATE TABLE tbl0005 (a text)")
            con.commit()

		pgbench = node.pgbench(
			stdout=subprocess.PIPE,
			stderr=subprocess.STDOUT,
			options=["-c", "4", "-T", "10"]
		)
        with open(os.path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
            backup_log.write(self.backup_pb(node, options=["--verbose"]))

		pgbench.wait()
		pgbench.stdout.close()
        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "4", "-T", "10"]
        )

		# Save time to validate
		target_time = datetime.now()
        pgbench.wait()
        pgbench.stdout.close()

		target_xid = None
		with node.connect("postgres") as con:
			res = con.execute("INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)")
			con.commit()
			target_xid = res[0][0]
        id_backup = self.show_pb(node)[0]['ID']
        target_time = self.show_pb(node)[0]['Recovery time']
        after_backup_time = datetime.now()

		node.execute("postgres", "SELECT pg_switch_xlog()")
		node.stop({"-m": "smart"})
        # Validate to real time
        self.assertIn(six.b("INFO: backup validation completed successfully on"),
            self.validate_pb(node, options=["--time='{0}'".format(target_time)]))

		id_backup = self.show_pb(node)[0].id
        # Validate to unreal time
        try:
            self.validate_pb(node, options=["--time='{:%Y-%m-%d %H:%M:%S}'".format(
                after_backup_time - timedelta(days=2))])
            # we should die here because exception is what we expect to happen
            exit(1)
        except ProbackupException, e:
            self.assertEqual(
                e.message,
                'ERROR: Full backup satisfying target options is not found.\n'
                )

		# Validate to real time
		self.assertIn(six.b("INFO: backup validation completed successfully on"),
			self.validate_pb(node, options=["--time='{:%Y-%m-%d %H:%M:%S}'".format(
				target_time)]))
        # Validate to unreal time #2
        try:
            self.validate_pb(node, options=["--time='{:%Y-%m-%d %H:%M:%S}'".format(
                after_backup_time + timedelta(days=2))])
            # we should die here because exception is what we expect to happen
            exit(1)
        except ProbackupException, e:
            self.assertEqual(
                True,
                'ERROR: not enough WAL records to time' in e.message
                )

		# Validate to unreal time
		self.assertIn(six.b("ERROR: no full backup found, cannot validate."),
			self.validate_pb(node, options=["--time='{:%Y-%m-%d %H:%M:%S}'".format(
				target_time - timedelta(days=2))]))
        # Validate to real xid
        target_xid = None
        with node.connect("postgres") as con:
            res = con.execute("INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)")
            con.commit()
            target_xid = res[0][0]
        node.execute("postgres", "SELECT pg_switch_xlog()")

		# Validate to unreal time #2
		self.assertIn(six.b("ERROR: not enough WAL records to time"),
			self.validate_pb(node, options=["--time='{:%Y-%m-%d %H:%M:%S}'".format(
				target_time + timedelta(days=2))]))
        self.assertIn(six.b("INFO: backup validation completed successfully on"),
            self.validate_pb(node, options=["--xid=%s" % target_xid]))

		# Validate to real xid
		self.assertIn(six.b("INFO: backup validation completed successfully on"),
			self.validate_pb(node, options=["--xid=%s" % target_xid]))
        # Validate to unreal xid
        try:
            self.validate_pb(node, options=["--xid=%d" % (int(target_xid) + 1000)])
            # we should die here because exception is what we expect to happen
            exit(1)
        except ProbackupException, e:
            self.assertEqual(
                True,
                'ERROR: not enough WAL records to xid' in e.message
                )

		# Validate to unreal xid
		self.assertIn(six.b("ERROR: not enough WAL records to xid"),
			self.validate_pb(node, options=["--xid=%d" % (int(target_xid) + 1000)]))
        # Validate with backup ID
        self.assertIn(six.b("INFO: backup validation completed successfully on"),
            self.validate_pb(node, id_backup))

		# Validate with backup ID
		self.assertIn(six.b("INFO: backup validation completed successfully on"),
			self.validate_pb(node, id_backup))
        # Validate broken WAL
        wals_dir = os.path.join(self.backup_dir(node), "wal")
        wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')]
        wals.sort()
        for wal in wals:
            f = open(os.path.join(wals_dir, wal), "rb+")
            f.seek(256)
            f.write(six.b("blablabla"))
            f.close

		# Validate broken WAL
		wals_dir = path.join(self.backup_dir(node), "wal")
		wals = [f for f in listdir(wals_dir) if path.isfile(path.join(wals_dir, f))]
		wals.sort()
		with open(path.join(wals_dir, wals[-3]), "rb+") as f:
			f.seek(256)
			f.write(six.b("blablabla"))
        try:
            self.validate_pb(node, id_backup, options=['--xid=%s' % target_xid])
            # we should die here because exception is what we expect to happen
            exit(1)
        except ProbackupException, e:
            self.assertEqual(
                True,
                'Possible WAL CORRUPTION' in e.message
                )

		res = self.validate_pb(node, id_backup, options=['--xid=%s' % target_xid])
		self.assertIn(six.b("not enough WAL records to xid"), res)
        try:
            self.validate_pb(node)
            # we should die here because exception is what we expect to happen
            exit(1)
        except ProbackupException, e:
            self.assertEqual(
                True,
                'Possible WAL CORRUPTION' in e.message
                )

        node.stop()

#    @unittest.skip("123")
    def test_validate_wal_lost_segment_1(self):
        """Loose segment which belong to some backup"""
        fname = self.id().split('.')[3]
        print '{0} started'.format(fname)
        node = self.make_simple_node(base_dir="tmp_dirs/validate/{0}".format(fname),
            set_archiving=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica'}
            )

        node.start()
        self.assertEqual(self.init_pb(node), six.b(""))
        node.pgbench_init(scale=2)
        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "4", "-T", "10"]
        )
        pgbench.wait()
        pgbench.stdout.close()
        self.backup_pb(node, backup_type='full')

        wals_dir = os.path.join(self.backup_dir(node), "wal")
        wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')]
        os.remove(os.path.join(self.backup_dir(node), "wal", wals[1]))
        try:
            self.validate_pb(node)
            # we should die here because exception is what we expect to happen
            exit(1)
        except ProbackupException, e:
            self.assertEqual(
                True,
                'is absent' in e.message
                )
        node.stop()

    def test_validate_wal_lost_segment_2(self):
        """Loose segment located between backups """
        fname = self.id().split('.')[3]
        print '{0} started'.format(fname)
        node = self.make_simple_node(base_dir="tmp_dirs/validate/{0}".format(fname),
            set_archiving=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica'}
            )

        node.start()
        self.assertEqual(self.init_pb(node), six.b(""))
        node.pgbench_init(scale=2)
        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "4", "-T", "10"]
        )
        pgbench.wait()
        pgbench.stdout.close()
        self.backup_pb(node, backup_type='full')

        # need to do that to find segment between(!) backups
        node.psql("postgres", "CREATE TABLE t1(a int)")
        node.psql("postgres", "SELECT pg_switch_xlog()")
        node.psql("postgres", "CREATE TABLE t2(a int)")
        node.psql("postgres", "SELECT pg_switch_xlog()")

        wals_dir = os.path.join(self.backup_dir(node), "wal")
        wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')]
        wals = map(int, wals)

        # delete last wal segment
        print os.path.join(self.backup_dir(node), "wal", '0000000' + str(max(wals)))
        os.remove(os.path.join(self.backup_dir(node), "wal", '0000000' + str(max(wals))))

        # Need more accurate error message about loosing wal segment between backups
        try:
            self.backup_pb(node, backup_type='page')
            # we should die here because exception is what we expect to happen
            exit(1)
        except ProbackupException, e:
            self.assertEqual(
                True,
                'could not read WAL record' in e.message
                )
        self.delete_pb(node, id=self.show_pb(node)[1]['ID'])


        ##### Hole Smokes, Batman! We just lost a wal segment and know nothing about it
        ##### We need archive-push ASAP
        self.backup_pb(node, backup_type='full')
        self.assertEqual(False,
                'validation completed successfully' in self.validate_pb(node))
        ########

        node.stop()

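Most of the new negative tests in this file repeat one shape: run a command that must fail, exit(1) if it unexpectedly succeeds, then substring-match ProbackupException's message. That could be factored into a helper; a sketch, assuming (as the imports above do) that ProbackupException lives in ptrack_helpers and exposes .message:

from .ptrack_helpers import ProbackupException

def assert_pb_error(test, action, fragment):
    # `action` is a zero-argument callable wrapping the pg_probackup call.
    try:
        action()
    except ProbackupException as e:
        test.assertIn(fragment, e.message)
        return
    test.fail('expected ProbackupException mentioning: {0}'.format(fragment))

# hypothetical usage inside a test method:
#   assert_pb_error(self, lambda: self.validate_pb(node), 'Possible WAL CORRUPTION')
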