	ptrack tests added

.gitignore (vendored, 1 line changed)
@@ -28,6 +28,7 @@
 /env
 /tests/__pycache__/
 /tests/tmp_dirs/
+/tests/*pyc
 
 # Extra files
 /datapagemap.c
 
tests/__init__.py

@@ -1,19 +1,32 @@
 import unittest
 
 from . import init_test, option_test, show_test, \
-	backup_test, delete_test, restore_test, validate_test, \
-	retention_test
+    backup_test, delete_test, restore_test, validate_test, \
+    retention_test, ptrack_clean, ptrack_cluster, \
+    ptrack_move_to_tablespace, ptrack_recovery, ptrack_vacuum, \
+    ptrack_vacuum_bits_frozen, ptrack_vacuum_bits_visibility, \
+    ptrack_vacuum_full, ptrack_vacuum_truncate
 
 
 
 def load_tests(loader, tests, pattern):
-	suite = unittest.TestSuite()
-	suite.addTests(loader.loadTestsFromModule(init_test))
-	suite.addTests(loader.loadTestsFromModule(option_test))
-	suite.addTests(loader.loadTestsFromModule(show_test))
-	suite.addTests(loader.loadTestsFromModule(backup_test))
-	suite.addTests(loader.loadTestsFromModule(delete_test))
-	suite.addTests(loader.loadTestsFromModule(restore_test))
-	suite.addTests(loader.loadTestsFromModule(validate_test))
-	suite.addTests(loader.loadTestsFromModule(retention_test))
+    suite = unittest.TestSuite()
+    suite.addTests(loader.loadTestsFromModule(init_test))
+    suite.addTests(loader.loadTestsFromModule(option_test))
+    suite.addTests(loader.loadTestsFromModule(show_test))
+    suite.addTests(loader.loadTestsFromModule(backup_test))
+    suite.addTests(loader.loadTestsFromModule(delete_test))
+    suite.addTests(loader.loadTestsFromModule(restore_test))
+    suite.addTests(loader.loadTestsFromModule(validate_test))
+    suite.addTests(loader.loadTestsFromModule(retention_test))
+    suite.addTests(loader.loadTestsFromModule(ptrack_clean))
+    suite.addTests(loader.loadTestsFromModule(ptrack_cluster))
+    suite.addTests(loader.loadTestsFromModule(ptrack_move_to_tablespace))
+    suite.addTests(loader.loadTestsFromModule(ptrack_recovery))
+    suite.addTests(loader.loadTestsFromModule(ptrack_vacuum))
+    suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_bits_frozen))
+    suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_bits_visibility))
+    suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_full))
+    suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_truncate))
 
-	return suite
+    return suite
 
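Note: load_tests above is the stock unittest protocol, so the new ptrack modules are picked up automatically when the tests package is loaded by a runner. A minimal sketch of driving it by hand (not part of the commit; assumes the package layout above):

    import unittest
    import tests  # the package whose __init__.py defines load_tests()

    loader = unittest.TestLoader()
    suite = tests.load_tests(loader, None, None)
    unittest.TextTestRunner(verbosity=2).run(suite)
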
tests/backup_test.py

@@ -1,153 +1,169 @@
 import unittest
 from os import path, listdir
 import six
-from .pb_lib import ProbackupTest
+from .ptrack_helpers import ProbackupTest, ProbackupException
 from testgres import stop_all
 
 
 class BackupTest(ProbackupTest, unittest.TestCase):
 
-	def __init__(self, *args, **kwargs):
-		super(BackupTest, self).__init__(*args, **kwargs)
+    def __init__(self, *args, **kwargs):
+        super(BackupTest, self).__init__(*args, **kwargs)
 
-	@classmethod
-	def tearDownClass(cls):
-		stop_all()
+#    @classmethod
+#    def tearDownClass(cls):
+#        stop_all()
+#    @unittest.skip("123")
+    def test_backup_modes_archive(self):
+        """standart backup modes with ARCHIVE WAL method"""
+        fname = self.id().split('.')[3]
+        print '{0} started'.format(fname)
+        node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
+            set_archiving=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'}
+            )
+        node.start()
+        self.assertEqual(self.init_pb(node), six.b(""))
 
-	def test_backup_modes_1(self):
-		"""standart backup modes"""
-		node = self.make_bnode(base_dir="tmp_dirs/backup/backup_modes_1")
-		node.start()
-		self.assertEqual(self.init_pb(node), six.b(""))
+        # full backup mode
+        with open(path.join(node.logs_dir, "backup_full.log"), "wb") as backup_log:
+            backup_log.write(self.backup_pb(node, options=["--verbose"]))
 
-		# detect ptrack
-		is_ptrack = node.execute("postgres", "SELECT proname FROM pg_proc WHERE proname='pg_ptrack_clear'")
-		if len(is_ptrack):
-			node.append_conf("postgresql.conf", "ptrack_enable = on")
-			node.restart()
+        show_backup = self.show_pb(node)[0]
+        full_backup_id = show_backup['ID']
+        self.assertEqual(show_backup['Status'], six.b("OK"))
+        self.assertEqual(show_backup['Mode'], six.b("FULL"))
 
-		# full backup mode
-		with open(path.join(node.logs_dir, "backup_full.log"), "wb") as backup_log:
-			backup_log.write(self.backup_pb(node, options=["--verbose"]))
+        # postmaster.pid and postmaster.opts shouldn't be copied
+        excluded = True
+        backups_dir = path.join(self.backup_dir(node), "backups")
+        for backup in listdir(backups_dir):
+            db_dir = path.join(backups_dir, backup, "database")
+            for f in listdir(db_dir):
+                if path.isfile(path.join(db_dir, f)) and \
+                    (f == "postmaster.pid" or f == "postmaster.opts"):
+                    excluded = False
+        self.assertEqual(excluded, True)
 
-		show_backup = self.show_pb(node)[0]
-		full_backup_id = show_backup.id
-		self.assertEqual(show_backup.status, six.b("OK"))
-		self.assertEqual(show_backup.mode, six.b("FULL"))
+        # page backup mode
+        with open(path.join(node.logs_dir, "backup_page.log"), "wb") as backup_log:
+            backup_log.write(self.backup_pb(node, backup_type="page", options=["--verbose"]))
 
-		# postmaster.pid and postmaster.opts shouldn't be copied
-		excluded = True
-		backups_dir = path.join(self.backup_dir(node), "backups")
-		for backup in listdir(backups_dir):
-			db_dir = path.join(backups_dir, backup, "database")
-			for f in listdir(db_dir):
-				if path.isfile(path.join(db_dir, f)) and \
-					(f == "postmaster.pid" or f == "postmaster.opts"):
-					excluded = False
-		self.assertEqual(excluded, True)
+        print self.show_pb(node)
+        show_backup = self.show_pb(node)[1]
+        self.assertEqual(show_backup['Status'], six.b("OK"))
+        self.assertEqual(show_backup['Mode'], six.b("PAGE"))
 
-		# page backup mode
-		with open(path.join(node.logs_dir, "backup_page.log"), "wb") as backup_log:
-			backup_log.write(self.backup_pb(node, backup_type="page", options=["--verbose"]))
+        # Check parent backup
+        self.assertEqual(
+            full_backup_id,
+            self.show_pb(node, id=show_backup['ID'])["parent-backup-id"])
 
-		show_backup = self.show_pb(node)[0]
-		self.assertEqual(show_backup.status, six.b("OK"))
-		self.assertEqual(show_backup.mode, six.b("PAGE"))
+        # ptrack backup mode
+        with open(path.join(node.logs_dir, "backup_ptrack.log"), "wb") as backup_log:
+            backup_log.write(self.backup_pb(node, backup_type="ptrack", options=["--verbose"]))
 
-		# Check parent backup
-		self.assertEqual(
-			full_backup_id,
-			self.show_pb(node, show_backup.id)[six.b("PARENT_BACKUP")].strip(six.b(" '"))
-		)
+        show_backup = self.show_pb(node)[2]
+        self.assertEqual(show_backup['Status'], six.b("OK"))
+        self.assertEqual(show_backup['Mode'], six.b("PTRACK"))
 
-		# ptrack backup mode
-		if len(is_ptrack):
-			with open(path.join(node.logs_dir, "backup_ptrack.log"), "wb") as backup_log:
-				backup_log.write(self.backup_pb(node, backup_type="ptrack", options=["--verbose"]))
+        node.stop()
 
-			show_backup = self.show_pb(node)[0]
-			self.assertEqual(show_backup.status, six.b("OK"))
-			self.assertEqual(show_backup.mode, six.b("PTRACK"))
+#    @unittest.skip("123")
+    def test_smooth_checkpoint(self):
+        """full backup with smooth checkpoint"""
+        fname = self.id().split('.')[3]
+        print '{0} started'.format(fname)
+        node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
+            set_archiving=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica'}
+            )
+        node.start()
+        self.assertEqual(self.init_pb(node), six.b(""))
 
-		node.stop()
+        with open(path.join(node.logs_dir, "backup.log"), "wb") as backup_log:
+            backup_log.write(self.backup_pb(node, options=["--verbose", "-C"]))
 
-	def test_smooth_checkpoint_2(self):
-		"""full backup with smooth checkpoint"""
-		node = self.make_bnode(base_dir="tmp_dirs/backup/smooth_checkpoint_2")
-		node.start()
-		self.assertEqual(self.init_pb(node), six.b(""))
+        self.assertEqual(self.show_pb(node)[0]['Status'], six.b("OK"))
 
-		with open(path.join(node.logs_dir, "backup.log"), "wb") as backup_log:
-			backup_log.write(self.backup_pb(node, options=["--verbose", "-C"]))
+        node.stop()
 
-		self.assertEqual(self.show_pb(node)[0].status, six.b("OK"))
+#    @unittest.skip("123")
+    def test_page_backup_without_full(self):
+        """page-level backup without validated full backup"""
+        fname = self.id().split('.')[3]
+        print '{0} started'.format(fname)
+        node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
+            set_archiving=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica'}
+            )
+        node.start()
+        self.assertEqual(self.init_pb(node), six.b(""))
 
-		node.stop()
+        try:
+            self.backup_pb(node, backup_type="page", options=["--verbose"])
+        except ProbackupException, e:
+            pass
+        self.assertEqual(self.show_pb(node)[0]['Status'], six.b("ERROR"))
 
-	def test_page_backup_without_full_3(self):
-		"""page-level backup without validated full backup"""
-		node = self.make_bnode(base_dir="tmp_dirs/backup/without_full_3")
-		node.start()
-		self.assertEqual(self.init_pb(node), six.b(""))
+        node.stop()
 
-		with open(path.join(node.logs_dir, "backup.log"), "wb") as backup_log:
-			backup_log.write(self.backup_pb(node, backup_type="page", options=["--verbose"]))
+#    @unittest.skip("123")
+    def test_ptrack_threads(self):
+        """ptrack multi thread backup mode"""
+        node = self.make_bnode(
+            base_dir="tmp_dirs/backup/ptrack_threads_4",
+            options={"ptrack_enable": "on", 'max_wal_senders': '2'}
+        )
+        node.start()
+        self.assertEqual(self.init_pb(node), six.b(""))
 
-		self.assertEqual(self.show_pb(node)[0].status, six.b("ERROR"))
+        with open(path.join(node.logs_dir, "backup_full.log"), "wb") as backup_log:
+            backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose", "-j", "4"]))
 
-		node.stop()
+        self.assertEqual(self.show_pb(node)[0]['Status'], six.b("OK"))
 
-	def test_ptrack_threads_4(self):
-		"""ptrack multi thread backup mode"""
-		node = self.make_bnode(
-			base_dir="tmp_dirs/backup/ptrack_threads_4",
-			options={"ptrack_enable": "on"}
-		)
-		node.start()
-		self.assertEqual(self.init_pb(node), six.b(""))
+        with open(path.join(node.logs_dir, "backup_ptrack.log"), "wb") as backup_log:
+            backup_log.write(self.backup_pb(node, backup_type="ptrack", options=["--verbose", "-j", "4"]))
 
-		with open(path.join(node.logs_dir, "backup_full.log"), "wb") as backup_log:
-			backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose", "-j", "4"]))
+        self.assertEqual(self.show_pb(node)[0]['Status'], six.b("OK"))
 
-		self.assertEqual(self.show_pb(node)[0].status, six.b("OK"))
+        node.stop()
 
-		with open(path.join(node.logs_dir, "backup_ptrack.log"), "wb") as backup_log:
-			backup_log.write(self.backup_pb(node, backup_type="ptrack", options=["--verbose", "-j", "4"]))
+#    @unittest.skip("123")
+    def test_ptrack_threads_stream(self):
+        """ptrack multi thread backup mode and stream"""
+        fname = self.id().split('.')[3]
+        print '{0} started'.format(fname)
+        node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica', 'ptrack_enable': 'on', 'max_wal_senders': '2'}
+            )
+#        node.append_conf("pg_hba.conf", "local replication all trust")
+#        node.append_conf("pg_hba.conf", "host replication all 127.0.0.1/32 trust")
+        node.start()
+        self.assertEqual(self.init_pb(node), six.b(""))
 
-		self.assertEqual(self.show_pb(node)[0].status, six.b("OK"))
+        with open(path.join(node.logs_dir, "backup_full.log"), "wb") as backup_log:
+            backup_log.write(self.backup_pb(
+                node,
+                backup_type="full",
+                options=["--verbose", "-j", "4", "--stream"]
+            ))
 
-		node.stop()
+        self.assertEqual(self.show_pb(node)[0]['Status'], six.b("OK"))
 
-	def test_ptrack_threads_stream_5(self):
-		"""ptrack multi thread backup mode and stream"""
-		node = self.make_bnode(
-			base_dir="tmp_dirs/backup/ptrack_threads_stream_5",
-			options={
-				"ptrack_enable": "on",
-				"max_wal_senders": "5"
-			}
-		)
-		node.append_conf("pg_hba.conf", "local replication all trust")
-		node.append_conf("pg_hba.conf", "host replication all 127.0.0.1/32 trust")
-		node.start()
-		self.assertEqual(self.init_pb(node), six.b(""))
+        with open(path.join(node.logs_dir, "backup_ptrack.log"), "wb") as backup_log:
+            backup_log.write(self.backup_pb(
+                node,
+                backup_type="ptrack",
+                options=["--verbose", "-j", "4", "--stream"]
+            ))
 
-		with open(path.join(node.logs_dir, "backup_full.log"), "wb") as backup_log:
-			backup_log.write(self.backup_pb(
-				node,
-				backup_type="full",
-				options=["--verbose", "-j", "4", "--stream"]
-			))
+        self.assertEqual(self.show_pb(node)[1]['Status'], six.b("OK"))
 
-		self.assertEqual(self.show_pb(node)[0].status, six.b("OK"))
 
-		with open(path.join(node.logs_dir, "backup_ptrack.log"), "wb") as backup_log:
-			backup_log.write(self.backup_pb(
-				node,
-				backup_type="ptrack",
-				options=["--verbose", "-j", "4", "--stream"]
-			))
 
-		self.assertEqual(self.show_pb(node)[0].status, six.b("OK"))
 
-		node.stop()
+        node.stop()
 
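Note: besides the new ptrack scenarios, this rewrite changes the contract of the show_pb() helper: tests now index backups as dictionaries keyed by column name ('ID', 'Mode', 'Status', and "parent-backup-id") instead of ShowBackup attribute access. A standalone toy illustration of the new access pattern (stub data, not real helper output):

    import six

    # Stub list standing in for the new show_pb() return value (assumed shape).
    backups = [
        {'ID': six.b('ID0001'), 'Mode': six.b('FULL'), 'Status': six.b('OK')},
        {'ID': six.b('ID0002'), 'Mode': six.b('PAGE'), 'Status': six.b('OK')},
    ]
    assert backups[0]['Mode'] == six.b('FULL')    # old style: backups[0].mode
    assert backups[1]['Status'] == six.b('OK')    # old style: backups[1].status
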
tests/delete_test.py

@@ -1,88 +1,102 @@
 import unittest
 from os import path
 import six
-from .pb_lib import ProbackupTest
+from .ptrack_helpers import ProbackupTest, ProbackupException
 from testgres import stop_all
 import subprocess
 
 
 class DeleteTest(ProbackupTest, unittest.TestCase):
 
-	def __init__(self, *args, **kwargs):
-		super(DeleteTest, self).__init__(*args, **kwargs)
+    def __init__(self, *args, **kwargs):
+        super(DeleteTest, self).__init__(*args, **kwargs)
 
-	@classmethod
-	def tearDownClass(cls):
-		stop_all()
+#    @classmethod
+#    def tearDownClass(cls):
+#        stop_all()
+#    @unittest.skip("123")
+    def test_delete_full_backups(self):
+        """delete full backups"""
+        fname = self.id().split('.')[3]
+        print '{0} started'.format(fname)
+        node = self.make_simple_node(base_dir="tmp_dirs/delete/{0}".format(fname),
+            set_archiving=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica'}
+            )
+        node.start()
+        self.assertEqual(self.init_pb(node), six.b(""))
+        node.pgbench_init()
 
-	def test_delete_full_backups_1(self):
-		"""delete full backups"""
-		node = self.make_bnode(base_dir="tmp_dirs/delete/delete_full_backups_1")
-		node.start()
-		self.assertEqual(self.init_pb(node), six.b(""))
-		node.pgbench_init()
+        # full backup mode
+        with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
+            backup_log.write(self.backup_pb(node, options=["--verbose"]))
 
-		# full backup mode
-		with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
-			backup_log.write(self.backup_pb(node, options=["--verbose"]))
+        pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+        pgbench.wait()
+        pgbench.stdout.close()
 
-		pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-		pgbench.wait()
-		pgbench.stdout.close()
+        with open(path.join(node.logs_dir, "backup_2.log"), "wb") as backup_log:
+            backup_log.write(self.backup_pb(node, options=["--verbose"]))
 
-		with open(path.join(node.logs_dir, "backup_2.log"), "wb") as backup_log:
-			backup_log.write(self.backup_pb(node, options=["--verbose"]))
+        pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+        pgbench.wait()
+        pgbench.stdout.close()
 
-		pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-		pgbench.wait()
-		pgbench.stdout.close()
+        with open(path.join(node.logs_dir, "backup_3.log"), "wb") as backup_log:
+            backup_log.write(self.backup_pb(node, options=["--verbose"]))
 
-		with open(path.join(node.logs_dir, "backup_3.log"), "wb") as backup_log:
-			backup_log.write(self.backup_pb(node, options=["--verbose"]))
+        show_backups = self.show_pb(node)
+        id_1 = show_backups[0]['ID']
+        id_3 = show_backups[2]['ID']
+        self.delete_pb(node, show_backups[1]['ID'])
+        show_backups = self.show_pb(node)
+        self.assertEqual(show_backups[0]['ID'], id_1)
+        self.assertEqual(show_backups[1]['ID'], id_3)
 
-		show_backups = self.show_pb(node)
-		id_1 = show_backups[0].id
-		id_2 = show_backups[2].id
-		self.delete_pb(node, show_backups[1].id)
-		show_backups = self.show_pb(node)
-		self.assertEqual(show_backups[0].id, id_1)
-		self.assertEqual(show_backups[1].id, id_2)
+        node.stop()
 
-		node.stop()
+#    @unittest.skip("123")
+    def test_delete_increment(self):
+        """delete increment and all after him"""
+        fname = self.id().split('.')[3]
+        print '{0} started'.format(fname)
+        node = self.make_simple_node(base_dir="tmp_dirs/delete/{0}".format(fname),
+            set_archiving=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica'}
+            )
+        node.start()
+        self.assertEqual(self.init_pb(node), six.b(""))
 
-	def test_delete_increment_2(self):
-		"""delete increment and all after him"""
-		node = self.make_bnode(base_dir="tmp_dirs/delete/delete_increment_2")
-		node.start()
-		self.assertEqual(self.init_pb(node), six.b(""))
+        # full backup mode
+        with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
+            backup_log.write(self.backup_pb(node, options=["--verbose"]))
 
-		# full backup mode
-		with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
-			backup_log.write(self.backup_pb(node, options=["--verbose"]))
+        # page backup mode
+        with open(path.join(node.logs_dir, "backup_2.log"), "wb") as backup_log:
+            backup_log.write(self.backup_pb(node, backup_type="page", options=["--verbose"]))
 
-		# page backup mode
-		with open(path.join(node.logs_dir, "backup_2.log"), "wb") as backup_log:
-			backup_log.write(self.backup_pb(node, backup_type="page", options=["--verbose"]))
+        # page backup mode
+        with open(path.join(node.logs_dir, "backup_3.log"), "wb") as backup_log:
+            backup_log.write(self.backup_pb(node, backup_type="page", options=["--verbose"]))
 
-		# page backup mode
-		with open(path.join(node.logs_dir, "backup_3.log"), "wb") as backup_log:
-			backup_log.write(self.backup_pb(node, backup_type="page", options=["--verbose"]))
+        # full backup mode
+        self.backup_pb(node)
 
-		# full backup mode
-		self.backup_pb(node)
+        show_backups = self.show_pb(node)
 
-		show_backups = self.show_pb(node)
-		self.assertEqual(len(show_backups), 4)
+        self.assertEqual(len(show_backups), 4)
 
-		# delete first page backup
-		self.delete_pb(node, show_backups[2].id)
+        # delete first page backup
+        self.delete_pb(node, show_backups[1]['ID'])
 
-		show_backups = self.show_pb(node)
-		self.assertEqual(len(show_backups), 2)
+        show_backups = self.show_pb(node)
+        self.assertEqual(len(show_backups), 2)
 
-		self.assertEqual(show_backups[0].mode, six.b("FULL"))
-		self.assertEqual(show_backups[0].status, six.b("OK"))
-		self.assertEqual(show_backups[1].mode, six.b("FULL"))
-		self.assertEqual(show_backups[1].status, six.b("OK"))
+        self.assertEqual(show_backups[0]['Mode'], six.b("FULL"))
+        self.assertEqual(show_backups[0]['Status'], six.b("OK"))
+        self.assertEqual(show_backups[1]['Mode'], six.b("FULL"))
+        self.assertEqual(show_backups[1]['Status'], six.b("OK"))
 
-		node.stop()
+        node.stop()
 
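Note: test_delete_increment encodes the retention rule the suite relies on: deleting an incremental backup also removes every backup that depends on it, while unrelated FULL backups survive. A toy model of that pruning rule (illustration only, not pg_probackup code; the chain layout mirrors the test's four backups):

    # FULL, PAGE, PAGE, FULL -- delete the first PAGE, expect two FULLs left.
    chain = ['FULL#1', 'PAGE#2', 'PAGE#3', 'FULL#4']

    def delete_with_descendants(chain, victim):
        i = chain.index(victim)
        # drop the victim and every later increment until the next FULL
        # starts an independent chain
        j = i + 1
        while j < len(chain) and not chain[j].startswith('FULL'):
            j += 1
        return chain[:i] + chain[j:]

    assert delete_with_descendants(chain, 'PAGE#2') == ['FULL#1', 'FULL#4']
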
tests/expected/option_help.out

@@ -1,59 +1,40 @@
-pg_probackup manage backup/recovery of PostgreSQL database.
+pg_probackup - utility to manage backup/recovery of PostgreSQL database.
 
-Usage:
-  pg_probackup [option...] init
-  pg_probackup [option...] backup
-  pg_probackup [option...] restore [backup-ID]
-  pg_probackup [option...] show [backup-ID]
-  pg_probackup [option...] validate [backup-ID]
-  pg_probackup [option...] delete backup-ID
-  pg_probackup [option...] delwal [backup-ID]
-  pg_probackup [option...] retention show|purge
+  pg_probackup help
 
-Common Options:
-  -B, --backup-path=PATH    location of the backup storage area
-  -D, --pgdata=PATH         location of the database storage area
+  pg_probackup version
 
-Backup options:
-  -b, --backup-mode=MODE    backup mode (full, page, ptrack)
-  -C, --smooth-checkpoint   do smooth checkpoint before backup
-      --stream              stream the transaction log and include it in the backup
-      --archive-timeout     wait timeout for WAL segment archiving
-  -S, --slot=SLOTNAME       replication slot to use
-      --backup-pg-log       backup of pg_log directory
-  -j, --threads=NUM         number of parallel threads
-      --progress            show progress
+  pg_probackup init -B backup-path -D pgdata-dir
 
-Restore options:
-      --time                time stamp up to which recovery will proceed
-      --xid                 transaction ID up to which recovery will proceed
-      --inclusive           whether we stop just after the recovery target
-      --timeline            recovering into a particular timeline
-  -T, --tablespace-mapping=OLDDIR=NEWDIR
-                            relocate the tablespace in directory OLDDIR to NEWDIR
-  -j, --threads=NUM         number of parallel threads
-      --progress            show progress
+  pg_probackup set-config -B backup-dir
+                 [-d dbname] [-h host] [-p port] [-U username]
+                 [--retention-redundancy=retention-redundancy]]
+                 [--retention-window=retention-window]
 
-Delete options:
-      --wal                 remove unnecessary wal files
+  pg_probackup show-config -B backup-dir
 
-Retention options:
-      --redundancy          specifies how many full backups purge command should keep
-      --window              specifies the number of days of recoverability
+  pg_probackup backup -B backup-path -b backup-mode
+                 [-D pgdata-dir] [-C] [--stream [-S slot-name]] [--backup-pg-log]
+                 [-j num-threads] [--archive-timeout=archive-timeout]
+                 [--progress] [-q] [-v] [--delete-expired]
+                 [-d dbname] [-h host] [-p port] [-U username]
 
-Connection options:
-  -d, --dbname=DBNAME       database to connect
-  -h, --host=HOSTNAME       database server host or socket directory
-  -p, --port=PORT           database server port
-  -U, --username=USERNAME   user name to connect as
-  -w, --no-password         never prompt for password
-  -W, --password            force password prompt
+  pg_probackup restore -B backup-dir
+                [-D pgdata-dir] [-i backup-id]
+                [--time=time|--xid=xid [--inclusive=boolean]]
+                [--timeline=timeline] [-T OLDDIR=NEWDIR]
 
-Generic options:
-  -q, --quiet               don't write any messages
-  -v, --verbose             verbose mode
-      --help                show this help, then exit
-      --version             output version information and exit
+  pg_probackup validate -B backup-dir
+                [-D pgdata-dir] [-i backup-id]
+                [--time=time|--xid=xid [--inclusive=boolean]]
+                [--timeline=timeline] [-T OLDDIR=NEWDIR]
 
+  pg_probackup show -B backup-dir
+                [-i backup-id]
 
+  pg_probackup delete -B backup-dir
+                [--wal] [-i backup-id | --expired] [--force]
 
 Read the website for details. <https://github.com/postgrespro/pg_probackup>
 Report bugs to <https://github.com/postgrespro/pg_probackup/issues>.
 
tests/expected/option_version.out

@@ -1 +1 @@
-pg_probackup 1.1.5
+pg_probackup 1.1.9
 
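Note: the reworked help output above moves from grouped option listings to subcommand-style usage (init/backup/restore/show, plus the new set-config and show-config commands). A sketch of driving that surface from Python, in the same style as the tests' run_pb() helper (the binary and directory paths here are assumptions, not values from the commit):

    import subprocess

    PB = "/usr/local/bin/pg_probackup"    # assumed install path
    BACKUP_DIR = "/tmp/pb_catalog"        # assumed catalog path (must be absolute)
    PGDATA = "/tmp/pgdata"                # assumed cluster data directory

    subprocess.check_output([PB, "init", "-B", BACKUP_DIR, "-D", PGDATA],
                            stderr=subprocess.STDOUT)
    subprocess.check_output([PB, "backup", "-B", BACKUP_DIR, "-b", "full",
                             "-D", PGDATA, "-d", "postgres"],
                            stderr=subprocess.STDOUT)
    print(subprocess.check_output([PB, "show", "-B", BACKUP_DIR]))
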
tests/init_test.py

@@ -1,41 +1,59 @@
 import unittest
+from sys import exit
 import os
 from os import path
 import six
-from .pb_lib import dir_files, ProbackupTest
+from .ptrack_helpers import dir_files, ProbackupTest, ProbackupException
 
+#TODO
 
 class InitTest(ProbackupTest, unittest.TestCase):
 
-	def __init__(self, *args, **kwargs):
-		super(InitTest, self).__init__(*args, **kwargs)
+    def __init__(self, *args, **kwargs):
+        super(InitTest, self).__init__(*args, **kwargs)
 
-	def test_success_1(self):
-		"""Success normal init"""
-		node = self.make_bnode(base_dir="tmp_dirs/init/success_1")
-		self.assertEqual(self.init_pb(node), six.b(""))
-		self.assertEqual(
-			dir_files(self.backup_dir(node)),
-			['backups', 'pg_probackup.conf', 'wal']
-		)
+    def test_success_1(self):
+        """Success normal init"""
+        fname = self.id().split(".")[3]
+        print '{0} started'.format(fname)
+        node = self.make_simple_node(base_dir="tmp_dirs/init/{0}".format(fname))
+        self.assertEqual(self.init_pb(node), six.b(""))
+        self.assertEqual(
+            dir_files(self.backup_dir(node)),
+            ['backups', 'pg_probackup.conf', 'wal']
+        )
 
-	def test_already_exist_2(self):
-		"""Failure with backup catalog already existed"""
-		node = self.make_bnode(base_dir="tmp_dirs/init/already_exist_2")
-		self.init_pb(node)
-		self.assertEqual(
-			self.init_pb(node),
-			six.b("ERROR: backup catalog already exist and it's not empty\n")
-		)
+    def test_already_exist_2(self):
+        """Failure with backup catalog already existed"""
+        fname = self.id().split(".")[3]
+        print '{0} started'.format(fname)
+        node = self.make_simple_node(base_dir="tmp_dirs/init/{0}".format(fname))
+        self.init_pb(node)
+        try:
+            self.init_pb(node)
+            # we should die here because exception is what we expect to happen
+            exit(1)
+        except ProbackupException, e:
+            self.assertEqual(
+                e.message,
+                "ERROR: backup catalog already exist and it's not empty\n"
+                )
 
-	def test_abs_path_3(self):
-		"""failure with backup catalog should be given as absolute path"""
-		node = self.make_bnode(base_dir="tmp_dirs/init/abs_path_3")
-		self.assertEqual(
-			self.run_pb(["init", "-B", path.relpath("%s/backup" % node.base_dir, self.dir_path)]),
-			six.b("ERROR: -B, --backup-path must be an absolute path\n")
-		)
+    def test_abs_path_3(self):
+        """failure with backup catalog should be given as absolute path"""
+        fname = self.id().split(".")[3]
+        print '{0} started'.format(fname)
+        node = self.make_simple_node(base_dir="tmp_dirs/init/{0}".format(fname))
+        try:
+            self.run_pb(["init", "-B", path.relpath("%s/backup" % node.base_dir, self.dir_path)])
+            # we should die here because exception is what we expect to happen
+            exit(1)
+        except ProbackupException, e:
+            self.assertEqual(
+                e.message,
+                "ERROR: -B, --backup-path must be an absolute path\n"
+                )
 
 
 if __name__ == '__main__':
-	unittest.main()
+    unittest.main()
 
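Note: the try/exit(1)/except pattern introduced above relies on Python 2 "except ProbackupException, e" syntax. The same expected-failure check can also be written with unittest's assertRaises; a self-contained sketch with stand-in names (the stub exception and init helper below are not the real ptrack_helpers code):

    import unittest

    class ProbackupException(Exception):
        """Stand-in for tests.ptrack_helpers.ProbackupException (assumption)."""
        def __init__(self, message):
            super(ProbackupException, self).__init__(message)
            self.message = message

    def init_pb_stub(already_initialized):
        # Mimics init_pb(): empty output on success, exception on reuse.
        if already_initialized:
            raise ProbackupException(
                "ERROR: backup catalog already exist and it's not empty\n")
        return b""

    class InitFailure(unittest.TestCase):
        def test_double_init(self):
            self.assertEqual(init_pb_stub(False), b"")
            with self.assertRaises(ProbackupException) as ctx:
                init_pb_stub(True)
            self.assertIn("already exist", ctx.exception.message)

    if __name__ == '__main__':
        unittest.main()
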
tests/option_test.py

@@ -1,132 +1,187 @@
 import unittest
 from os import path
 import six
-from .pb_lib import ProbackupTest
+from .ptrack_helpers import ProbackupTest, ProbackupException
 from testgres import stop_all
 
 
 class OptionTest(ProbackupTest, unittest.TestCase):
 
-	def __init__(self, *args, **kwargs):
-		super(OptionTest, self).__init__(*args, **kwargs)
+    def __init__(self, *args, **kwargs):
+        super(OptionTest, self).__init__(*args, **kwargs)
 
-	@classmethod
-	def tearDownClass(cls):
-		stop_all()
+    @classmethod
+    def tearDownClass(cls):
+        stop_all()
 
-	def test_help_1(self):
-		"""help options"""
-		with open(path.join(self.dir_path, "expected/option_help.out"), "rb") as help_out:
-			self.assertEqual(
-				self.run_pb(["--help"]),
-				help_out.read()
-			)
+    def test_help_1(self):
+        """help options"""
+        fname = self.id().split(".")[3]
+        print '{0} started'.format(fname)
+        with open(path.join(self.dir_path, "expected/option_help.out"), "rb") as help_out:
+            self.assertEqual(
+                self.run_pb(["--help"]),
+                help_out.read()
+            )
 
-	def test_version_2(self):
-		"""help options"""
-		with open(path.join(self.dir_path, "expected/option_version.out"), "rb") as version_out:
-			self.assertEqual(
-				self.run_pb(["--version"]),
-				version_out.read()
-			)
+    def test_version_2(self):
+        """help options"""
+        fname = self.id().split(".")[3]
+        print '{0} started'.format(fname)
+        with open(path.join(self.dir_path, "expected/option_version.out"), "rb") as version_out:
+            self.assertEqual(
+                self.run_pb(["--version"]),
+                version_out.read()
+            )
 
-	def test_without_backup_path_3(self):
-		"""backup command failure without backup mode option"""
-		self.assertEqual(
-			self.run_pb(["backup", "-b", "full"]),
-			six.b("ERROR: required parameter not specified: BACKUP_PATH (-B, --backup-path)\n")
-		)
+    def test_without_backup_path_3(self):
+        """backup command failure without backup mode option"""
+        fname = self.id().split(".")[3]
+        print '{0} started'.format(fname)
+        try:
+            self.run_pb(["backup", "-b", "full"])
+            # we should die here because exception is what we expect to happen
+            exit(1)
+        except ProbackupException, e:
+            self.assertEqual(
+                e.message,
+                'ERROR: required parameter not specified: BACKUP_PATH (-B, --backup-path)\n'
+                )
 
-	def test_options_4(self):
-		node = self.make_bnode(base_dir="tmp_dirs/option/option_common")
-		try:
-			node.stop()
-		except:
-			pass
-		self.assertEqual(self.init_pb(node), six.b(""))
+    def test_options_4(self):
+        """check options test"""
+        fname = self.id().split(".")[3]
+        print '{0} started'.format(fname)
+        node = self.make_simple_node(base_dir="tmp_dirs/option/{0}".format(fname))
+        try:
+            node.stop()
+        except:
+            pass
+        self.assertEqual(self.init_pb(node), six.b(""))
 
-		# backup command failure without backup mode option
-		self.assertEqual(
-			self.run_pb(["backup", "-B", self.backup_dir(node), "-D", node.data_dir]),
-			six.b("ERROR: required parameter not specified: BACKUP_MODE (-b, --backup-mode)\n")
-		)
+        # backup command failure without backup mode option
+        try:
+            self.run_pb(["backup", "-B", self.backup_dir(node), "-D", node.data_dir])
+            # we should die here because exception is what we expect to happen
+            exit(1)
+        except ProbackupException, e:
+            print e.message
+            self.assertEqual(
+                e.message,
+                'ERROR: required parameter not specified: BACKUP_MODE (-b, --backup-mode)\n'
+                )
 
-		# backup command failure with invalid backup mode option
-		self.assertEqual(
-			self.run_pb(["backup", "-b", "bad", "-B", self.backup_dir(node)]),
-			six.b('ERROR: invalid backup-mode "bad"\n')
-		)
+        # backup command failure with invalid backup mode option
+        try:
+            self.run_pb(["backup", "-b", "bad", "-B", self.backup_dir(node)])
+            # we should die here because exception is what we expect to happen
+            exit(1)
+        except ProbackupException, e:
+            self.assertEqual(
+                e.message,
+                'ERROR: invalid backup-mode "bad"\n'
+                )
 
-		# delete failure without ID
-		self.assertEqual(
-			self.run_pb(["delete", "-B", self.backup_dir(node)]),
-			six.b("ERROR: required backup ID not specified\n")
-		)
+        # delete failure without ID
+        try:
+            self.run_pb(["delete", "-B", self.backup_dir(node)])
+            # we should die here because exception is what we expect to happen
+            exit(1)
+        except ProbackupException, e:
+            self.assertEqual(
+                e.message,
+                'ERROR: required backup ID not specified\n'
+                )
 
-		node.start()
+        node.start()
 
-		# syntax error in pg_probackup.conf
-		with open(path.join(self.backup_dir(node), "pg_probackup.conf"), "a") as conf:
-			conf.write(" = INFINITE\n")
+        # syntax error in pg_probackup.conf
+        with open(path.join(self.backup_dir(node), "pg_probackup.conf"), "a") as conf:
+            conf.write(" = INFINITE\n")
 
-		self.assertEqual(
-			self.backup_pb(node),
-			six.b('ERROR: syntax error in " = INFINITE"\n')
-		)
+        try:
+            self.backup_pb(node)
+            # we should die here because exception is what we expect to happen
+            exit(1)
+        except ProbackupException, e:
+            self.assertEqual(
+                e.message,
+                'ERROR: syntax error in " = INFINITE"\n'
+                )
 
-		self.clean_pb(node)
-		self.assertEqual(self.init_pb(node), six.b(""))
+        self.clean_pb(node)
+        self.assertEqual(self.init_pb(node), six.b(""))
 
-		# invalid value in pg_probackup.conf
-		with open(path.join(self.backup_dir(node), "pg_probackup.conf"), "a") as conf:
-			conf.write("BACKUP_MODE=\n")
+        # invalid value in pg_probackup.conf
+        with open(path.join(self.backup_dir(node), "pg_probackup.conf"), "a") as conf:
+            conf.write("BACKUP_MODE=\n")
 
-		self.assertEqual(
-			self.backup_pb(node, backup_type=None),
-			six.b('ERROR: invalid backup-mode ""\n')
-		)
+        try:
+            self.backup_pb(node, backup_type=None),
+            # we should die here because exception is what we expect to happen
+            exit(1)
+        except ProbackupException, e:
+            self.assertEqual(
+                e.message,
+                'ERROR: invalid backup-mode ""\n'
+                )
 
-		self.clean_pb(node)
-		self.assertEqual(self.init_pb(node), six.b(""))
+        self.clean_pb(node)
 
-		# Command line parameters should override file values
-		self.init_pb(node)
-		with open(path.join(self.backup_dir(node), "pg_probackup.conf"), "a") as conf:
-			conf.write("REDUNDANCY=1\n")
+        # Command line parameters should override file values
+        self.assertEqual(self.init_pb(node), six.b(""))
+        with open(path.join(self.backup_dir(node), "pg_probackup.conf"), "a") as conf:
+            conf.write("retention-redundancy=1\n")
 
-		self.assertEqual(
-			self.retention_show(node, ["--redundancy", "2"]),
-			six.b("# retention policy\nREDUNDANCY=2\n")
-		)
+#       TODO AFTER PGPRO-505
+#        self.assertEqual(
+#            self.retention_show(node, ["--redundancy", "2"]),
+#            six.b("# retention policy\nREDUNDANCY=2\n")
+#        )
 
-		# User cannot send --system-identifier parameter via command line
-		self.assertEqual(
-			self.backup_pb(node, options=["--system-identifier", "123"]),
-			six.b("ERROR: option system-identifier cannot be specified in command line\n")
-		)
+        # User cannot send --system-identifier parameter via command line
+        try:
+            self.backup_pb(node, options=["--system-identifier", "123"]),
+            # we should die here because exception is what we expect to happen
+            exit(1)
+        except ProbackupException, e:
+            self.assertEqual(
+                e.message,
+                'ERROR: option system-identifier cannot be specified in command line\n'
+                )
 
-		# invalid value in pg_probackup.conf
-		with open(path.join(self.backup_dir(node), "pg_probackup.conf"), "a") as conf:
-			conf.write("SMOOTH_CHECKPOINT=FOO\n")
+        # invalid value in pg_probackup.conf
+        with open(path.join(self.backup_dir(node), "pg_probackup.conf"), "a") as conf:
+            conf.write("SMOOTH_CHECKPOINT=FOO\n")
 
-		self.assertEqual(
-			self.backup_pb(node),
-			six.b("ERROR: option -C, --smooth-checkpoint should be a boolean: 'FOO'\n")
-		)
+        try:
+            self.backup_pb(node),
+            # we should die here because exception is what we expect to happen
+            exit(1)
+        except ProbackupException, e:
+            self.assertEqual(
+                e.message,
+                "ERROR: option -C, --smooth-checkpoint should be a boolean: 'FOO'\n"
+                )
 
-		self.clean_pb(node)
-		self.assertEqual(self.init_pb(node), six.b(""))
+        self.clean_pb(node)
+        self.assertEqual(self.init_pb(node), six.b(""))
 
-		# invalid option in pg_probackup.conf
-		with open(path.join(self.backup_dir(node), "pg_probackup.conf"), "a") as conf:
-			conf.write("TIMELINEID=1\n")
+        # invalid option in pg_probackup.conf
+        with open(path.join(self.backup_dir(node), "pg_probackup.conf"), "a") as conf:
+            conf.write("TIMELINEID=1\n")
 
-		self.assertEqual(
-			self.backup_pb(node),
-			six.b('ERROR: invalid option "TIMELINEID"\n')
-		)
+        try:
+            self.backup_pb(node),
+            # we should die here because exception is what we expect to happen
+            exit(1)
+        except ProbackupException, e:
+            self.assertEqual(
+                e.message,
+                'ERROR: invalid option "TIMELINEID"\n'
+                )
 
-		self.clean_pb(node)
-		self.assertEqual(self.init_pb(node), six.b(""))
+        self.clean_pb(node)
+        self.assertEqual(self.init_pb(node), six.b(""))
 
-		node.stop()
+        node.stop()
 
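Note: test_options_4 exercises pg_probackup.conf validation by appending deliberately broken lines and matching the resulting error text. A toy model of the checks being exercised (illustration only; KNOWN_OPTIONS is a made-up subset, not the real option table):

    KNOWN_OPTIONS = {"BACKUP_MODE", "SMOOTH_CHECKPOINT", "retention-redundancy"}

    def check_conf_line(line):
        key, _, value = line.partition("=")
        key, value = key.strip(), value.strip()
        if not key:
            raise ValueError('syntax error in "%s"' % line.strip())
        if key not in KNOWN_OPTIONS:
            raise ValueError('invalid option "%s"' % key)
        if key == "SMOOTH_CHECKPOINT" and value not in ("ON", "OFF"):
            raise ValueError(
                "option -C, --smooth-checkpoint should be a boolean: '%s'" % value)

    check_conf_line("retention-redundancy=1")   # accepted
    # check_conf_line(" = INFINITE")            # raises: syntax error
    # check_conf_line("TIMELINEID=1")           # raises: invalid option
    # check_conf_line("SMOOTH_CHECKPOINT=FOO")  # raises: not a boolean
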
							
								
								
									
tests/pb_lib.py (488 lines changed)
@@ -7,298 +7,298 @@ from testgres import get_new_node
 
 
 def dir_files(base_dir):
-	out_list = []
-	for dir_name, subdir_list, file_list in os.walk(base_dir):
-		if dir_name != base_dir:
-			out_list.append(path.relpath(dir_name, base_dir))
-		for fname in file_list:
-			out_list.append(path.relpath(path.join(dir_name, fname), base_dir))
-	out_list.sort()
-	return out_list
+    out_list = []
+    for dir_name, subdir_list, file_list in os.walk(base_dir):
+        if dir_name != base_dir:
+            out_list.append(path.relpath(dir_name, base_dir))
+        for fname in file_list:
+            out_list.append(path.relpath(path.join(dir_name, fname), base_dir))
+    out_list.sort()
+    return out_list
 
 
 class ShowBackup(object):
-	def __init__(self, split_line):
-		self.counter = 0
+    def __init__(self, split_line):
+        self.counter = 0
 
-		self.id = self.get_inc(split_line)
-		# TODO: parse to datetime
-		if len(split_line) == 12:
-			self.recovery_time = "%s %s" % (self.get_inc(split_line),
-				self.get_inc(split_line))
-		# if recovery time is '----'
-		else:
-			self.recovery_time = self.get_inc(split_line)
-		self.mode = self.get_inc(split_line)
-		self.cur_tli = self.get_inc(split_line)
-		# slash
-		self.counter += 1
-		self.parent_tli = self.get_inc(split_line)
-		# TODO: parse to interval
-		self.time = self.get_inc(split_line)
-		# TODO: maybe rename to size?
-		self.data = self.get_inc(split_line)
-		self.start_lsn = self.get_inc(split_line)
-		self.stop_lsn = self.get_inc(split_line)
-		self.status = self.get_inc(split_line)
+        self.id = self.get_inc(split_line)
+        # TODO: parse to datetime
+        if len(split_line) == 12:
+            self.recovery_time = "%s %s" % (self.get_inc(split_line),
+                self.get_inc(split_line))
+        # if recovery time is '----'
+        else:
+            self.recovery_time = self.get_inc(split_line)
+        self.mode = self.get_inc(split_line)
+        self.cur_tli = self.get_inc(split_line)
+        # slash
+        self.counter += 1
+        self.parent_tli = self.get_inc(split_line)
+        # TODO: parse to interval
+        self.time = self.get_inc(split_line)
+        # TODO: maybe rename to size?
+        self.data = self.get_inc(split_line)
+        self.start_lsn = self.get_inc(split_line)
+        self.stop_lsn = self.get_inc(split_line)
+        self.status = self.get_inc(split_line)
 
-	def get_inc(self, split_line):
-		self.counter += 1
-		return split_line[self.counter - 1]
+    def get_inc(self, split_line):
+        self.counter += 1
+        return split_line[self.counter - 1]
 
 
 class ProbackupTest(object):
-	def __init__(self, *args, **kwargs):
-		super(ProbackupTest, self).__init__(*args, **kwargs)
-		self.test_env = os.environ.copy()
-		envs_list = [
-			"LANGUAGE",
-			"LC_ALL",
-			"PGCONNECT_TIMEOUT",
-			"PGDATA",
-			"PGDATABASE",
-			"PGHOSTADDR",
-			"PGREQUIRESSL",
-			"PGSERVICE",
-			"PGSSLMODE",
-			"PGUSER",
-			"PGPORT",
-			"PGHOST"
-		]
+    def __init__(self, *args, **kwargs):
+        super(ProbackupTest, self).__init__(*args, **kwargs)
+        self.test_env = os.environ.copy()
+        envs_list = [
+            "LANGUAGE",
+            "LC_ALL",
+            "PGCONNECT_TIMEOUT",
+            "PGDATA",
+            "PGDATABASE",
+            "PGHOSTADDR",
+            "PGREQUIRESSL",
+            "PGSERVICE",
+            "PGSSLMODE",
+            "PGUSER",
+            "PGPORT",
+            "PGHOST"
+        ]
 
-		for e in envs_list:
-			try:
-				del self.test_env[e]
-			except:
-				pass
+        for e in envs_list:
+            try:
+                del self.test_env[e]
+            except:
+                pass
 
-		self.test_env["LC_MESSAGES"] = "C"
-		self.test_env["LC_TIME"] = "C"
+        self.test_env["LC_MESSAGES"] = "C"
+        self.test_env["LC_TIME"] = "C"
 
-		self.dir_path = path.dirname(os.path.realpath(__file__))
-		try:
-			os.makedirs(path.join(self.dir_path, "tmp_dirs"))
-		except:
-			pass
-		self.probackup_path = os.path.abspath(path.join(
-			self.dir_path,
-			"../pg_probackup"
-		))
+        self.dir_path = path.dirname(os.path.realpath(__file__))
+        try:
+            os.makedirs(path.join(self.dir_path, "tmp_dirs"))
+        except:
+            pass
+        self.probackup_path = os.path.abspath(path.join(
+            self.dir_path,
+            "../pg_probackup"
+        ))
 
-	def arcwal_dir(self, node):
-		return "%s/backup/wal" % node.base_dir
+    def arcwal_dir(self, node):
+        return "%s/backup/wal" % node.base_dir
 
-	def backup_dir(self, node):
-		return os.path.abspath("%s/backup" % node.base_dir)
+    def backup_dir(self, node):
+        return os.path.abspath("%s/backup" % node.base_dir)
 
-	def make_bnode(self, base_dir=None, allows_streaming=False, options={}):
-		real_base_dir = path.join(self.dir_path, base_dir)
-		shutil.rmtree(real_base_dir, ignore_errors=True)
+    def make_bnode(self, base_dir=None, allows_streaming=False, options={}):
+        real_base_dir = path.join(self.dir_path, base_dir)
+        shutil.rmtree(real_base_dir, ignore_errors=True)
 
-		node = get_new_node('test', base_dir=real_base_dir)
-		node.init(allows_streaming=allows_streaming)
+        node = get_new_node('test', base_dir=real_base_dir)
+        node.init(allows_streaming=allows_streaming)
 
-		if not allows_streaming:
-			node.append_conf("postgresql.conf", "wal_level = hot_standby")
-		node.append_conf("postgresql.conf", "archive_mode = on")
-		node.append_conf(
-			"postgresql.conf",
-			"""archive_command = 'cp "%%p" "%s/%%f"'""" % os.path.abspath(self.arcwal_dir(node))
-		)
+        if not allows_streaming:
+            node.append_conf("postgresql.conf", "wal_level = hot_standby")
+        node.append_conf("postgresql.conf", "archive_mode = on")
+        node.append_conf(
+            "postgresql.conf",
+            """archive_command = 'cp "%%p" "%s/%%f"'""" % os.path.abspath(self.arcwal_dir(node))
+        )
 
-		for key, value in six.iteritems(options):
-			node.append_conf("postgresql.conf", "%s = %s" % (key, value))
+        for key, value in six.iteritems(options):
+            node.append_conf("postgresql.conf", "%s = %s" % (key, value))
 
-		return node
+        return node
 
-	def make_bnode_replica(self, root_node, base_dir=None, options={}):
-		real_base_dir = path.join(self.dir_path, base_dir)
-		shutil.rmtree(real_base_dir, ignore_errors=True)
+    def make_bnode_replica(self, root_node, base_dir=None, options={}):
+        real_base_dir = path.join(self.dir_path, base_dir)
+        shutil.rmtree(real_base_dir, ignore_errors=True)
 
-		root_node.backup("basebackup")
+        root_node.backup("basebackup")
 
-		replica = get_new_node("replica", base_dir=real_base_dir)
-		# replica.init_from_backup(root_node, "data_replica", has_streaming=True)
+        replica = get_new_node("replica", base_dir=real_base_dir)
+        # replica.init_from_backup(root_node, "data_replica", has_streaming=True)
 
-		# Move data from backup
-		backup_path = os.path.join(root_node.base_dir, "basebackup")
-		shutil.move(backup_path, replica.data_dir)
-		os.chmod(replica.data_dir, 0o0700)
+        # Move data from backup
+        backup_path = os.path.join(root_node.base_dir, "basebackup")
+        shutil.move(backup_path, replica.data_dir)
+        os.chmod(replica.data_dir, 0o0700)
 
-		# Change port in config file
-		replica.append_conf(
-			"postgresql.conf",
-			"port = {}".format(replica.port)
-		)
-		# Enable streaming
-		replica.enable_streaming(root_node)
+        # Change port in config file
+        replica.append_conf(
+            "postgresql.conf",
+            "port = {}".format(replica.port)
+        )
+        # Enable streaming
+        replica.enable_streaming(root_node)
 
-		for key, value in six.iteritems(options):
-			replica.append_conf("postgresql.conf", "%s = %s" % (key, value))
+        for key, value in six.iteritems(options):
+            replica.append_conf("postgresql.conf", "%s = %s" % (key, value))
 
-		return replica
+        return replica
 
-	def run_pb(self, command):
-		try:
-			return subprocess.check_output(
-				[self.probackup_path] + command,
-				stderr=subprocess.STDOUT,
-				env=self.test_env
-			)
-		except subprocess.CalledProcessError as err:
-			return err.output
+    def run_pb(self, command):
+        try:
+            return subprocess.check_output(
+                [self.probackup_path] + command,
+                stderr=subprocess.STDOUT,
+                env=self.test_env
+            )
+        except subprocess.CalledProcessError as err:
+            return err.output
 
-	def init_pb(self, node):
-		return self.run_pb([
-			"init",
-			"-B", self.backup_dir(node),
-			"-D", node.data_dir
-		])
+    def init_pb(self, node):
+        return self.run_pb([
+            "init",
+            "-B", self.backup_dir(node),
+            "-D", node.data_dir
+        ])
 
-	def clean_pb(self, node):
-		shutil.rmtree(self.backup_dir(node), ignore_errors=True)
+    def clean_pb(self, node):
+        shutil.rmtree(self.backup_dir(node), ignore_errors=True)
 
-	def backup_pb(self, node, backup_type="full", options=[]):
-		cmd_list = [
-			"backup",
-			"-D", node.data_dir,
-			"-B", self.backup_dir(node),
-			"-p", "%i" % node.port,
-			"-d", "postgres"
-		]
-		if backup_type:
-			cmd_list += ["-b", backup_type]
+    def backup_pb(self, node, backup_type="full", options=[]):
+        cmd_list = [
+            "backup",
+            "-D", node.data_dir,
+            "-B", self.backup_dir(node),
+            "-p", "%i" % node.port,
+            "-d", "postgres"
+        ]
+        if backup_type:
+            cmd_list += ["-b", backup_type]
 
-		return self.run_pb(cmd_list + options)
+        return self.run_pb(cmd_list + options)
 
-	def backup_pb_proc(self, node, backup_dir, backup_type="full",
-		stdout=None, stderr=None, options=[]):
-		cmd_list = [
-			self.probackup_path,
-			"backup",
-			"-D", node.data_dir,
-			"-B", backup_dir,
-			"-p", "%i" % (node.port),
-			"-d", "postgres"
-		]
-		if backup_type:
-			cmd_list += ["-b", backup_type]
+    def backup_pb_proc(self, node, backup_dir, backup_type="full",
+        stdout=None, stderr=None, options=[]):
+        cmd_list = [
+            self.probackup_path,
+            "backup",
+            "-D", node.data_dir,
+            "-B", backup_dir,
+            "-p", "%i" % (node.port),
+            "-d", "postgres"
+        ]
+        if backup_type:
+            cmd_list += ["-b", backup_type]
 
-		proc = subprocess.Popen(
-			cmd_list + options,
-			stdout=stdout,
-			stderr=stderr
-		)
+        proc = subprocess.Popen(
+            cmd_list + options,
+            stdout=stdout,
+            stderr=stderr
+        )
 
-		return proc
+        return proc
 
-	def restore_pb(self, node, id=None, options=[]):
| 		cmd_list = [ | ||||
| 			"-D", node.data_dir, | ||||
| 			"-B", self.backup_dir(node), | ||||
| 			"restore" | ||||
| 		] | ||||
| 		if id: | ||||
| 			cmd_list.append(id) | ||||
|     def restore_pb(self, node, id=None, options=[]): | ||||
|         cmd_list = [ | ||||
|             "-D", node.data_dir, | ||||
|             "-B", self.backup_dir(node), | ||||
|             "restore" | ||||
|         ] | ||||
|         if id: | ||||
|             cmd_list.append(id) | ||||
|  | ||||
| 		# print(cmd_list) | ||||
| 		return self.run_pb(cmd_list + options) | ||||
|         # print(cmd_list) | ||||
|         return self.run_pb(cmd_list + options) | ||||
|  | ||||
| 	def show_pb(self, node, id=None, options=[], as_text=False): | ||||
| 		cmd_list = [ | ||||
| 			"-B", self.backup_dir(node), | ||||
| 			"show", | ||||
| 		] | ||||
| 		if id: | ||||
| 			cmd_list += [id] | ||||
|     def show_pb(self, node, id=None, options=[], as_text=False): | ||||
|         cmd_list = [ | ||||
|             "show", | ||||
|             "-B", self.backup_dir(node), | ||||
|         ] | ||||
|         if id: | ||||
|             cmd_list += [id] | ||||
|  | ||||
| 		# print(cmd_list) | ||||
| 		if as_text: | ||||
| 			return self.run_pb(options + cmd_list) | ||||
| 		elif id is None: | ||||
| 			return [ShowBackup(line.split()) for line in self.run_pb(options + cmd_list).splitlines()[3:]] | ||||
| 		else: | ||||
| 			return dict([ | ||||
| 				line.split(six.b("=")) | ||||
| 				for line in self.run_pb(options + cmd_list).splitlines() | ||||
| 				if line[0] != six.b("#")[0] | ||||
| 			]) | ||||
|         # print(cmd_list) | ||||
|         if as_text: | ||||
|             return self.run_pb(options + cmd_list) | ||||
|         elif id is None: | ||||
|             return [ShowBackup(line.split()) for line in self.run_pb(options + cmd_list).splitlines()[3:]] | ||||
|         else: | ||||
|             return dict([ | ||||
|                 line.split(six.b("=")) | ||||
|                 for line in self.run_pb(options + cmd_list).splitlines() | ||||
|                 if line[0] != six.b("#")[0] | ||||
|             ]) | ||||
|  | ||||
| 	def validate_pb(self, node, id=None, options=[]): | ||||
| 		cmd_list = [ | ||||
| 			"-B", self.backup_dir(node), | ||||
| 			"validate", | ||||
| 		] | ||||
| 		if id: | ||||
| 			cmd_list += [id] | ||||
|     def validate_pb(self, node, id=None, options=[]): | ||||
|         cmd_list = [ | ||||
|             "-B", self.backup_dir(node), | ||||
|             "validate", | ||||
|         ] | ||||
|         if id: | ||||
|             cmd_list += [id] | ||||
|  | ||||
| 		# print(cmd_list) | ||||
| 		return self.run_pb(options + cmd_list) | ||||
|         # print(cmd_list) | ||||
|         return self.run_pb(options + cmd_list) | ||||
|  | ||||
| 	def delete_pb(self, node, id=None, options=[]): | ||||
| 		cmd_list = [ | ||||
| 			"-B", self.backup_dir(node), | ||||
| 			"delete", | ||||
| 		] | ||||
| 		if id: | ||||
| 			cmd_list += [id] | ||||
|     def delete_pb(self, node, id=None, options=[]): | ||||
|         cmd_list = [ | ||||
|             "-B", self.backup_dir(node), | ||||
|             "delete", | ||||
|         ] | ||||
|         if id: | ||||
|             cmd_list += [id] | ||||
|  | ||||
| 		# print(cmd_list) | ||||
| 		return self.run_pb(options + cmd_list) | ||||
|         # print(cmd_list) | ||||
|         return self.run_pb(options + cmd_list) | ||||
|  | ||||
| 	def retention_purge_pb(self, node, options=[]): | ||||
| 		cmd_list = [ | ||||
| 			"-B", self.backup_dir(node), | ||||
| 			"retention", "purge", | ||||
| 		] | ||||
|     def retention_purge_pb(self, node, options=[]): | ||||
|         cmd_list = [ | ||||
|             "-B", self.backup_dir(node), | ||||
|             "retention", "purge", | ||||
|         ] | ||||
|  | ||||
| 		return self.run_pb(options + cmd_list) | ||||
|         return self.run_pb(options + cmd_list) | ||||
|  | ||||
| 	def retention_show(self, node, options=[]): | ||||
| 		cmd_list = [ | ||||
| 			"-B", self.backup_dir(node), | ||||
| 			"retention", "show", | ||||
| 		] | ||||
|     def retention_show(self, node, options=[]): | ||||
|         cmd_list = [ | ||||
|             "-B", self.backup_dir(node), | ||||
|             "retention", "show", | ||||
|         ] | ||||
|  | ||||
| 		return self.run_pb(options + cmd_list) | ||||
|         return self.run_pb(options + cmd_list) | ||||
|  | ||||
| 	def get_control_data(self, node): | ||||
| 		pg_controldata = node.get_bin_path("pg_controldata") | ||||
| 		out_data = {} | ||||
| 		lines = subprocess.check_output( | ||||
| 			[pg_controldata] + ["-D", node.data_dir], | ||||
| 			stderr=subprocess.STDOUT, | ||||
| 			env=self.test_env | ||||
| 		).splitlines() | ||||
| 		for l in lines: | ||||
| 			key, value = l.split(b":", maxsplit=1) | ||||
| 			out_data[key.strip()] = value.strip() | ||||
| 		return out_data | ||||
|     def get_control_data(self, node): | ||||
|         pg_controldata = node.get_bin_path("pg_controldata") | ||||
|         out_data = {} | ||||
|         lines = subprocess.check_output( | ||||
|             [pg_controldata] + ["-D", node.data_dir], | ||||
|             stderr=subprocess.STDOUT, | ||||
|             env=self.test_env | ||||
|         ).splitlines() | ||||
|         for l in lines: | ||||
|             key, value = l.split(b":", maxsplit=1) | ||||
|             out_data[key.strip()] = value.strip() | ||||
|         return out_data | ||||
|  | ||||
| 	def get_recovery_conf(self, node): | ||||
| 		out_dict = {} | ||||
| 		with open(path.join(node.data_dir, "recovery.conf"), "r") as recovery_conf: | ||||
| 			for line in recovery_conf: | ||||
| 				try: | ||||
| 					key, value = line.split("=") | ||||
| 				except: | ||||
| 					continue | ||||
| 				out_dict[key.strip()] = value.strip(" '").replace("'\n", "") | ||||
|     def get_recovery_conf(self, node): | ||||
|         out_dict = {} | ||||
|         with open(path.join(node.data_dir, "recovery.conf"), "r") as recovery_conf: | ||||
|             for line in recovery_conf: | ||||
|                 try: | ||||
|                     key, value = line.split("=") | ||||
|                 except: | ||||
|                     continue | ||||
|                 out_dict[key.strip()] = value.strip(" '").replace("'\n", "") | ||||
|  | ||||
| 		return out_dict | ||||
|         return out_dict | ||||
|  | ||||
| 	def wrong_wal_clean(self, node, wal_size): | ||||
| 		wals_dir = path.join(self.backup_dir(node), "wal") | ||||
| 		wals = [f for f in listdir(wals_dir) if path.isfile(path.join(wals_dir, f))] | ||||
| 		wals.sort() | ||||
| 		file_path = path.join(wals_dir, wals[-1]) | ||||
| 		if path.getsize(file_path) != wal_size: | ||||
| 			os.remove(file_path) | ||||
|     def wrong_wal_clean(self, node, wal_size): | ||||
|         wals_dir = path.join(self.backup_dir(node), "wal") | ||||
|         wals = [f for f in listdir(wals_dir) if path.isfile(path.join(wals_dir, f))] | ||||
|         wals.sort() | ||||
|         file_path = path.join(wals_dir, wals[-1]) | ||||
|         if path.getsize(file_path) != wal_size: | ||||
|             os.remove(file_path) | ||||
|  | ||||
| 	def guc_wal_segment_size(self, node): | ||||
| 		var = node.execute("postgres", "select setting from pg_settings where name = 'wal_segment_size'") | ||||
| 		return int(var[0][0]) * self.guc_wal_block_size(node) | ||||
|     def guc_wal_segment_size(self, node): | ||||
|         var = node.execute("postgres", "select setting from pg_settings where name = 'wal_segment_size'") | ||||
|         return int(var[0][0]) * self.guc_wal_block_size(node) | ||||
|  | ||||
| 	def guc_wal_block_size(self, node): | ||||
| 		var = node.execute("postgres", "select setting from pg_settings where name = 'wal_block_size'") | ||||
| 		return int(var[0][0]) | ||||
|     def guc_wal_block_size(self, node): | ||||
|         var = node.execute("postgres", "select setting from pg_settings where name = 'wal_block_size'") | ||||
|         return int(var[0][0]) | ||||
|   | ||||
							
								
								
									
tests/ptrack_clean.py (new file, 100 lines)
							| @@ -0,0 +1,100 @@ | ||||
| import unittest | ||||
| from testgres import stop_all | ||||
| from .ptrack_helpers import ProbackupTest, idx_ptrack | ||||
|  | ||||
|  | ||||
| class SimpleTest(ProbackupTest, unittest.TestCase): | ||||
|     def __init__(self, *args, **kwargs): | ||||
|         super(SimpleTest, self).__init__(*args, **kwargs) | ||||
|  | ||||
|     def tearDown(self): | ||||
|         # clean_all() | ||||
|         stop_all() | ||||
|  | ||||
| #    @unittest.skip("123") | ||||
|     def test_ptrack_clean(self): | ||||
|         fname = self.id().split('.')[3] | ||||
|         node = self.make_simple_node(base_dir='tmp_dirs/ptrack/{0}'.format(fname), | ||||
|             set_replication=True, | ||||
|             set_archiving=True, | ||||
|             initdb_params=['--data-checksums', '-A trust'], | ||||
|             pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'}) | ||||
|  | ||||
|         node.start() | ||||
|         self.create_tblspace_in_node(node, 'somedata') | ||||
|  | ||||
|         # Create table and indexes | ||||
|         node.psql( | ||||
|             "postgres", | ||||
|             "create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i") | ||||
|         for i in idx_ptrack: | ||||
|             if idx_ptrack[i]['type'] == 'heap': | ||||
|                 continue | ||||
|             node.psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format( | ||||
|                 i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column'])) | ||||
|  | ||||
|         # Make full backup to clean every ptrack | ||||
|         self.init_pb(node) | ||||
|         self.backup_pb(node, backup_type='full', options=['-j100', '--stream']) | ||||
|  | ||||
|         for i in idx_ptrack: | ||||
|             # get fork size and calculate it in pages | ||||
|             idx_ptrack[i]['size'] = self.get_fork_size(node, i) | ||||
|             # get path to heap and index files | ||||
|             idx_ptrack[i]['path'] = self.get_fork_path(node, i) | ||||
|             # get ptrack for every idx | ||||
|             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork( | ||||
|                 idx_ptrack[i]['path'], idx_ptrack[i]['size']) | ||||
|             self.check_ptrack_clean(idx_ptrack[i]) | ||||
|  | ||||
|         # Update everything, vacuum it and make PTRACK BACKUP | ||||
|         node.psql('postgres', 'update t_heap set text = md5(text), tsvector = md5(repeat(tsvector::text, 10))::tsvector;') | ||||
|         node.psql('postgres', 'vacuum t_heap') | ||||
|  | ||||
|         id = self.backup_pb(node, backup_type='ptrack', options=['-j100', '--stream']) | ||||
|         node.psql('postgres', 'checkpoint') | ||||
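|         # The ptrack backup above is expected to reset the ptrack map, and | ||||
|         # the checkpoint should flush the cleared bits into the *_ptrack | ||||
|         # fork files that the loop below reads straight from disk. | ||||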
|  | ||||
|         for i in idx_ptrack: | ||||
|             # get new size of heap and indexes and calculate it in pages | ||||
|             idx_ptrack[i]['size'] = self.get_fork_size(node, i) | ||||
|             # update path to heap and index files in case they've changed | ||||
|             idx_ptrack[i]['path'] = self.get_fork_path(node, i) | ||||
|             # get ptrack for every idx | ||||
|             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork( | ||||
|                 idx_ptrack[i]['path'], idx_ptrack[i]['size']) | ||||
|             # check that ptrack bits are cleaned | ||||
|             self.check_ptrack_clean(idx_ptrack[i]) | ||||
| # | ||||
| #        # Update everything, vacuum it and make PAGE BACKUP | ||||
| #        node.psql('postgres', 'update t_heap set text = md5(text), tsvector = md5(repeat(tsvector::text, 10))::tsvector;') | ||||
| #        node.psql('postgres', 'vacuum t_heap') | ||||
| # | ||||
| #        # Make page backup to clean every ptrack | ||||
| #        self.backup_pb(node, backup_type='page', options=['-j100']) | ||||
| #        node.psql('postgres', 'checkpoint') | ||||
| # | ||||
| #        for i in idx_ptrack: | ||||
| #            # get new size of heap and indexes and calculate it in pages | ||||
| #            idx_ptrack[i]['size'] = self.get_fork_size(node, i) | ||||
| #            # update path to heap and index files in case they`ve changed | ||||
| #            idx_ptrack[i]['path'] = self.get_fork_path(node, i) | ||||
| #            # # get ptrack for every idx | ||||
| #            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork( | ||||
| #                idx_ptrack[i]['path'], idx_ptrack[i]['size']) | ||||
| #            # check that ptrack bits are cleaned | ||||
| #            self.check_ptrack_clean(idx_ptrack[i]) | ||||
|  | ||||
|         self.show_pb(node, as_text=True) | ||||
|         self.clean_pb(node) | ||||
|         node.stop() | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|     unittest.main() | ||||
							
								
								
									
tests/ptrack_cluster.py (new file, 310 lines)
							| @@ -0,0 +1,310 @@ | ||||
| import unittest | ||||
| from testgres import stop_all | ||||
| from .ptrack_helpers import ProbackupTest, idx_ptrack | ||||
|  | ||||
|  | ||||
|  | ||||
| class SimpleTest(ProbackupTest, unittest.TestCase): | ||||
|     def __init__(self, *args, **kwargs): | ||||
|         super(SimpleTest, self).__init__(*args, **kwargs) | ||||
|  | ||||
|     def tearDown(self): | ||||
|         # clean_all() | ||||
|         stop_all() | ||||
|  | ||||
| #    @unittest.skip("123") | ||||
|     def test_ptrack_cluster_btree(self): | ||||
|         print('test_ptrack_cluster_btree started') | ||||
|         node = self.make_simple_node(base_dir="tmp_dirs/ptrack/test_ptrack_cluster_btree", | ||||
|             set_replication=True, | ||||
|             initdb_params=['--data-checksums', '-A trust'], | ||||
|             pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'}) | ||||
|  | ||||
|         node.start() | ||||
|         self.create_tblspace_in_node(node, 'somedata') | ||||
|  | ||||
|         # Create table and indexes | ||||
|         res = node.psql( | ||||
|             "postgres", | ||||
|             "create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i") | ||||
|         for i in idx_ptrack: | ||||
|             if idx_ptrack[i]['type'] == 'heap': | ||||
|                 continue | ||||
|             node.psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format( | ||||
|                 i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column'])) | ||||
|  | ||||
|         node.psql('postgres', 'vacuum t_heap') | ||||
|         node.psql('postgres', 'checkpoint') | ||||
|  | ||||
|         for i in idx_ptrack: | ||||
|             # get size of heap and indexes. size calculated in pages | ||||
|             idx_ptrack[i]['old_size'] = self.get_fork_size(node, i) | ||||
|             # get path to heap and index files | ||||
|             idx_ptrack[i]['path'] = self.get_fork_path(node, i) | ||||
|             # calculate md5sums of pages | ||||
|             idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( | ||||
|                 idx_ptrack[i]['old_size'], idx_ptrack[i]['path']) | ||||
|  | ||||
|         self.init_pb(node) | ||||
|         self.backup_pb(node, backup_type='full', options=['-j100', '--stream']) | ||||
|  | ||||
|         node.psql('postgres', 'delete from t_heap where id%2 = 1') | ||||
|         node.psql('postgres', 'cluster t_heap using t_btree') | ||||
|         node.psql('postgres', 'checkpoint') | ||||
|  | ||||
|         for i in idx_ptrack: | ||||
|             # get new size of heap and indexes. size calculated in pages | ||||
|             idx_ptrack[i]['new_size'] = self.get_fork_size(node, i) | ||||
|             # update path to heap and index files in case they've changed | ||||
|             idx_ptrack[i]['path'] = self.get_fork_path(node, i) | ||||
|             # calculate new md5sums for pages | ||||
|             idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork( | ||||
|                 idx_ptrack[i]['new_size'], idx_ptrack[i]['path']) | ||||
|             # get ptrack for every idx | ||||
|             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork( | ||||
|                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size']) | ||||
|  | ||||
|             # compare pages and check ptrack sanity | ||||
|             self.check_ptrack_sanity(idx_ptrack[i]) | ||||
|  | ||||
|         self.clean_pb(node) | ||||
|         node.stop() | ||||
|  | ||||
|     def test_ptrack_cluster_spgist(self): | ||||
|         print('test_ptrack_cluster_spgist started') | ||||
|         node = self.make_simple_node(base_dir="tmp_dirs/ptrack/test_ptrack_cluster_spgist", | ||||
|             set_replication=True, | ||||
|             initdb_params=['--data-checksums', '-A trust'], | ||||
|             pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'}) | ||||
|  | ||||
|         node.start() | ||||
|         self.create_tblspace_in_node(node, 'somedata') | ||||
|  | ||||
|         # Create table and indexes | ||||
|         res = node.psql( | ||||
|             "postgres", | ||||
|             "create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i") | ||||
|         for i in idx_ptrack: | ||||
|             if idx_ptrack[i]['type'] == 'heap': | ||||
|                 continue | ||||
|             node.psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format( | ||||
|                 i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column'])) | ||||
|  | ||||
|         node.psql('postgres', 'vacuum t_heap') | ||||
|         node.psql('postgres', 'checkpoint') | ||||
|  | ||||
|         for i in idx_ptrack: | ||||
|             # get size of heap and indexes. size calculated in pages | ||||
|             idx_ptrack[i]['old_size'] = self.get_fork_size(node, i) | ||||
|             # get path to heap and index files | ||||
|             idx_ptrack[i]['path'] = self.get_fork_path(node, i) | ||||
|             # calculate md5sums of pages | ||||
|             idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( | ||||
|                 idx_ptrack[i]['old_size'], idx_ptrack[i]['path']) | ||||
|  | ||||
|         self.init_pb(node) | ||||
|         self.backup_pb(node, backup_type='full', options=['-j100', '--stream']) | ||||
|  | ||||
|         node.psql('postgres', 'delete from t_heap where id%2 = 1') | ||||
|         node.psql('postgres', 'cluster t_heap using t_spgist') | ||||
|         node.psql('postgres', 'checkpoint') | ||||
|  | ||||
|         for i in idx_ptrack: | ||||
|             # get new size of heap and indexes. size calculated in pages | ||||
|             idx_ptrack[i]['new_size'] = self.get_fork_size(node, i) | ||||
|             # update path to heap and index files in case they've changed | ||||
|             idx_ptrack[i]['path'] = self.get_fork_path(node, i) | ||||
|             # calculate new md5sums for pages | ||||
|             idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork( | ||||
|                 idx_ptrack[i]['new_size'], idx_ptrack[i]['path']) | ||||
|             # get ptrack for every idx | ||||
|             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork( | ||||
|                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size']) | ||||
|  | ||||
|             # compare pages and check ptrack sanity | ||||
|             self.check_ptrack_sanity(idx_ptrack[i]) | ||||
|  | ||||
|         self.clean_pb(node) | ||||
|         node.stop() | ||||
|  | ||||
|     def test_ptrack_cluster_brin(self): | ||||
|         print('test_ptrack_cluster_brin started') | ||||
|         node = self.make_simple_node(base_dir="tmp_dirs/ptrack/test_ptrack_cluster_brin", | ||||
|             set_replication=True, | ||||
|             initdb_params=['--data-checksums', '-A trust'], | ||||
|             pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'}) | ||||
|  | ||||
|         node.start() | ||||
|         self.create_tblspace_in_node(node, 'somedata') | ||||
|  | ||||
|         # Create table and indexes | ||||
|         res = node.psql( | ||||
|             "postgres", | ||||
|             "create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i") | ||||
|         for i in idx_ptrack: | ||||
|             if idx_ptrack[i]['type'] == 'heap': | ||||
|                 continue | ||||
|             node.psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format( | ||||
|                 i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column'])) | ||||
|  | ||||
|         node.psql('postgres', 'vacuum t_heap') | ||||
|         node.psql('postgres', 'checkpoint') | ||||
|  | ||||
|         for i in idx_ptrack: | ||||
|             # get size of heap and indexes. size calculated in pages | ||||
|             idx_ptrack[i]['old_size'] = self.get_fork_size(node, i) | ||||
|             # get path to heap and index files | ||||
|             idx_ptrack[i]['path'] = self.get_fork_path(node, i) | ||||
|             # calculate md5sums of pages | ||||
|             idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( | ||||
|                 idx_ptrack[i]['old_size'], idx_ptrack[i]['path']) | ||||
|  | ||||
|         self.init_pb(node) | ||||
|         self.backup_pb(node, backup_type='full', options=['-j100', '--stream']) | ||||
|  | ||||
|         node.psql('postgres', 'delete from t_heap where id%2 = 1') | ||||
|         node.psql('postgres', 'cluster t_heap using t_brin') | ||||
|         node.psql('postgres', 'checkpoint') | ||||
|  | ||||
|         for i in idx_ptrack: | ||||
|             # get new size of heap and indexes. size calculated in pages | ||||
|             idx_ptrack[i]['new_size'] = self.get_fork_size(node, i) | ||||
|             # update path to heap and index files in case they've changed | ||||
|             idx_ptrack[i]['path'] = self.get_fork_path(node, i) | ||||
|             # calculate new md5sums for pages | ||||
|             idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork( | ||||
|                 idx_ptrack[i]['new_size'], idx_ptrack[i]['path']) | ||||
|             # get ptrack for every idx | ||||
|             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork( | ||||
|                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size']) | ||||
|  | ||||
|             # compare pages and check ptrack sanity | ||||
|             self.check_ptrack_sanity(idx_ptrack[i]) | ||||
|  | ||||
|         self.clean_pb(node) | ||||
|         node.stop() | ||||
|  | ||||
|     def test_ptrack_cluster_gist(self): | ||||
|         print('test_ptrack_cluster_gist started') | ||||
|         node = self.make_simple_node(base_dir="tmp_dirs/ptrack/test_ptrack_cluster_gist", | ||||
|             set_replication=True, | ||||
|             initdb_params=['--data-checksums', '-A trust'], | ||||
|             pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'}) | ||||
|  | ||||
|         node.start() | ||||
|         self.create_tblspace_in_node(node, 'somedata') | ||||
|  | ||||
|         # Create table and indexes | ||||
|         res = node.psql( | ||||
|             "postgres", | ||||
|             "create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i") | ||||
|         for i in idx_ptrack: | ||||
|             if idx_ptrack[i]['type'] == 'heap': | ||||
|                 continue | ||||
|             node.psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format( | ||||
|                 i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column'])) | ||||
|  | ||||
|         node.psql('postgres', 'vacuum t_heap') | ||||
|         node.psql('postgres', 'checkpoint') | ||||
|  | ||||
|         for i in idx_ptrack: | ||||
|             # get size of heap and indexes. size calculated in pages | ||||
|             idx_ptrack[i]['old_size'] = self.get_fork_size(node, i) | ||||
|             # get path to heap and index files | ||||
|             idx_ptrack[i]['path'] = self.get_fork_path(node, i) | ||||
|             # calculate md5sums of pages | ||||
|             idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( | ||||
|                 idx_ptrack[i]['old_size'], idx_ptrack[i]['path']) | ||||
|  | ||||
|         self.init_pb(node) | ||||
|         self.backup_pb(node, backup_type='full', options=['-j100', '--stream']) | ||||
|  | ||||
|         node.psql('postgres', 'delete from t_heap where id%2 = 1') | ||||
|         node.psql('postgres', 'cluster t_heap using t_gist') | ||||
|         node.psql('postgres', 'checkpoint') | ||||
|  | ||||
|         for i in idx_ptrack: | ||||
|             # get new size of heap and indexes. size calculated in pages | ||||
|             idx_ptrack[i]['new_size'] = self.get_fork_size(node, i) | ||||
|             # update path to heap and index files in case they've changed | ||||
|             idx_ptrack[i]['path'] = self.get_fork_path(node, i) | ||||
|             # calculate new md5sums for pages | ||||
|             idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork( | ||||
|                 idx_ptrack[i]['new_size'], idx_ptrack[i]['path']) | ||||
|             # get ptrack for every idx | ||||
|             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork( | ||||
|                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size']) | ||||
|  | ||||
|             # compare pages and check ptrack sanity | ||||
|             self.check_ptrack_sanity(idx_ptrack[i]) | ||||
|  | ||||
|         self.clean_pb(node) | ||||
|         node.stop() | ||||
|  | ||||
|     def test_ptrack_cluster_gin(self): | ||||
|         print('test_ptrack_cluster_gin started') | ||||
|         node = self.make_simple_node(base_dir="tmp_dirs/ptrack/test_ptrack_cluster_gin", | ||||
|             set_replication=True, | ||||
|             initdb_params=['--data-checksums', '-A trust'], | ||||
|             pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'}) | ||||
|  | ||||
|         node.start() | ||||
|         self.create_tblspace_in_node(node, 'somedata') | ||||
|  | ||||
|         # Create table and indexes | ||||
|         res = node.psql( | ||||
|             "postgres", | ||||
|             "create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i") | ||||
|         for i in idx_ptrack: | ||||
|             if idx_ptrack[i]['type'] == 'heap': | ||||
|                 continue | ||||
|             node.psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format( | ||||
|                 i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column'])) | ||||
|  | ||||
|         node.psql('postgres', 'vacuum t_heap') | ||||
|         node.psql('postgres', 'checkpoint') | ||||
|  | ||||
|         for i in idx_ptrack: | ||||
|             # get size of heap and indexes. size calculated in pages | ||||
|             idx_ptrack[i]['old_size'] = self.get_fork_size(node, i) | ||||
|             # get path to heap and index files | ||||
|             idx_ptrack[i]['path'] = self.get_fork_path(node, i) | ||||
|             # calculate md5sums of pages | ||||
|             idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( | ||||
|                 idx_ptrack[i]['old_size'], idx_ptrack[i]['path']) | ||||
|  | ||||
|         self.init_pb(node) | ||||
|         self.backup_pb(node, backup_type='full', options=['-j100', '--stream']) | ||||
|  | ||||
|         node.psql('postgres', 'delete from t_heap where id%2 = 1') | ||||
|         node.psql('postgres', 'cluster t_heap using t_gin') | ||||
|         node.psql('postgres', 'checkpoint') | ||||
|  | ||||
|         for i in idx_ptrack: | ||||
|             # get new size of heap and indexes. size calculated in pages | ||||
|             idx_ptrack[i]['new_size'] = self.get_fork_size(node, i) | ||||
|             # update path to heap and index files in case they've changed | ||||
|             idx_ptrack[i]['path'] = self.get_fork_path(node, i) | ||||
|             # calculate new md5sums for pages | ||||
|             idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork( | ||||
|                 idx_ptrack[i]['new_size'], idx_ptrack[i]['path']) | ||||
|             # get ptrack for every idx | ||||
|             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork( | ||||
|                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size']) | ||||
|  | ||||
|             # compare pages and check ptrack sanity | ||||
|             self.check_ptrack_sanity(idx_ptrack[i]) | ||||
|  | ||||
|         self.clean_pb(node) | ||||
|         node.stop() | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|     unittest.main() | ||||
							
								
								
									
tests/ptrack_helpers.py (new file, 514 lines)
							| @@ -0,0 +1,514 @@ | ||||
| # os is used throughout the helpers below | ||||
| import os | ||||
| from sys import exit | ||||
| import subprocess | ||||
| import shutil | ||||
| import six | ||||
| from testgres import get_new_node | ||||
| import hashlib | ||||
| import re | ||||
|  | ||||
|  | ||||
| idx_ptrack = { | ||||
|     't_heap': { | ||||
|         'type': 'heap' | ||||
|     }, | ||||
|     't_btree': { | ||||
|         'type': 'btree', | ||||
|         'column': 'text', | ||||
|         'relation': 't_heap' | ||||
|     }, | ||||
|     't_spgist': { | ||||
|         'type': 'spgist', | ||||
|         'column': 'text', | ||||
|         'relation': 't_heap' | ||||
|     }, | ||||
|     't_brin': { | ||||
|         'type': 'brin', | ||||
|         'column': 'text', | ||||
|         'relation': 't_heap' | ||||
|     }, | ||||
|     't_gist': { | ||||
|         'type': 'gist', | ||||
|         'column': 'tsvector', | ||||
|         'relation': 't_heap' | ||||
|     }, | ||||
|     't_gin': { | ||||
|         'type': 'gin', | ||||
|         'column': 'tsvector', | ||||
|         'relation': 't_heap' | ||||
|     }, | ||||
| } | ||||
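|  | ||||
| # idx_ptrack drives the ptrack tests: 't_heap' is the table itself and every | ||||
| # other entry describes one index built on it. The tests render an entry into | ||||
| # SQL roughly like this (a sketch; the tablespace name is whatever the test | ||||
| # created): | ||||
| #   create index t_btree on t_heap using btree(text) tablespace somedata | ||||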
|  | ||||
| warning = """ | ||||
| Wrong split in show_pb | ||||
| Original Header: | ||||
| {header} | ||||
| Original Body: | ||||
| {body} | ||||
| Split Header: | ||||
| {header_split} | ||||
| Split Body: | ||||
| {body_split} | ||||
| """ | ||||
|  | ||||
| # You can look up the error message and cmdline in the exception object attributes | ||||
| class ProbackupException(Exception): | ||||
|     def __init__(self, message, cmd): | ||||
|         self.message = message | ||||
|         self.cmd = cmd | ||||
|  | ||||
|     # needed so that a re-raised exception prints a readable message | ||||
|     def __str__(self): | ||||
|         return '\n ERROR: {0}\n CMD: {1}'.format(repr(self.message), self.cmd) | ||||
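|  | ||||
| # A minimal usage sketch (hypothetical test body): | ||||
| #   try: | ||||
| #       self.restore_pb(node) | ||||
| #   except ProbackupException as e: | ||||
| #       print(e)  # shows both the error message and the failed cmdline | ||||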
|  | ||||
|  | ||||
| def dir_files(base_dir): | ||||
|     out_list = [] | ||||
|     for dir_name, subdir_list, file_list in os.walk(base_dir): | ||||
|         if dir_name != base_dir: | ||||
|             out_list.append(os.path.relpath(dir_name, base_dir)) | ||||
|         for fname in file_list: | ||||
|             out_list.append(os.path.relpath(os.path.join(dir_name, fname), base_dir)) | ||||
|     out_list.sort() | ||||
|     return out_list | ||||
|  | ||||
|  | ||||
| class ShowBackup(object): | ||||
|     def __init__(self, split_line): | ||||
|         self.counter = 0 | ||||
|  | ||||
|         self.id = self.get_inc(split_line) | ||||
|         # TODO: parse to datetime | ||||
|         if len(split_line) == 12: | ||||
|             self.recovery_time = "%s %s" % (self.get_inc(split_line), self.get_inc(split_line)) | ||||
|         # if recovery time is '----' | ||||
|         else: | ||||
|             self.recovery_time = self.get_inc(split_line) | ||||
|         self.mode = self.get_inc(split_line) | ||||
|         self.wal = self.get_inc(split_line) | ||||
|         self.cur_tli = self.get_inc(split_line) | ||||
|         # skip the '/' separator between current and parent tli | ||||
|         self.counter += 1 | ||||
|         self.parent_tli = self.get_inc(split_line) | ||||
|         # TODO: parse to interval | ||||
|         self.time = self.get_inc(split_line) | ||||
|         # TODO: maybe rename to size? | ||||
|         self.data = self.get_inc(split_line) | ||||
|         self.start_lsn = self.get_inc(split_line) | ||||
|         self.stop_lsn = self.get_inc(split_line) | ||||
|         self.status = self.get_inc(split_line) | ||||
|  | ||||
|     def get_inc(self, split_line): | ||||
|         # return the current field and advance the cursor past it | ||||
|         self.counter += 1 | ||||
|         return split_line[self.counter - 1] | ||||
|  | ||||
|  | ||||
| class ProbackupTest(object): | ||||
|     def __init__(self, *args, **kwargs): | ||||
|         super(ProbackupTest, self).__init__(*args, **kwargs) | ||||
|         self.test_env = os.environ.copy() | ||||
|         envs_list = [ | ||||
|             "LANGUAGE", | ||||
|             "LC_ALL", | ||||
|             "PGCONNECT_TIMEOUT", | ||||
|             "PGDATA", | ||||
|             "PGDATABASE", | ||||
|             "PGHOSTADDR", | ||||
|             "PGREQUIRESSL", | ||||
|             "PGSERVICE", | ||||
|             "PGSSLMODE", | ||||
|             "PGUSER", | ||||
|             "PGPORT", | ||||
|             "PGHOST" | ||||
|         ] | ||||
|  | ||||
|         for e in envs_list: | ||||
|             try: | ||||
|                 del self.test_env[e] | ||||
|             except KeyError: | ||||
|                 pass | ||||
|  | ||||
|         self.test_env["LC_MESSAGES"] = "C" | ||||
|         self.test_env["LC_TIME"] = "C" | ||||
|  | ||||
|         self.dir_path = os.path.dirname(os.path.realpath(__file__)) | ||||
|         try: | ||||
|             os.makedirs(os.path.join(self.dir_path, "tmp_dirs")) | ||||
|         except OSError: | ||||
|             # the directory may already exist | ||||
|             pass | ||||
|         self.probackup_path = os.path.abspath(os.path.join( | ||||
|             self.dir_path, | ||||
|             "../pg_probackup" | ||||
|         )) | ||||
|  | ||||
|     def arcwal_dir(self, node): | ||||
|         return "%s/backup/wal" % node.base_dir | ||||
|  | ||||
|     def backup_dir(self, node): | ||||
|         return os.path.abspath("%s/backup" % node.base_dir) | ||||
|  | ||||
|     def make_bnode(self, base_dir=None, allows_streaming=False, options={}): | ||||
|         real_base_dir = os.path.join(self.dir_path, base_dir) | ||||
|         shutil.rmtree(real_base_dir, ignore_errors=True) | ||||
|  | ||||
|         node = get_new_node('test', base_dir=real_base_dir) | ||||
|         node.init(allows_streaming=allows_streaming) | ||||
|  | ||||
|         if not allows_streaming: | ||||
|             node.append_conf("postgresql.auto.conf", "wal_level = hot_standby") | ||||
|         node.append_conf("postgresql.auto.conf", "archive_mode = on") | ||||
|         node.append_conf( | ||||
|             "postgresql.auto.conf", | ||||
|             """archive_command = 'cp "%%p" "%s/%%f"'""" % os.path.abspath(self.arcwal_dir(node)) | ||||
|         ) | ||||
|  | ||||
|         for key, value in six.iteritems(options): | ||||
|             node.append_conf("postgresql.conf", "%s = %s" % (key, value)) | ||||
|  | ||||
|         return node | ||||
|  | ||||
|  | ||||
|     def make_simple_node(self, base_dir=None, set_replication=False, | ||||
|                         set_archiving=False, initdb_params=[], pg_options={}): | ||||
|         real_base_dir = os.path.join(self.dir_path, base_dir) | ||||
|         shutil.rmtree(real_base_dir, ignore_errors=True) | ||||
|  | ||||
|         node = get_new_node('test', base_dir=real_base_dir) | ||||
|         node.init(initdb_params=initdb_params) | ||||
|  | ||||
|         # Set sane default parameters, overriding the fsync = off that testgres applies | ||||
|         node.append_conf("postgresql.auto.conf", "{0} = {1}".format('fsync', 'on')) | ||||
|         node.append_conf("postgresql.auto.conf", "{0} = {1}".format('wal_level', 'minimal')) | ||||
|  | ||||
|         # Apply given parameters | ||||
|         for key, value in six.iteritems(pg_options): | ||||
|             node.append_conf("postgresql.auto.conf", "%s = %s" % (key, value)) | ||||
|  | ||||
|         # Allow replication in pg_hba.conf | ||||
|         if set_replication: | ||||
|             node.set_replication_conf() | ||||
|         # Setup archiving for node | ||||
|         if set_archiving: | ||||
|             node.set_archiving_conf(self.arcwal_dir(node)) | ||||
|         return node | ||||
|  | ||||
|  | ||||
|     def create_tblspace_in_node(self, node, tblspc_name, cfs=False): | ||||
|         res = node.execute( | ||||
|             "postgres", "select exists (select 1 from pg_tablespace where spcname = '{0}')".format( | ||||
|                 tblspc_name)) | ||||
|         # Check that a tablespace named 'tblspc_name' does not exist already | ||||
|         self.assertEqual(res[0][0], False, 'Tablespace "{0}" already exists'.format(tblspc_name)) | ||||
|  | ||||
|         tblspc_path = os.path.join(node.base_dir, '{0}'.format(tblspc_name)) | ||||
|         cmd = "CREATE TABLESPACE {0} LOCATION '{1}'".format(tblspc_name, tblspc_path) | ||||
|         if cfs: | ||||
|             cmd += " with (compression=true)" | ||||
|         os.makedirs(tblspc_path) | ||||
|         res = node.psql("postgres", cmd) | ||||
|         # Check that tablespace was successfully created | ||||
|         self.assertEqual(res[0], 0, 'Failed to create tablespace with cmd: {0}'.format(cmd)) | ||||
|  | ||||
|  | ||||
|     def get_fork_size(self, node, fork_name): | ||||
|         return node.execute("postgres", | ||||
|             "select pg_relation_size('{0}')/8192".format(fork_name))[0][0] | ||||
|  | ||||
|     def get_fork_path(self, node, fork_name): | ||||
|         return os.path.join(node.base_dir, 'data', | ||||
|             node.execute("postgres", "select pg_relation_filepath('{0}')".format(fork_name))[0][0]) | ||||
|  | ||||
|     def get_md5_per_page_for_fork(self, size, file): | ||||
|         file = os.open(file, os.O_RDONLY) | ||||
|         offset = 0 | ||||
|         md5_per_page = {} | ||||
|         for page in range(size): | ||||
|             md5_per_page[page] = hashlib.md5(os.read(file, 8192)).hexdigest() | ||||
|             offset += 8192 | ||||
|             os.lseek(file, offset, 0) | ||||
|         os.close(file) | ||||
|         return md5_per_page | ||||
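|  | ||||
|     # A minimal usage sketch, assuming the standard 8192-byte block size: | ||||
|     #   size = self.get_fork_size(node, 't_btree')  # relation size in pages | ||||
|     #   path = self.get_fork_path(node, 't_btree')  # file backing the relation | ||||
|     #   old_pages = self.get_md5_per_page_for_fork(size, path) | ||||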
|  | ||||
|     def get_ptrack_bits_per_for_fork(self, file, size): | ||||
|         byte_size = os.path.getsize(file + '_ptrack') | ||||
|         byte_size_minus_header = byte_size - 24 | ||||
|         file = os.open(file + '_ptrack', os.O_RDONLY) | ||||
|         os.lseek(file, 24, 0) | ||||
|         lot_of_bytes = os.read(file, byte_size_minus_header) | ||||
|         ptrack_bits_per_for_fork = [] | ||||
|         for byte in lot_of_bytes: | ||||
|             # the lowest-numbered page maps to the least significant bit, | ||||
|             # so reverse the bit string of each byte | ||||
|             byte_inverted = bin(ord(byte))[2:].rjust(8, '0')[::-1] | ||||
|             for bit in byte_inverted: | ||||
|                 # collect one bit per page and ignore the trailing padding bits | ||||
|                 if len(ptrack_bits_per_for_fork) < size: | ||||
|                     ptrack_bits_per_for_fork.append(int(bit)) | ||||
|         os.close(file) | ||||
|         return ptrack_bits_per_for_fork | ||||
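|  | ||||
|     # Layout assumed above: the '<relfilenode>_ptrack' fork starts with a | ||||
|     # 24-byte header; after it, the bit for page p sits in payload byte | ||||
|     # p // 8 at bit position p % 8 (least significant bit first), which is | ||||
|     # why each byte's bit string is reversed before bits are collected. | ||||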
|  | ||||
|     def check_ptrack_sanity(self, idx_dict): | ||||
|         success = True | ||||
|         if idx_dict['new_size'] > idx_dict['old_size']: | ||||
|             size = idx_dict['new_size'] | ||||
|         else: | ||||
|             size = idx_dict['old_size'] | ||||
|  | ||||
|         for PageNum in range(size): | ||||
|             if PageNum not in idx_dict['old_pages']: | ||||
|                 # Page was not present before, meaning that the relation got bigger. | ||||
|                 # Ptrack should be equal to 1 | ||||
|                 if idx_dict['ptrack'][PageNum] != 1: | ||||
|                     print('Page Number {0} of type {1} was added, but ptrack value is {2}. THIS IS BAD'.format( | ||||
|                         PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum])) | ||||
|                     print(idx_dict) | ||||
|                     success = False | ||||
|                 continue | ||||
|             if PageNum not in idx_dict['new_pages']: | ||||
|                 # Page is not present now, meaning that the relation got smaller. | ||||
|                 # Ptrack should be equal to 0; we do not treat false positives as errors here | ||||
|                 if idx_dict['ptrack'][PageNum] != 0: | ||||
|                     print('Page Number {0} of type {1} was deleted, but ptrack value is {2}'.format( | ||||
|                         PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum])) | ||||
|                 continue | ||||
|             # All pages in new_pages that have no corresponding page in old_pages | ||||
|             # have been dealt with, so we can now safely compare old and new pages | ||||
|             if idx_dict['new_pages'][PageNum] != idx_dict['old_pages'][PageNum]: | ||||
|                 # Page has been changed, meaning that ptrack should be equal to 1 | ||||
|                 if idx_dict['ptrack'][PageNum] != 1: | ||||
|                     print('Page Number {0} of type {1} was changed, but ptrack value is {2}. THIS IS BAD'.format( | ||||
|                         PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum])) | ||||
|                     print(idx_dict) | ||||
|                     if PageNum == 0 and idx_dict['type'] == 'spgist': | ||||
|                         print('SPGiST is a special snowflake, so do not fret about losing ptrack for blknum 0') | ||||
|                         continue | ||||
|                     success = False | ||||
|             else: | ||||
|                 # Page has not been changed, meaning that ptrack should be equal to 0 | ||||
|                 if idx_dict['ptrack'][PageNum] != 0: | ||||
|                     print('Page Number {0} of type {1} was not changed, but ptrack value is {2}'.format( | ||||
|                         PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum])) | ||||
|                     print(idx_dict) | ||||
|         # assert once, after all pages have been reported | ||||
|         self.assertEqual(success, True) | ||||
|  | ||||
|     def check_ptrack_recovery(self, idx_dict): | ||||
|         success = True | ||||
|         size = idx_dict['size'] | ||||
|         for PageNum in range(size): | ||||
|             if idx_dict['ptrack'][PageNum] != 1: | ||||
|                 print('Recovery for Page Number {0} of Type {1} was conducted, but ptrack value is {2}. THIS IS BAD'.format( | ||||
|                     PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum])) | ||||
|                 print(idx_dict) | ||||
|                 success = False | ||||
|         self.assertEqual(success, True) | ||||
|  | ||||
|     def check_ptrack_clean(self, idx_dict): | ||||
|         success = True | ||||
|         size = idx_dict['size'] | ||||
|         for PageNum in range(size): | ||||
|             if idx_dict['ptrack'][PageNum] != 0: | ||||
|                 print('Ptrack for Page Number {0} of Type {1} should be clean, but ptrack value is {2}. THIS IS BAD'.format( | ||||
|                     PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum])) | ||||
|                 print(idx_dict) | ||||
|                 success = False | ||||
|         self.assertEqual(success, True) | ||||
|  | ||||
|     def run_pb(self, command): | ||||
|         try: | ||||
|             output = subprocess.check_output( | ||||
|                 [self.probackup_path] + command, | ||||
|                 stderr=subprocess.STDOUT, | ||||
|                 env=self.test_env | ||||
|             ) | ||||
|             if command[0] == 'backup': | ||||
|                 if '-q' in command or '--quiet' in command: | ||||
|                     return None | ||||
|                 else: | ||||
|                     # return backup ID | ||||
|                     return output.split()[2] | ||||
|             else: | ||||
|                 return output | ||||
|         except subprocess.CalledProcessError as e: | ||||
|             raise ProbackupException(e.output, e.cmd) | ||||
|  | ||||
|     def init_pb(self, node): | ||||
|         return self.run_pb([ | ||||
|             "init", | ||||
|             "-B", self.backup_dir(node), | ||||
|             "-D", node.data_dir | ||||
|         ]) | ||||
|  | ||||
|     def clean_pb(self, node): | ||||
|         shutil.rmtree(self.backup_dir(node), ignore_errors=True) | ||||
|  | ||||
|     def backup_pb(self, node, backup_type="full", options=[]): | ||||
|         cmd_list = [ | ||||
|             "backup", | ||||
|             "-D", node.data_dir, | ||||
|             "-B", self.backup_dir(node), | ||||
|             "-p", "%i" % node.port, | ||||
|             "-d", "postgres" | ||||
|         ] | ||||
|         if backup_type: | ||||
|             cmd_list += ["-b", backup_type] | ||||
|  | ||||
|         return self.run_pb(cmd_list + options) | ||||
|  | ||||
|     def backup_pb_proc(self, node, backup_type="full", | ||||
|         stdout=None, stderr=None, options=[]): | ||||
|         cmd_list = [ | ||||
|             self.probackup_path, | ||||
|             "backup", | ||||
|             "-D", node.data_dir, | ||||
|             "-B", self.backup_dir(node), | ||||
|             "-p", "%i" % (node.port), | ||||
|             "-d", "postgres" | ||||
|         ] | ||||
|         if backup_type: | ||||
|             cmd_list += ["-b", backup_type] | ||||
|  | ||||
|         proc = subprocess.Popen( | ||||
|             cmd_list + options, | ||||
|             stdout=stdout, | ||||
|             stderr=stderr | ||||
|         ) | ||||
|  | ||||
|         return proc | ||||
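|  | ||||
|     # A minimal usage sketch (hypothetical): start a backup without blocking | ||||
|     # the test, then wait for it to finish: | ||||
|     #   proc = self.backup_pb_proc(node, backup_type='full', | ||||
|     #                              stdout=subprocess.PIPE, stderr=subprocess.PIPE) | ||||
|     #   proc.wait() | ||||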
|  | ||||
|     def restore_pb(self, node, id=None, options=[]): | ||||
|         cmd_list = [ | ||||
|             "restore", | ||||
|             "-D", node.data_dir, | ||||
|             "-B", self.backup_dir(node) | ||||
|         ] | ||||
|         if id: | ||||
|             cmd_list += ["-i", id] | ||||
|  | ||||
|         # print(cmd_list) | ||||
|         return self.run_pb(cmd_list + options) | ||||
|  | ||||
|     def show_pb(self, node, id=None, options=[], as_text=False): | ||||
|         backup_list = [] | ||||
|         specific_record = {} | ||||
|         cmd_list = [ | ||||
|             "show", | ||||
|             "-B", self.backup_dir(node), | ||||
|         ] | ||||
|         if id: | ||||
|             cmd_list += ["-i", id] | ||||
|  | ||||
|         if as_text: | ||||
|             # the caller is expected to print the raw output when as_text=True | ||||
|             return self.run_pb(cmd_list + options) | ||||
|  | ||||
|         # get show result as a list of lines | ||||
|         show_splitted = self.run_pb(cmd_list + options).splitlines() | ||||
|         if id is None: | ||||
|             # cut the header (ID, Mode, etc.) from show output as a single string | ||||
|             header = show_splitted[1] | ||||
|             # cut backup records from show output, one string per backup record | ||||
|             body = show_splitted[3:] | ||||
|             # invert the list so the oldest record comes first | ||||
|             body = body[::-1] | ||||
|             # split the header string into a list, one string per header element | ||||
|             header_split = re.split("  +", header) | ||||
|             # crutch: remove the last item, because it is an empty string '' | ||||
|             header_split.pop() | ||||
|             for backup_record in body: | ||||
|                 # split the record string into a list, one string per record element | ||||
|                 backup_record_split = re.split("  +", backup_record) | ||||
|                 # crutch: remove the last item, because it is an empty string '' | ||||
|                 backup_record_split.pop() | ||||
|                 if len(header_split) != len(backup_record_split): | ||||
|                     print(warning.format( | ||||
|                         header=header, body=body, | ||||
|                         header_split=header_split, body_split=backup_record_split)) | ||||
|                     exit(1) | ||||
|                 new_dict = dict(zip(header_split, backup_record_split)) | ||||
|                 backup_list.append(new_dict) | ||||
|             return backup_list | ||||
|         else: | ||||
|             # cut out empty lines, lines starting with # and other garbage, | ||||
|             # then reconstruct the rest as a dictionary | ||||
|             sanitized_show = [item for item in show_splitted if item] | ||||
|             sanitized_show = [item for item in sanitized_show if not item.startswith('#')] | ||||
|             for line in sanitized_show: | ||||
|                 name, var = line.partition(" = ")[::2] | ||||
|                 var = var.strip('"') | ||||
|                 var = var.strip("'") | ||||
|                 specific_record[name.strip()] = var | ||||
|             return specific_record | ||||
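|  | ||||
|     # Shapes returned above (a sketch, based on the current text layout of | ||||
|     # the 'show' command): with id=None, a list of dicts keyed by the header | ||||
|     # columns, e.g. {'ID': ..., 'Mode': 'FULL', ..., 'Status': 'OK'}, oldest | ||||
|     # record first; with an id, a flat dict built from that backup's | ||||
|     # 'key = value' lines. | ||||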
|  | ||||
|     def validate_pb(self, node, id=None, options=[]): | ||||
|         cmd_list = [ | ||||
|             "validate", | ||||
|             "-B", self.backup_dir(node), | ||||
|         ] | ||||
|         if id: | ||||
|             cmd_list += ["-i", id] | ||||
|  | ||||
|         # print(cmd_list) | ||||
|         return self.run_pb(cmd_list + options) | ||||
|  | ||||
|     def delete_pb(self, node, id=None, options=[]): | ||||
|         cmd_list = [ | ||||
|             "delete", | ||||
|             "-B", self.backup_dir(node), | ||||
|         ] | ||||
|         if id: | ||||
|             cmd_list += ["-i", id] | ||||
|  | ||||
|         # print(cmd_list) | ||||
|         return self.run_pb(cmd_list + options) | ||||
|  | ||||
|     def retention_purge_pb(self, node, options=[]): | ||||
|         cmd_list = [ | ||||
|             "retention", "purge", | ||||
|             "-B", self.backup_dir(node), | ||||
|         ] | ||||
|         return self.run_pb(cmd_list + options) | ||||
|  | ||||
|     def retention_show(self, node, options=[]): | ||||
|         cmd_list = [ | ||||
|             "config", | ||||
|             "-B", self.backup_dir(node), | ||||
|         ] | ||||
|         return self.run_pb(cmd_list + options) | ||||
|  | ||||
|     def get_recovery_conf(self, node): | ||||
|         out_dict = {} | ||||
|         with open(os.path.join(node.data_dir, "recovery.conf"), "r") as recovery_conf: | ||||
|             for line in recovery_conf: | ||||
|                 try: | ||||
|                     key, value = line.split("=") | ||||
|                 except ValueError: | ||||
|                     continue | ||||
|                 out_dict[key.strip()] = value.strip(" '").replace("'\n", "") | ||||
|         return out_dict | ||||
|  | ||||
|     def wrong_wal_clean(self, node, wal_size): | ||||
|         wals_dir = os.path.join(self.backup_dir(node), "wal") | ||||
|         wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f))] | ||||
|         wals.sort() | ||||
|         file_path = os.path.join(wals_dir, wals[-1]) | ||||
|         if os.path.getsize(file_path) != wal_size: | ||||
|             os.remove(file_path) | ||||
|  | ||||
|     def guc_wal_segment_size(self, node): | ||||
|         var = node.execute("postgres", "select setting from pg_settings where name = 'wal_segment_size'") | ||||
|         return int(var[0][0]) * self.guc_wal_block_size(node) | ||||
|  | ||||
|     def guc_wal_block_size(self, node): | ||||
|         var = node.execute("postgres", "select setting from pg_settings where name = 'wal_block_size'") | ||||
|         return int(var[0][0]) | ||||
|  | ||||
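The two GUC helpers above combine into the WAL segment size in bytes: on the PostgreSQL versions this suite targets, wal_segment_size is reported in blocks, so it has to be multiplied by wal_block_size. A quick worked check with stock defaults:

```python
# stock defaults on these PostgreSQL versions: 8192-byte WAL blocks,
# segments of 2048 blocks
wal_block_size = 8192
wal_segment_size = 2048          # reported in blocks by pg_settings

segment_bytes = wal_segment_size * wal_block_size
assert segment_bytes == 16 * 1024 * 1024   # the familiar 16 MB WAL segment
```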
							
								
								
									
tests/ptrack_move_to_tablespace.py (new file, 59 lines)
							| @@ -0,0 +1,59 @@ | ||||
| import unittest | ||||
| from sys import exit | ||||
| from testgres import get_new_node, stop_all | ||||
| import os | ||||
| from signal import SIGTERM | ||||
| from .ptrack_helpers import ProbackupTest, idx_ptrack | ||||
| from time import sleep | ||||
|  | ||||
|  | ||||
| class SimpleTest(ProbackupTest, unittest.TestCase): | ||||
|     def __init__(self, *args, **kwargs): | ||||
|         super(SimpleTest, self).__init__(*args, **kwargs) | ||||
|  | ||||
|     def tearDown(self): | ||||
|         stop_all() | ||||
|  | ||||
|     def test_ptrack_move_to_tablespace(self): | ||||
|         fname = self.id().split(".")[3] | ||||
|         print '{0} started'.format(fname) | ||||
|         node = self.make_simple_node(base_dir="tmp_dirs/ptrack/{0}".format(fname), | ||||
|             set_replication=True, | ||||
|             initdb_params=['--data-checksums', '-A trust'], | ||||
|             pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'}) | ||||
|  | ||||
|         node.start() | ||||
|         self.create_tblspace_in_node(node, 'somedata') | ||||
|  | ||||
|         # Create table and indexes | ||||
|         node.psql("postgres", | ||||
|             "create table t_heap as select i as id, md5(i::text) as text,md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i") | ||||
|         for i in idx_ptrack: | ||||
|             if idx_ptrack[i]['type'] != 'heap': | ||||
|                 node.psql("postgres", "create index {0} on {1} using {2}({3})".format( | ||||
|                     i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column'])) | ||||
|  | ||||
|         # Move table and indexes and make checkpoint | ||||
|         for i in idx_ptrack: | ||||
|             if idx_ptrack[i]['type'] == 'heap': | ||||
|                 node.psql('postgres', 'alter table {0} set tablespace somedata;'.format(i)) | ||||
|                 continue | ||||
|             node.psql('postgres', 'alter index {0} set tablespace somedata'.format(i)) | ||||
|         node.psql('postgres', 'checkpoint') | ||||
|  | ||||
|         for i in idx_ptrack: | ||||
|             # get the size of the heap and indexes, in pages | ||||
|             idx_ptrack[i]['size'] = self.get_fork_size(node, i) | ||||
|             # get path to heap and index files | ||||
|             idx_ptrack[i]['path'] = self.get_fork_path(node, i) | ||||
|             # get ptrack bits for every index | ||||
|             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(idx_ptrack[i]['path']) | ||||
|             # check that ptrack has correct bits after the move to the new tablespace | ||||
|             self.check_ptrack_recovery(idx_ptrack[i]) | ||||
|  | ||||
|         self.clean_pb(node) | ||||
|         node.stop() | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|     unittest.main() | ||||
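The test iterates over `idx_ptrack`, a dictionary imported from ptrack_helpers, whose diff is suppressed below. A plausible minimal shape, inferred purely from how the tests use it:

```python
# guessed-at shape of the idx_ptrack fixture, inferred from usage in the
# tests; the real dictionary lives in tests/ptrack_helpers.py and may
# contain more index types
idx_ptrack = {
    't_heap': {
        'type': 'heap',                 # the table itself
    },
    't_btree': {
        'type': 'btree',                # access method for CREATE INDEX
        'relation': 't_heap',
        'column': 'id',
    },
    't_gin': {
        'type': 'gin',
        'relation': 't_heap',
        'column': 'tsvector',
    },
}

# during a run each entry is enriched in place with measured state:
# ['path'], ['size'] / ['old_size'] / ['new_size'],
# ['old_pages'] / ['new_pages'] and ['ptrack']
```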
							
								
								
									
tests/ptrack_recovery.py (new file, 61 lines)
							| @@ -0,0 +1,61 @@ | ||||
| import unittest | ||||
| from sys import exit | ||||
| from testgres import get_new_node, stop_all | ||||
| import os | ||||
| from signal import SIGTERM | ||||
| from .ptrack_helpers import ProbackupTest, idx_ptrack | ||||
| from time import sleep | ||||
|  | ||||
|  | ||||
| class SimpleTest(ProbackupTest, unittest.TestCase): | ||||
|     def __init__(self, *args, **kwargs): | ||||
|         super(SimpleTest, self).__init__(*args, **kwargs) | ||||
|  | ||||
|     def tearDown(self): | ||||
|         stop_all() | ||||
|  | ||||
|     def test_ptrack_recovery(self): | ||||
|         fname = self.id().split(".")[3] | ||||
|         print '{0} started'.format(fname) | ||||
|         node = self.make_simple_node(base_dir="tmp_dirs/ptrack/{0}".format(fname), | ||||
|             set_replication=True, | ||||
|             initdb_params=['--data-checksums', '-A trust'], | ||||
|             pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'}) | ||||
|  | ||||
|         node.start() | ||||
|         self.create_tblspace_in_node(node, 'somedata') | ||||
|  | ||||
|         # Create table | ||||
|         node.psql("postgres", | ||||
|             "create table t_heap tablespace somedata as select i as id, md5(i::text) as text,md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i") | ||||
|         # Create indexes | ||||
|         for i in idx_ptrack: | ||||
|             if idx_ptrack[i]['type'] != 'heap': | ||||
|                 node.psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format( | ||||
|                     i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column'])) | ||||
|  | ||||
|             # get the size of the heap and indexes, in pages | ||||
|             idx_ptrack[i]['size'] = int(self.get_fork_size(node, i)) | ||||
|             # get path to heap and index files | ||||
|             idx_ptrack[i]['path'] = self.get_fork_path(node, i) | ||||
|  | ||||
|         print 'Killing postmaster. Losing Ptrack changes' | ||||
|         node.pg_ctl('stop', {'-m': 'immediate', '-D': '{0}'.format(node.data_dir)})     | ||||
|         if not node.status(): | ||||
|             node.start() | ||||
|         else: | ||||
|             print "Die! Die! Why won't you die?... Why won't you die?" | ||||
|             exit(1) | ||||
|  | ||||
|         for i in idx_ptrack: | ||||
|             # get ptrack for every idx | ||||
|             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(idx_ptrack[i]['path']) | ||||
|             # check that ptrack has correct bits after recovery | ||||
|             self.check_ptrack_recovery(idx_ptrack[i]) | ||||
|  | ||||
|         self.clean_pb(node) | ||||
|         node.stop() | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|     unittest.main() | ||||
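The interesting assertion here is check_ptrack_recovery, defined in the suppressed ptrack_helpers.py. Judging by the scenario, a sketch of the invariant it likely asserts: after the postmaster is killed and WAL replay rebuilds the pages, every page of the fork should be marked as changed, because the in-memory tracking state did not survive the crash.

```python
# a sketch only; the real check_ptrack_recovery lives in ptrack_helpers.py
def check_ptrack_recovery_sketch(idx_dict):
    size = idx_dict['size']        # relation size in pages
    ptrack = idx_dict['ptrack']    # one flag per page, as read from disk
    for page_num in range(size):
        assert ptrack[page_num] == 1, (
            'ptrack bit is not set for page {0} of {1}'.format(
                page_num, idx_dict['path']))
```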
							
								
								
									
tests/ptrack_vacuum.py (new file, 92 lines)
							| @@ -0,0 +1,92 @@ | ||||
| import unittest | ||||
| from sys import exit | ||||
| from testgres import get_new_node, stop_all | ||||
| from os import path, open, lseek, read, close, O_RDONLY | ||||
| from .ptrack_helpers import ProbackupTest, idx_ptrack | ||||
|  | ||||
|  | ||||
|  | ||||
| class SimpleTest(ProbackupTest, unittest.TestCase): | ||||
|     def __init__(self, *args, **kwargs): | ||||
|         super(SimpleTest, self).__init__(*args, **kwargs) | ||||
|  | ||||
|     def tearDown(self): | ||||
|         stop_all() | ||||
|  | ||||
| #    @unittest.skip("123") | ||||
|     def test_ptrack_vacuum(self): | ||||
|         fname = self.id().split('.')[3] | ||||
|         print '{0} started'.format(fname) | ||||
|         node = self.make_simple_node(base_dir='tmp_dirs/ptrack/{0}'.format(fname), | ||||
|             set_replication=True, | ||||
|             initdb_params=['--data-checksums', '-A trust'], | ||||
|             pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'}) | ||||
|  | ||||
|         node.start() | ||||
|         self.create_tblspace_in_node(node, 'somedata') | ||||
|  | ||||
|         # Create table and indexes | ||||
|         node.psql( | ||||
|             "postgres", | ||||
|             "create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i") | ||||
|         for i in idx_ptrack: | ||||
|             if idx_ptrack[i]['type'] == 'heap': | ||||
|                 continue | ||||
|             node.psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format( | ||||
|                 i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column'])) | ||||
|  | ||||
|         node.psql('postgres', 'vacuum t_heap') | ||||
|         node.psql('postgres', 'checkpoint') | ||||
|  | ||||
|         for i in idx_ptrack: | ||||
|             # get fork size and calculate it in pages | ||||
|             idx_ptrack[i]['old_size'] = self.get_fork_size(node, i) | ||||
|             # get path to heap and index files | ||||
|             idx_ptrack[i]['path'] = self.get_fork_path(node, i) | ||||
|             # calculate md5sums for every page of this fork | ||||
|             idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( | ||||
|                 idx_ptrack[i]['old_size'], idx_ptrack[i]['path']) | ||||
|  | ||||
|         # Take a full backup, which clears every ptrack bit | ||||
|         self.init_pb(node) | ||||
|         self.backup_pb(node, backup_type='full', options=['-j100', '--stream']) | ||||
|         for i in idx_ptrack: | ||||
|             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(idx_ptrack[i]['path']) | ||||
|             self.check_ptrack_clean(idx_ptrack[i]) | ||||
|  | ||||
|         # Delete some rows, vacuum it and make checkpoint | ||||
|         node.psql('postgres', 'delete from t_heap where id%2 = 1') | ||||
|         node.psql('postgres', 'vacuum t_heap') | ||||
|         node.psql('postgres', 'checkpoint') | ||||
|  | ||||
|         for i in idx_ptrack: | ||||
|             # get the new size of the heap and indexes, in pages | ||||
|             idx_ptrack[i]['new_size'] = self.get_fork_size(node, i) | ||||
|             # update path to heap and index files in case they've changed | ||||
|             idx_ptrack[i]['path'] = self.get_fork_path(node, i) | ||||
|             # calculate new md5sums for pages | ||||
|             idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork( | ||||
|                 idx_ptrack[i]['new_size'], idx_ptrack[i]['path']) | ||||
|             # get ptrack for every idx | ||||
|             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(idx_ptrack[i]['path']) | ||||
|  | ||||
|             # compare pages and check ptrack sanity | ||||
|             self.check_ptrack_sanity(idx_ptrack[i]) | ||||
|  | ||||
|         self.clean_pb(node) | ||||
|         node.stop() | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|     unittest.main() | ||||
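check_ptrack_sanity, also defined in the suppressed helper file, is the core assertion of all the vacuum tests. A sketch of the invariant it plausibly enforces, assuming the per-page md5 maps are dictionaries keyed by page number: any page whose content changed between the two snapshots must have its ptrack bit set, otherwise an incremental backup would miss that page.

```python
# a sketch only; the real check_ptrack_sanity in ptrack_helpers.py is
# likely stricter (e.g. it may also flag bits set for unchanged pages)
def check_ptrack_sanity_sketch(idx_dict):
    for page_num in range(idx_dict['new_size']):
        old = idx_dict['old_pages'].get(page_num)
        new = idx_dict['new_pages'].get(page_num)
        if old != new:
            # page is new or rewritten: ptrack must have noticed
            assert idx_dict['ptrack'][page_num] == 1, (
                'page {0} changed but is not tracked'.format(page_num))
```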
							
								
								
									
tests/ptrack_vacuum_bits_frozen.py (new file, 71 lines)
							| @@ -0,0 +1,71 @@ | ||||
| import unittest | ||||
| from sys import exit | ||||
| from testgres import get_new_node, stop_all | ||||
| from os import path, open, lseek, read, close, O_RDONLY | ||||
| from .ptrack_helpers import ProbackupTest, idx_ptrack | ||||
|  | ||||
|  | ||||
| class SimpleTest(ProbackupTest, unittest.TestCase): | ||||
|     def __init__(self, *args, **kwargs): | ||||
|         super(SimpleTest, self).__init__(*args, **kwargs) | ||||
|  | ||||
|     def tearDown(self): | ||||
|         stop_all() | ||||
|  | ||||
|     def test_ptrack_vacuum_bits_frozen(self): | ||||
|         fname = self.id().split('.')[3] | ||||
|         print '{0} started'.format(fname) | ||||
|         node = self.make_simple_node(base_dir="tmp_dirs/ptrack/{0}".format(fname), | ||||
|             set_replication=True, | ||||
|             initdb_params=['--data-checksums', '-A trust'], | ||||
|             pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'}) | ||||
|  | ||||
|         node.start() | ||||
|         self.create_tblspace_in_node(node, 'somedata') | ||||
|  | ||||
|         # Create table and indexes | ||||
|         node.psql( | ||||
|             "postgres", | ||||
|             "create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i") | ||||
|         for i in idx_ptrack: | ||||
|             if idx_ptrack[i]['type'] == 'heap': | ||||
|                 continue | ||||
|             node.psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format( | ||||
|                 i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column'])) | ||||
|  | ||||
|         node.psql('postgres', 'checkpoint') | ||||
|  | ||||
|         for i in idx_ptrack: | ||||
|             # get the size of the heap and indexes, in pages | ||||
|             idx_ptrack[i]['old_size'] = self.get_fork_size(node, i) | ||||
|             # get path to heap and index files | ||||
|             idx_ptrack[i]['path'] = self.get_fork_path(node, i) | ||||
|             # calculate md5sums of pages | ||||
|             idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( | ||||
|                 idx_ptrack[i]['old_size'], idx_ptrack[i]['path']) | ||||
|  | ||||
|         self.init_pb(node) | ||||
|         self.backup_pb(node, backup_type='full', options=['-j100', '--stream']) | ||||
|  | ||||
|         node.psql('postgres', 'vacuum freeze t_heap') | ||||
|         node.psql('postgres', 'checkpoint') | ||||
|  | ||||
|         for i in idx_ptrack: | ||||
|             # get the new size of the heap and indexes, in pages | ||||
|             idx_ptrack[i]['new_size'] = self.get_fork_size(node, i) | ||||
|             # update path to heap and index files in case they've changed | ||||
|             idx_ptrack[i]['path'] = self.get_fork_path(node, i) | ||||
|             # calculate new md5sums for pages | ||||
|             idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork( | ||||
|                 idx_ptrack[i]['new_size'], idx_ptrack[i]['path']) | ||||
|             # get ptrack for every idx | ||||
|             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(idx_ptrack[i]['path']) | ||||
|  | ||||
|             # compare pages and check ptrack sanity | ||||
|             self.check_ptrack_sanity(idx_ptrack[i]) | ||||
|  | ||||
|         self.clean_pb(node) | ||||
|         node.stop() | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|     unittest.main() | ||||
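get_md5_per_page_for_fork hashes the data file page by page. The tests import low-level os primitives (open, lseek, read), which hints at raw file I/O in the real helper; an equivalent minimal sketch with plain Python file objects:

```python
import hashlib

BLCKSZ = 8192  # default PostgreSQL page size

# a sketch in the spirit of get_md5_per_page_for_fork: one md5 per page,
# keyed by page number; the real helper in ptrack_helpers.py may differ
def md5_per_page(size_in_pages, file_path):
    pages = {}
    with open(file_path, 'rb') as f:
        for page_num in range(size_in_pages):
            pages[page_num] = hashlib.md5(f.read(BLCKSZ)).hexdigest()
    return pages
```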
							
								
								
									
tests/ptrack_vacuum_bits_visibility.py (new file, 71 lines)
							| @@ -0,0 +1,71 @@ | ||||
| import unittest | ||||
| from sys import exit | ||||
| from testgres import get_new_node, stop_all | ||||
| from os import path, open, lseek, read, close, O_RDONLY | ||||
| from .ptrack_helpers import ProbackupTest, idx_ptrack | ||||
|  | ||||
|  | ||||
| class SimpleTest(ProbackupTest, unittest.TestCase): | ||||
|     def __init__(self, *args, **kwargs): | ||||
|         super(SimpleTest, self).__init__(*args, **kwargs) | ||||
|  | ||||
|     def tearDown(self): | ||||
|         stop_all() | ||||
|  | ||||
|     def test_ptrack_vacuum_bits_visibility(self): | ||||
|         fname = self.id().split('.')[3] | ||||
|         print '{0} started'.format(fname) | ||||
|         node = self.make_simple_node(base_dir="tmp_dirs/ptrack/{0}".format(fname), | ||||
|             set_replication=True, | ||||
|             initdb_params=['--data-checksums', '-A trust'], | ||||
|             pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'}) | ||||
|  | ||||
|         node.start() | ||||
|         self.create_tblspace_in_node(node, 'somedata') | ||||
|  | ||||
|         # Create table and indexes | ||||
|         node.psql( | ||||
|             "postgres", | ||||
|             "create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i") | ||||
|         for i in idx_ptrack: | ||||
|             if idx_ptrack[i]['type'] == 'heap': | ||||
|                 continue | ||||
|             node.psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format( | ||||
|                 i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column'])) | ||||
|  | ||||
|         node.psql('postgres', 'checkpoint') | ||||
|  | ||||
|         for i in idx_ptrack: | ||||
|             # get the size of the heap and indexes, in pages | ||||
|             idx_ptrack[i]['old_size'] = self.get_fork_size(node, i) | ||||
|             # get path to heap and index files | ||||
|             idx_ptrack[i]['path'] = self.get_fork_path(node, i) | ||||
|             # calculate md5sums of pages | ||||
|             idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( | ||||
|                 idx_ptrack[i]['old_size'], idx_ptrack[i]['path']) | ||||
|  | ||||
|         self.init_pb(node) | ||||
|         self.backup_pb(node, backup_type='full', options=['-j100', '--stream']) | ||||
|  | ||||
|         node.psql('postgres', 'vacuum t_heap') | ||||
|         node.psql('postgres', 'checkpoint') | ||||
|  | ||||
|         for i in idx_ptrack: | ||||
|             # get the new size of the heap and indexes, in pages | ||||
|             idx_ptrack[i]['new_size'] = self.get_fork_size(node, i) | ||||
|             # update path to heap and index files in case they've changed | ||||
|             idx_ptrack[i]['path'] = self.get_fork_path(node, i) | ||||
|             # calculate new md5sums for pages | ||||
|             idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork( | ||||
|                 idx_ptrack[i]['new_size'], idx_ptrack[i]['path']) | ||||
|             # get ptrack for every idx | ||||
|             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(idx_ptrack[i]['path']) | ||||
|  | ||||
|             # compare pages and check ptrack sanity | ||||
|             self.check_ptrack_sanity(idx_ptrack[i]) | ||||
|  | ||||
|         self.clean_pb(node) | ||||
|         node.stop() | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|     unittest.main() | ||||
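The size and path helpers used throughout these tests can be expressed with stock PostgreSQL functions; the real implementations sit in the suppressed ptrack_helpers.py, so treat this as a sketch of the idea rather than the actual code:

```python
import os

def get_fork_size_sketch(node, rel_name):
    # pg_relation_size() returns the main fork in bytes; divide by the
    # 8192-byte default block size to count pages
    res = node.execute(
        'postgres', "select pg_relation_size('{0}')/8192".format(rel_name))
    return int(res[0][0])

def get_fork_path_sketch(node, rel_name):
    # pg_relation_filepath() is relative to the data directory
    res = node.execute(
        'postgres', "select pg_relation_filepath('{0}')".format(rel_name))
    return os.path.join(node.data_dir, res[0][0])
```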
							
								
								
									
tests/ptrack_vacuum_full.py (new file, 85 lines)
							| @@ -0,0 +1,85 @@ | ||||
| import unittest | ||||
| from sys import exit | ||||
| from testgres import get_new_node, stop_all | ||||
| from os import path, open, lseek, read, close, O_RDONLY | ||||
| from .ptrack_helpers import ProbackupTest, idx_ptrack | ||||
|  | ||||
|  | ||||
|  | ||||
| class SimpleTest(ProbackupTest, unittest.TestCase): | ||||
|     def __init__(self, *args, **kwargs): | ||||
|         super(SimpleTest, self).__init__(*args, **kwargs) | ||||
|  | ||||
|     def tearDown(self): | ||||
|         stop_all() | ||||
|  | ||||
|     def test_ptrack_vacuum_full(self): | ||||
|         fname = self.id().split('.')[3] | ||||
|         print '{0} started'.format(fname) | ||||
|         node = self.make_simple_node(base_dir="tmp_dirs/ptrack/{0}".format(fname), | ||||
|             set_replication=True, | ||||
|             initdb_params=['--data-checksums', '-A trust'], | ||||
|             pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'}) | ||||
|  | ||||
|         node.start() | ||||
|         self.create_tblspace_in_node(node, 'somedata') | ||||
|  | ||||
|         # Create table and indexes | ||||
|         node.psql( | ||||
|             "postgres", | ||||
|             "create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,127) i") | ||||
|         for i in idx_ptrack: | ||||
|             if idx_ptrack[i]['type'] == 'heap': | ||||
|                 continue | ||||
|             node.psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format( | ||||
|                 i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column'])) | ||||
|  | ||||
|         node.psql('postgres', 'vacuum t_heap') | ||||
|         node.psql('postgres', 'checkpoint') | ||||
|  | ||||
|         for i in idx_ptrack: | ||||
|             # get the size of the heap and indexes, in pages | ||||
|             idx_ptrack[i]['old_size'] = self.get_fork_size(node, i) | ||||
|             # get path to heap and index files | ||||
|             idx_ptrack[i]['path'] = self.get_fork_path(node, i) | ||||
|             # calculate md5sums of pages | ||||
|             idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( | ||||
|                 idx_ptrack[i]['old_size'], idx_ptrack[i]['path']) | ||||
|  | ||||
|         self.init_pb(node) | ||||
|         self.backup_pb(node, backup_type='full', options=['-j100', '--stream']) | ||||
|  | ||||
|         node.psql('postgres', 'delete from t_heap where id%2 = 1') | ||||
|         node.psql('postgres', 'vacuum full t_heap') | ||||
|         node.psql('postgres', 'checkpoint') | ||||
|  | ||||
|         for i in idx_ptrack: | ||||
|             # get the new size of the heap and indexes, in pages | ||||
|             idx_ptrack[i]['new_size'] = self.get_fork_size(node, i) | ||||
|             # update path to heap and index files in case they've changed | ||||
|             idx_ptrack[i]['path'] = self.get_fork_path(node, i) | ||||
|             # calculate new md5sums for pages | ||||
|             idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork( | ||||
|                 idx_ptrack[i]['new_size'], idx_ptrack[i]['path']) | ||||
|             # get ptrack for every idx | ||||
|             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(idx_ptrack[i]['path']) | ||||
|  | ||||
|             # compare pages and check ptrack sanity; this is the key check | ||||
|             self.check_ptrack_sanity(idx_ptrack[i]) | ||||
|  | ||||
|         self.clean_pb(node) | ||||
|         node.stop() | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|     unittest.main() | ||||
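The reason this test refreshes idx_ptrack[i]['path'] is that VACUUM FULL rewrites the table into a brand-new data file (a new relfilenode), so the path captured before the rewrite goes stale. A sketch that makes the effect visible, assuming a started `node` as in the test above:

```python
# observe the relfilenode change that forces the path refresh
before = node.execute(
    'postgres', "select relfilenode from pg_class where relname = 't_heap'")
node.psql('postgres', 'vacuum full t_heap')
after = node.execute(
    'postgres', "select relfilenode from pg_class where relname = 't_heap'")
assert before[0][0] != after[0][0]   # the table now lives in a new file
```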
							
								
								
									
tests/ptrack_vacuum_truncate.py (new file, 73 lines)
							| @@ -0,0 +1,73 @@ | ||||
| import unittest | ||||
| from sys import exit | ||||
| from testgres import get_new_node, stop_all | ||||
| from os import path, open, lseek, read, close, O_RDONLY | ||||
| from .ptrack_helpers import ProbackupTest, idx_ptrack | ||||
|  | ||||
|  | ||||
| class SimpleTest(ProbackupTest, unittest.TestCase): | ||||
|     def __init__(self, *args, **kwargs): | ||||
|         super(SimpleTest, self).__init__(*args, **kwargs) | ||||
|  | ||||
|     def tearDown(self): | ||||
|         stop_all() | ||||
|  | ||||
|     def test_ptrack_vacuum_truncate(self): | ||||
|         fname = self.id().split('.')[3] | ||||
|         print '{0} started'.format(fname) | ||||
|         node = self.make_simple_node(base_dir="tmp_dirs/ptrack/{0}".format(fname), | ||||
|             set_replication=True, | ||||
|             initdb_params=['--data-checksums', '-A trust'], | ||||
|             pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'}) | ||||
|  | ||||
|         node.start() | ||||
|         self.create_tblspace_in_node(node, 'somedata') | ||||
|  | ||||
|         # Create table and indexes | ||||
|         node.psql( | ||||
|             "postgres", | ||||
|             "create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i") | ||||
|         for i in idx_ptrack: | ||||
|             if idx_ptrack[i]['type'] == 'heap': | ||||
|                 continue | ||||
|             node.psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format( | ||||
|                 i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column'])) | ||||
|  | ||||
|         node.psql('postgres', 'vacuum t_heap') | ||||
|         node.psql('postgres', 'checkpoint') | ||||
|  | ||||
|         for i in idx_ptrack: | ||||
|             # get the size of the heap and indexes, in pages | ||||
|             idx_ptrack[i]['old_size'] = self.get_fork_size(node, i) | ||||
|             # get path to heap and index files | ||||
|             idx_ptrack[i]['path'] = self.get_fork_path(node, i) | ||||
|             # calculate md5sums of pages | ||||
|             idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( | ||||
|                 idx_ptrack[i]['old_size'], idx_ptrack[i]['path']) | ||||
|  | ||||
|         self.init_pb(node) | ||||
|         self.backup_pb(node, backup_type='full', options=['-j100', '--stream']) | ||||
|  | ||||
|         node.psql('postgres', 'delete from t_heap where id > 128;') | ||||
|         node.psql('postgres', 'vacuum t_heap') | ||||
|         node.psql('postgres', 'checkpoint') | ||||
|  | ||||
|         for i in idx_ptrack: | ||||
|             # get the new size of the heap and indexes, in pages | ||||
|             idx_ptrack[i]['new_size'] = self.get_fork_size(node, i) | ||||
|             # update path to heap and index files in case they've changed | ||||
|             idx_ptrack[i]['path'] = self.get_fork_path(node, i) | ||||
|             # calculate new md5sums for pages | ||||
|             idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork( | ||||
|                 idx_ptrack[i]['new_size'], idx_ptrack[i]['path']) | ||||
|             # get ptrack for every idx | ||||
|             idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(idx_ptrack[i]['path']) | ||||
|  | ||||
|             # compare pages and check ptrack sanity | ||||
|             self.check_ptrack_sanity(idx_ptrack[i]) | ||||
|  | ||||
|         self.clean_pb(node) | ||||
|         node.stop() | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|     unittest.main() | ||||
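What distinguishes this variant: deleting the tail of the table (id > 128) leaves the trailing pages completely empty, so a plain VACUUM can truncate them and the fork actually shrinks; ptrack then has to agree with the shorter fork. A sketch of that effect, assuming a `node` prepared as in the test:

```python
# sizes in bytes; the tail pages are expected to be given back to the OS
old_bytes = node.execute(
    'postgres', "select pg_relation_size('t_heap')")[0][0]
node.psql('postgres', 'delete from t_heap where id > 128;')
node.psql('postgres', 'vacuum t_heap')
new_bytes = node.execute(
    'postgres', "select pg_relation_size('t_heap')")[0][0]
assert new_bytes < old_bytes   # trailing empty pages were truncated
```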
										
											
File diff suppressed because it is too large.
							| @@ -2,48 +2,54 @@ import unittest | ||||
| import os | ||||
| from os import path | ||||
| import six | ||||
| from .pb_lib import ProbackupTest | ||||
| from .ptrack_helpers import ProbackupTest | ||||
| from testgres import stop_all | ||||
|  | ||||
|  | ||||
| class OptionTest(ProbackupTest, unittest.TestCase): | ||||
|  | ||||
| 	def __init__(self, *args, **kwargs): | ||||
| 		super(OptionTest, self).__init__(*args, **kwargs) | ||||
|     def __init__(self, *args, **kwargs): | ||||
|         super(OptionTest, self).__init__(*args, **kwargs) | ||||
|  | ||||
| 	@classmethod | ||||
| 	def tearDownClass(cls): | ||||
| 		stop_all() | ||||
|     @classmethod | ||||
|     def tearDownClass(cls): | ||||
|         stop_all() | ||||
|  | ||||
| 	def test_ok_1(self): | ||||
| 		"""Status DONE and OK""" | ||||
| 		node = self.make_bnode(base_dir="tmp_dirs/show/ok_1") | ||||
| 		node.start() | ||||
| 		self.assertEqual(self.init_pb(node), six.b("")) | ||||
|     def test_show_1(self): | ||||
|         """Status DONE and OK""" | ||||
|         fname = self.id().split('.')[3] | ||||
|         print '{0} started'.format(fname) | ||||
|         node = self.make_simple_node(base_dir="tmp_dirs/show/{0}".format(fname), | ||||
|             set_archiving=True, | ||||
|             initdb_params=['--data-checksums'], | ||||
|             pg_options={'wal_level': 'replica'} | ||||
|             ) | ||||
|         node.start() | ||||
|         self.assertEqual(self.init_pb(node), six.b("")) | ||||
|  | ||||
| 		self.assertEqual( | ||||
| 			self.backup_pb(node, options=["--quiet"]), | ||||
| 			six.b("") | ||||
| 		) | ||||
| 		self.assertIn(six.b("OK"), self.show_pb(node, as_text=True)) | ||||
|         self.assertEqual( | ||||
|             self.backup_pb(node, options=["--quiet"]), | ||||
|             None | ||||
|         ) | ||||
|         self.assertIn(six.b("OK"), self.show_pb(node, as_text=True)) | ||||
|         node.stop() | ||||
|  | ||||
| 		node.stop() | ||||
|     def test_corrupt_2(self): | ||||
|         """Status CORRUPT""" | ||||
|         fname = self.id().split('.')[3] | ||||
|         print '{0} started'.format(fname) | ||||
|         node = self.make_simple_node(base_dir="tmp_dirs/show/{0}".format(fname), | ||||
|             set_archiving=True, | ||||
|             initdb_params=['--data-checksums'], | ||||
|             pg_options={'wal_level': 'replica'} | ||||
|             ) | ||||
|         node.start() | ||||
|         self.assertEqual(self.init_pb(node), six.b("")) | ||||
|         id_backup = self.backup_pb(node) | ||||
|  | ||||
| 	def test_corrupt_2(self): | ||||
| 		"""Status DONE and OK""" | ||||
| 		node = self.make_bnode(base_dir="tmp_dirs/show/corrupt_2") | ||||
| 		node.start() | ||||
| 		self.assertEqual(self.init_pb(node), six.b("")) | ||||
|         os.remove(path.join(self.backup_dir(node), "backups", id_backup.decode("utf-8"), "database", "postgresql.conf")) | ||||
|  | ||||
| 		self.assertEqual( | ||||
| 			self.backup_pb(node, options=["--quiet"]), | ||||
| 			six.b("") | ||||
| 		) | ||||
|  | ||||
| 		id_backup = self.show_pb(node)[0].id | ||||
| 		os.remove(path.join(self.backup_dir(node), "backups", id_backup.decode("utf-8"), "database", "postgresql.conf")) | ||||
|  | ||||
| 		self.validate_pb(node, id_backup) | ||||
| 		self.assertIn(six.b("CORRUPT"), self.show_pb(node, as_text=True)) | ||||
|  | ||||
| 		node.stop() | ||||
|         self.validate_pb(node, id_backup) | ||||
|         self.assertIn(six.b("CORRUPT"), self.show_pb(node, as_text=True)) | ||||
|         node.stop() | ||||
|   | ||||