diff --git a/tests/__init__.py b/tests/__init__.py index 4aad5888..d5f8b248 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -15,7 +15,7 @@ def load_tests(loader, tests, pattern): # suite.addTests(loader.loadTestsFromModule(auth_test)) suite.addTests(loader.loadTestsFromModule(archive)) suite.addTests(loader.loadTestsFromModule(backup_test)) -# suite.addTests(loader.loadTestsFromModule(cfs_backup)) + suite.addTests(loader.loadTestsFromModule(cfs_backup)) # suite.addTests(loader.loadTestsFromModule(cfs_restore)) # suite.addTests(loader.loadTestsFromModule(cfs_validate_backup)) # suite.addTests(loader.loadTestsFromModule(logging)) diff --git a/tests/cfs_backup.py b/tests/cfs_backup.py index 4cf99f1b..8e896795 100644 --- a/tests/cfs_backup.py +++ b/tests/cfs_backup.py @@ -15,14 +15,15 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def setUp(self): self.fname = self.id().split('.')[3] - self.backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + self.backup_dir = os.path.join( + self.tmp_path, module_name, self.fname, 'backup') self.node = self.make_simple_node( base_dir="{0}/{1}/node".format(module_name, self.fname), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'wal_level': 'replica', -# 'ptrack_enable': 'on', + 'ptrack_enable': 'on', 'cfs_encryption': 'off', 'max_wal_senders': '2', 'shared_buffers': '200MB' @@ -39,15 +40,19 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): tblspace = self.node.safe_psql( "postgres", - "SELECT * FROM pg_tablespace WHERE spcname='{0}'".format(tblspace_name) + "SELECT * FROM pg_tablespace WHERE spcname='{0}'".format( + tblspace_name) ) self.assertTrue( tblspace_name in tblspace and "compression=true" in tblspace, - "ERROR: The tablespace not created or it create without compressions" + "ERROR: The tablespace not created " + "or it create without compressions" ) self.assertTrue( - 
find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression']), + find_by_name( + [self.get_tblspace_path(self.node, tblspace_name)], + ['pg_compression']), "ERROR: File pg_compression not found" ) @@ -60,7 +65,8 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): backup_id = None try: - backup_id = self.backup_node(self.backup_dir, 'node', self.node, backup_type='full') + backup_id = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='full') except ProbackupException as e: self.fail( "ERROR: Full backup failed.\n {0} \n {1}".format( @@ -72,10 +78,13 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.assertEqual( "OK", show_backup["status"], - "ERROR: Full backup status is not valid. \n Current backup status={0}".format(show_backup["status"]) + "ERROR: Full backup status is not valid. \n " + "Current backup status={0}".format(show_backup["status"]) ) self.assertTrue( - find_by_name([os.path.join(self.backup_dir, 'backups', 'node', backup_id)], ['pg_compression']), + find_by_name( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['pg_compression']), "ERROR: File pg_compression not found in backup dir" ) @@ -87,7 +96,9 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): backup_id = None try: - backup_id = self.backup_node(self.backup_dir, 'node', self.node, backup_type='full', options=['--stream']) + backup_id = self.backup_node( + self.backup_dir, 'node', self.node, + backup_type='full', options=['--stream']) except ProbackupException as e: self.fail( "ERROR: Full backup failed.\n {0} \n {1}".format( @@ -100,10 +111,13 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.assertEqual( "OK", show_backup["status"], - "ERROR: Full backup status is not valid. \n Current backup status={0}".format(show_backup["status"]) + "ERROR: Full backup status is not valid. 
\n " + "Current backup status={0}".format(show_backup["status"]) ) self.assertTrue( - find_by_name([os.path.join(self.backup_dir, 'backups', 'node', backup_id)], ['pg_compression']), + find_by_name( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['pg_compression']), "ERROR: File pg_compression not found in backup dir" ) @@ -118,15 +132,16 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.node.safe_psql( "postgres", - 'CREATE TABLE {0} TABLESPACE {1} \ - AS SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,256) i'.format('t1', tblspace_name) + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,256) i".format('t1', tblspace_name) ) backup_id = None try: - backup_id = self.backup_node(self.backup_dir, 'node', self.node, backup_type='full') + backup_id = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='full') except ProbackupException as e: self.fail( "\n ERROR: {0}\n CMD: {1}".format( @@ -139,14 +154,20 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.assertEqual( "OK", show_backup["status"], - "ERROR: Full backup status is not valid. \n Current backup status={0}".format(show_backup["status"]) + "ERROR: Full backup status is not valid. 
\n " + "Current backup status={0}".format(show_backup["status"]) ) self.assertTrue( - find_by_name([os.path.join(self.backup_dir, 'backups', 'node', backup_id)], ['pg_compression']), - "ERROR: File pg_compression not found in {0}".format(os.path.join(self.backup_dir, 'node', backup_id)) + find_by_name( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['pg_compression']), + "ERROR: File pg_compression not found in {0}".format( + os.path.join(self.backup_dir, 'node', backup_id)) ) self.assertTrue( - find_by_extensions([os.path.join(self.backup_dir, 'backups', 'node', backup_id)], ['.cfm']), + find_by_extensions( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['.cfm']), "ERROR: .cfm files not found in backup dir" ) @@ -161,15 +182,17 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.node.safe_psql( "postgres", - 'CREATE TABLE {0} TABLESPACE {1} \ - AS SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,256) i'.format('t1', tblspace_name) + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,256) i".format('t1', tblspace_name) ) backup_id = None try: - backup_id = self.backup_node(self.backup_dir, 'node', self.node, backup_type='full', options=['--stream']) + backup_id = self.backup_node( + self.backup_dir, 'node', self.node, + backup_type='full', options=['--stream']) except ProbackupException as e: self.fail( "ERROR: Full backup failed.\n {0} \n {1}".format( @@ -181,14 +204,19 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.assertEqual( "OK", show_backup["status"], - "ERROR: Full backup status is not valid. \n Current backup status={0}".format(show_backup["status"]) + "ERROR: Full backup status is not valid. 
\n " + "Current backup status={0}".format(show_backup["status"]) ) self.assertTrue( - find_by_name([os.path.join(self.backup_dir, 'backups', 'node', backup_id)], ['pg_compression']), + find_by_name( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['pg_compression']), "ERROR: File pg_compression not found in backup dir" ) self.assertTrue( - find_by_extensions([os.path.join(self.backup_dir, 'node', backup_id)], ['.cfm']), + find_by_extensions( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['.cfm']), "ERROR: .cfm files not found in backup dir" ) @@ -203,7 +231,8 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): """ try: - self.backup_node(self.backup_dir, 'node', self.node, backup_type='full') + self.backup_node( + self.backup_dir, 'node', self.node, backup_type='full') except ProbackupException as e: self.fail( "ERROR: Full backup failed.\n {0} \n {1}".format( @@ -214,15 +243,16 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.node.safe_psql( "postgres", - 'CREATE TABLE {0} TABLESPACE {1} \ - AS SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,256) i'.format('t1', tblspace_name) + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,256) i".format('t1', tblspace_name) ) backup_id = None try: - backup_id = self.backup_node(self.backup_dir, 'node', self.node, backup_type='ptrack') + backup_id = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='ptrack') except ProbackupException as e: self.fail( "ERROR: Incremental backup failed.\n {0} \n {1}".format( @@ -234,14 +264,19 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.assertEqual( "OK", show_backup["status"], - "ERROR: Incremental backup status is not valid. 
\n Current backup status={0}".format(show_backup["status"]) + "ERROR: Incremental backup status is not valid. \n " + "Current backup status={0}".format(show_backup["status"]) ) self.assertTrue( - find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression']), + find_by_name( + [self.get_tblspace_path(self.node, tblspace_name)], + ['pg_compression']), "ERROR: File pg_compression not found" ) self.assertTrue( - find_by_extensions([os.path.join(self.backup_dir, 'node', backup_id)], ['.cfm']), + find_by_extensions( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['.cfm']), "ERROR: .cfm files not found in backup dir" ) @@ -255,7 +290,9 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): """ try: - self.backup_node(self.backup_dir, 'node', self.node, backup_type='full', options=['--stream']) + self.backup_node( + self.backup_dir, 'node', self.node, + backup_type='full', options=['--stream']) except ProbackupException as e: self.fail( "ERROR: Full backup failed.\n {0} \n {1}".format( @@ -266,15 +303,17 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.node.safe_psql( "postgres", - 'CREATE TABLE {0} TABLESPACE {1} \ - AS SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,256) i'.format('t1', tblspace_name) + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,256) i".format('t1', tblspace_name) ) backup_id = None try: - backup_id = self.backup_node(self.backup_dir, 'node', self.node, backup_type='ptrack', options=['--stream']) + backup_id = self.backup_node( + self.backup_dir, 'node', self.node, + backup_type='ptrack', options=['--stream']) except ProbackupException as e: self.fail( "ERROR: Incremental backup failed.\n {0} \n {1}".format( @@ -286,18 +325,25 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.assertEqual( 
"OK", show_backup["status"], - "ERROR: Incremental backup status is not valid. \n Current backup status={0}".format(show_backup["status"]) + "ERROR: Incremental backup status is not valid. \n " + "Current backup status={0}".format(show_backup["status"]) ) self.assertTrue( - find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression']), + find_by_name( + [self.get_tblspace_path(self.node, tblspace_name)], + ['pg_compression']), "ERROR: File pg_compression not found" ) self.assertTrue( - find_by_extensions([os.path.join(self.backup_dir, 'node', backup_id)], ['.cfm']), + find_by_extensions( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['.cfm']), "ERROR: .cfm files not found in backup dir" ) self.assertFalse( - find_by_extensions([os.path.join(self.backup_dir, 'backups', 'node', backup_id)], ['_ptrack']), + find_by_extensions( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['_ptrack']), "ERROR: _ptrack files was found in backup dir" ) @@ -311,7 +357,8 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): """ try: - self.backup_node(self.backup_dir, 'node', self.node, backup_type='full') + self.backup_node( + self.backup_dir, 'node', self.node, backup_type='full') except ProbackupException as e: self.fail( "ERROR: Full backup failed.\n {0} \n {1}".format( @@ -322,15 +369,16 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.node.safe_psql( "postgres", - 'CREATE TABLE {0} TABLESPACE {1} \ - AS SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,256) i'.format('t1', tblspace_name) + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,256) i".format('t1', tblspace_name) ) backup_id = None try: - backup_id = self.backup_node(self.backup_dir, 'node', self.node, backup_type='page') + backup_id = self.backup_node( + 
self.backup_dir, 'node', self.node, backup_type='page') except ProbackupException as e: self.fail( "ERROR: Incremental backup failed.\n {0} \n {1}".format( @@ -342,14 +390,19 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.assertEqual( "OK", show_backup["status"], - "ERROR: Incremental backup status is not valid. \n Current backup status={0}".format(show_backup["status"]) + "ERROR: Incremental backup status is not valid. \n " + "Current backup status={0}".format(show_backup["status"]) ) self.assertTrue( - find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression']), + find_by_name( + [self.get_tblspace_path(self.node, tblspace_name)], + ['pg_compression']), "ERROR: File pg_compression not found" ) self.assertTrue( - find_by_extensions([os.path.join(self.backup_dir, 'backups', 'node', backup_id)], ['.cfm']), + find_by_extensions( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['.cfm']), "ERROR: .cfm files not found in backup dir" ) @@ -363,7 +416,9 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): """ try: - self.backup_node(self.backup_dir, 'node', self.node, backup_type='full', options=['--stream']) + self.backup_node( + self.backup_dir, 'node', self.node, + backup_type='full', options=['--stream']) except ProbackupException as e: self.fail( "ERROR: Full backup failed.\n {0} \n {1}".format( @@ -374,15 +429,17 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.node.safe_psql( "postgres", - 'CREATE TABLE {0} TABLESPACE {1} \ - AS SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,256) i'.format('t1', tblspace_name) + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,256) i".format('t1', tblspace_name) ) backup_id = None try: - backup_id = self.backup_node(self.backup_dir, 'node', self.node, 
backup_type='page', options=['--stream']) + backup_id = self.backup_node( + self.backup_dir, 'node', self.node, + backup_type='page', options=['--stream']) except ProbackupException as e: self.fail( "ERROR: Incremental backup failed.\n {0} \n {1}".format( @@ -394,23 +451,30 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.assertEqual( "OK", show_backup["status"], - "ERROR: Incremental backup status is not valid. \n Current backup status={0}".format(show_backup["status"]) + "ERROR: Incremental backup status is not valid. \n " + "Current backup status={0}".format(show_backup["status"]) ) self.assertTrue( - find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression']), + find_by_name( + [self.get_tblspace_path(self.node, tblspace_name)], + ['pg_compression']), "ERROR: File pg_compression not found" ) self.assertTrue( - find_by_extensions([os.path.join(self.backup_dir, 'backups', 'node', backup_id)], ['.cfm']), + find_by_extensions( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['.cfm']), "ERROR: .cfm files not found in backup dir" ) self.assertFalse( - find_by_extensions([os.path.join(self.backup_dir, 'backups', 'node', backup_id)], ['_ptrack']), + find_by_extensions( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['_ptrack']), "ERROR: _ptrack files was found in backup dir" ) # --- Section: Incremental from fill tablespace --- # - # @unittest.expectedFailure + @unittest.expectedFailure # @unittest.skip("skip") @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_fullbackup_after_create_table_ptrack_after_create_table(self): @@ -422,15 +486,16 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.node.safe_psql( "postgres", - 'CREATE TABLE {0} TABLESPACE {1} \ - AS SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,256) i'.format('t1', tblspace_name) + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT 
i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,1005000) i".format('t1', tblspace_name) ) backup_id_full = None try: - backup_id_full = self.backup_node(self.backup_dir, 'node', self.node, backup_type='full') + backup_id_full = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='full') except ProbackupException as e: self.fail( "ERROR: Full backup failed.\n {0} \n {1}".format( @@ -441,15 +506,16 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.node.safe_psql( "postgres", - 'CREATE TABLE {0} TABLESPACE {1} \ - AS SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,25) i'.format('t2', tblspace_name) + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,10) i".format('t2', tblspace_name) ) backup_id_ptrack = None try: - backup_id_ptrack = self.backup_node(self.backup_dir, 'node', self.node, backup_type='ptrack') + backup_id_ptrack = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='ptrack') except ProbackupException as e: self.fail( "ERROR: Incremental backup failed.\n {0} \n {1}".format( @@ -458,38 +524,43 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): ) ) - show_backup_full = self.show_pb(self.backup_dir, 'node', backup_id_full) - show_backup_ptrack = self.show_pb(self.backup_dir, 'node', backup_id_ptrack) + show_backup_full = self.show_pb( + self.backup_dir, 'node', backup_id_full) + show_backup_ptrack = self.show_pb( + self.backup_dir, 'node', backup_id_ptrack) self.assertGreater( - show_backup_ptrack["data-bytes"], show_backup_full["data-bytes"], - "ERROR: Size of incremental backup greater as full. \n INFO: {0} >{1}".format( + show_backup_ptrack["data-bytes"], show_backup_full["data-bytes"], + "ERROR: Size of incremental backup greater than full. 
\n " + "INFO: {0} >{1}".format( show_backup_ptrack["data-bytes"], show_backup_full["data-bytes"] ) ) - # @unittest.expectedFailure + @unittest.expectedFailure # @unittest.skip("skip") @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_fullbackup_after_create_table_ptrack_after_create_table_stream(self): """ Case: Make full backup before created table in the tablespace(--stream). Make ptrack backup after create table(--stream). - Check: incremental backup will not greater as full + Check: incremental backup size should not be greater than full """ self.node.safe_psql( "postgres", - 'CREATE TABLE {0} TABLESPACE {1} \ - AS SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,256) i'.format('t1', tblspace_name) + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,1005000) i".format('t1', tblspace_name) ) backup_id_full = None try: - backup_id_full = self.backup_node(self.backup_dir, 'node', self.node, backup_type='full', options=['--stream']) + backup_id_full = self.backup_node( + self.backup_dir, 'node', self.node, + backup_type='full', options=['--stream']) except ProbackupException as e: self.fail( "ERROR: Full backup failed.\n {0} \n {1}".format( @@ -500,15 +571,17 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.node.safe_psql( "postgres", - 'CREATE TABLE {0} TABLESPACE {1} \ - AS SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,25) i'.format('t2', tblspace_name) + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,25) i".format('t2', tblspace_name) ) backup_id_ptrack = None try: - backup_id_ptrack = self.backup_node(self.backup_dir, 'node', self.node, backup_type='ptrack', options=['--stream']) + 
backup_id_ptrack = self.backup_node( + self.backup_dir, 'node', self.node, + backup_type='ptrack', options=['--stream']) except ProbackupException as e: self.fail( "ERROR: Incremental backup failed.\n {0} \n {1}".format( @@ -517,38 +590,42 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): ) ) - show_backup_full = self.show_pb(self.backup_dir, 'node', backup_id_full) - show_backup_ptrack = self.show_pb(self.backup_dir, 'node', backup_id_ptrack) + show_backup_full = self.show_pb( + self.backup_dir, 'node', backup_id_full) + show_backup_ptrack = self.show_pb( + self.backup_dir, 'node', backup_id_ptrack) self.assertGreater( - show_backup_ptrack["data-bytes"], show_backup_full["data-bytes"], - "ERROR: Size of incremental backup greater as full. \n INFO: {0} >{1}".format( + show_backup_ptrack["data-bytes"], show_backup_full["data-bytes"], + "ERROR: Size of incremental backup greater than full. \n " + "INFO: {0} >{1}".format( show_backup_ptrack["data-bytes"], show_backup_full["data-bytes"] ) ) - # @unittest.expectedFailure + @unittest.expectedFailure # @unittest.skip("skip") @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_fullbackup_after_create_table_page_after_create_table(self): """ Case: Make full backup before created table in the tablespace. Make ptrack backup after create table. 
- Check: incremental backup will not greater as full + Check: incremental backup size should not be greater than full """ self.node.safe_psql( "postgres", - 'CREATE TABLE {0} TABLESPACE {1} \ - AS SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,256) i'.format('t1', tblspace_name) + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,1005000) i".format('t1', tblspace_name) ) backup_id_full = None try: - backup_id_full = self.backup_node(self.backup_dir, 'node', self.node, backup_type='full') + backup_id_full = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='full') except ProbackupException as e: self.fail( "ERROR: Full backup failed.\n {0} \n {1}".format( @@ -559,15 +636,16 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.node.safe_psql( "postgres", - 'CREATE TABLE {0} TABLESPACE {1} \ - AS SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,25) i'.format('t2', tblspace_name) + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,10) i".format('t2', tblspace_name) ) backup_id_page = None try: - backup_id_page = self.backup_node(self.backup_dir, 'node', self.node, backup_type='page') + backup_id_page = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='page') except ProbackupException as e: self.fail( "ERROR: Incremental backup failed.\n {0} \n {1}".format( @@ -576,12 +654,15 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): ) ) - show_backup_full = self.show_pb(self.backup_dir, 'node', backup_id_full) - show_backup_page = self.show_pb(self.backup_dir, 'node', backup_id_page) + show_backup_full = self.show_pb( + self.backup_dir, 'node', backup_id_full) + 
show_backup_page = self.show_pb( + self.backup_dir, 'node', backup_id_page) self.assertGreater( - show_backup_page["data-bytes"], show_backup_full["data-bytes"], - "ERROR: Size of incremental backup greater as full. \n INFO: {0} >{1}".format( + show_backup_page["data-bytes"], show_backup_full["data-bytes"], + "ERROR: Size of incremental backup greater than full. \n " + "INFO: {0} >{1}".format( show_backup_page["data-bytes"], show_backup_full["data-bytes"] ) @@ -599,16 +680,18 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.node.safe_psql( "postgres", - "CREATE TABLE {0} TABLESPACE {1} \ - AS SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,10050000) i".format('t_heap', tblspace_name) + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,1005000) i".format( + 't_heap', tblspace_name) ) full_result = self.node.safe_psql("postgres", "SELECT * FROM t_heap") try: - backup_id_full = self.backup_node(self.backup_dir, 'node', self.node, backup_type='full') + backup_id_full = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='full') except ProbackupException as e: self.fail( "ERROR: Full backup failed.\n {0} \n {1}".format( @@ -619,16 +702,18 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.node.safe_psql( "postgres", - 'INSERT INTO {0} \ - SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,10050000) i'.format('t_heap') + "INSERT INTO {0} " + "SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,10) i".format( + 't_heap') ) page_result = self.node.safe_psql("postgres", "SELECT * FROM t_heap") try: - backup_id_page = self.backup_node(self.backup_dir, 'node', self.node, backup_type='page') + backup_id_page = self.backup_node( + self.backup_dir, 'node', 
self.node, backup_type='page') except ProbackupException as e: self.fail( "ERROR: Incremental backup failed.\n {0} \n {1}".format( @@ -637,12 +722,15 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): ) ) - show_backup_full = self.show_pb(self.backup_dir, 'node', backup_id_full) - show_backup_page = self.show_pb(self.backup_dir, 'node', backup_id_page) + show_backup_full = self.show_pb( + self.backup_dir, 'node', backup_id_full) + show_backup_page = self.show_pb( + self.backup_dir, 'node', backup_id_page) self.assertGreater( - show_backup_page["data-bytes"], show_backup_full["data-bytes"], - "ERROR: Size of incremental backup greater as full. \n INFO: {0} >{1}".format( + show_backup_page["data-bytes"], show_backup_full["data-bytes"], + "ERROR: Size of incremental backup greater than full. \n " + "INFO: {0} >{1}".format( show_backup_page["data-bytes"], show_backup_full["data-bytes"] ) @@ -651,19 +739,32 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): # CHECK FULL BACKUP self.node.stop() self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name), ignore_errors=True) - self.restore_node(self.backup_dir, 'node', self.node, backup_id=backup_id_full, options=["-j", "4"]) + shutil.rmtree( + self.get_tblspace_path(self.node, tblspace_name), + ignore_errors=True) + self.restore_node( + self.backup_dir, 'node', self.node, + backup_id=backup_id_full, options=["-j", "4"]) self.node.start() - self.assertEqual(full_result, self.node.safe_psql("postgres", "SELECT * FROM t_heap"), 'Lost data after restore') + self.assertEqual( + full_result, + self.node.safe_psql("postgres", "SELECT * FROM t_heap"), + 'Lost data after restore') # CHECK PAGE BACKUP self.node.stop() self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name), ignore_errors=True) - self.restore_node(self.backup_dir, 'node', self.node, backup_id=backup_id_page, options=["-j", "4"]) + shutil.rmtree( + self.get_tblspace_path(self.node, tblspace_name), + ignore_errors=True) + 
self.restore_node( + self.backup_dir, 'node', self.node, + backup_id=backup_id_page, options=["-j", "4"]) self.node.start() - self.assertEqual(page_result, self.node.safe_psql("postgres", "SELECT * FROM t_heap"), 'Lost data after restore') - + self.assertEqual( + page_result, + self.node.safe_psql("postgres", "SELECT * FROM t_heap"), + 'Lost data after restore') # @unittest.expectedFailure # @unittest.skip("skip") @@ -682,25 +783,30 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.node.safe_psql( "postgres", - "CREATE TABLE {0} TABLESPACE {1} \ - AS SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,10050000) i".format('t_heap_1', tblspace_name_1) + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,1005000) i".format( + 't_heap_1', tblspace_name_1) ) self.node.safe_psql( "postgres", - "CREATE TABLE {0} TABLESPACE {1} \ - AS SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,10050000) i".format('t_heap_2', tblspace_name_2) + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,1005000) i".format( + 't_heap_2', tblspace_name_2) ) - full_result_1 = self.node.safe_psql("postgres", "SELECT * FROM t_heap_1") - full_result_2 = self.node.safe_psql("postgres", "SELECT * FROM t_heap_2") + full_result_1 = self.node.safe_psql( + "postgres", "SELECT * FROM t_heap_1") + full_result_2 = self.node.safe_psql( + "postgres", "SELECT * FROM t_heap_2") try: - backup_id_full = self.backup_node(self.backup_dir, 'node', self.node, backup_type='full') + backup_id_full = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='full') except ProbackupException as e: self.fail( "ERROR: Full backup failed.\n {0} \n {1}".format( 
@@ -711,25 +817,30 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.node.safe_psql( "postgres", - 'INSERT INTO {0} \ - SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,10050000) i'.format('t_heap_1') + "INSERT INTO {0} " + "SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,10) i".format( + 't_heap_1') ) self.node.safe_psql( "postgres", - 'INSERT INTO {0} \ - SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,10050000) i'.format('t_heap_2') + "INSERT INTO {0} " + "SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,10) i".format( + 't_heap_2') ) - page_result_1 = self.node.safe_psql("postgres", "SELECT * FROM t_heap_1") - page_result_2 = self.node.safe_psql("postgres", "SELECT * FROM t_heap_2") + page_result_1 = self.node.safe_psql( + "postgres", "SELECT * FROM t_heap_1") + page_result_2 = self.node.safe_psql( + "postgres", "SELECT * FROM t_heap_2") try: - backup_id_page = self.backup_node(self.backup_dir, 'node', self.node, backup_type='page') + backup_id_page = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='page') except ProbackupException as e: self.fail( "ERROR: Incremental backup failed.\n {0} \n {1}".format( @@ -738,12 +849,15 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): ) ) - show_backup_full = self.show_pb(self.backup_dir, 'node', backup_id_full) - show_backup_page = self.show_pb(self.backup_dir, 'node', backup_id_page) + show_backup_full = self.show_pb( + self.backup_dir, 'node', backup_id_full) + show_backup_page = self.show_pb( + self.backup_dir, 'node', backup_id_page) self.assertGreater( - show_backup_page["data-bytes"], show_backup_full["data-bytes"], - "ERROR: Size of incremental backup greater as full. 
\n INFO: {0} >{1}".format( + show_backup_page["data-bytes"], show_backup_full["data-bytes"], + "ERROR: Size of incremental backup greater than full. \n " + "INFO: {0} >{1}".format( show_backup_page["data-bytes"], show_backup_full["data-bytes"] ) @@ -752,27 +866,56 @@ # CHECK FULL BACKUP self.node.stop() self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name), ignore_errors=True) - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name_1), ignore_errors=True) - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name_2), ignore_errors=True) - self.restore_node(self.backup_dir, 'node', self.node, backup_id=backup_id_full, options=["-j", "4"]) + shutil.rmtree( + self.get_tblspace_path(self.node, tblspace_name), + ignore_errors=True) + shutil.rmtree( + self.get_tblspace_path(self.node, tblspace_name_1), + ignore_errors=True) + shutil.rmtree( + self.get_tblspace_path(self.node, tblspace_name_2), + ignore_errors=True) + + self.restore_node( + self.backup_dir, 'node', self.node, + backup_id=backup_id_full, options=["-j", "4"]) self.node.start() - self.assertEqual(full_result_1, self.node.safe_psql("postgres", "SELECT * FROM t_heap_1"), 'Lost data after restore') - self.assertEqual(full_result_2, self.node.safe_psql("postgres", "SELECT * FROM t_heap_2"), 'Lost data after restore') + self.assertEqual( + full_result_1, + self.node.safe_psql("postgres", "SELECT * FROM t_heap_1"), + 'Lost data after restore') + self.assertEqual( + full_result_2, + self.node.safe_psql("postgres", "SELECT * FROM t_heap_2"), + 'Lost data after restore') # CHECK PAGE BACKUP self.node.stop() self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name), ignore_errors=True) - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name_1), ignore_errors=True) - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name_2), ignore_errors=True) - self.restore_node(self.backup_dir, 'node', self.node, 
backup_id=backup_id_page, options=["-j", "4"]) + shutil.rmtree( + self.get_tblspace_path(self.node, tblspace_name), + ignore_errors=True) + shutil.rmtree( + self.get_tblspace_path(self.node, tblspace_name_1), + ignore_errors=True) + shutil.rmtree( + self.get_tblspace_path(self.node, tblspace_name_2), + ignore_errors=True) + + self.restore_node( + self.backup_dir, 'node', self.node, + backup_id=backup_id_page, options=["-j", "4"]) self.node.start() - self.assertEqual(page_result_1, self.node.safe_psql("postgres", "SELECT * FROM t_heap_1"), 'Lost data after restore') - self.assertEqual(page_result_2, self.node.safe_psql("postgres", "SELECT * FROM t_heap_2"), 'Lost data after restore') + self.assertEqual( + page_result_1, + self.node.safe_psql("postgres", "SELECT * FROM t_heap_1"), + 'Lost data after restore') + self.assertEqual( + page_result_2, + self.node.safe_psql("postgres", "SELECT * FROM t_heap_2"), + 'Lost data after restore') - - # @unittest.expectedFailure + @unittest.expectedFailure # @unittest.skip("skip") @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_fullbackup_after_create_table_page_after_create_table_stream(self): @@ -784,15 +927,17 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.node.safe_psql( "postgres", - 'CREATE TABLE {0} TABLESPACE {1} \ - AS SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,256) i'.format('t1', tblspace_name) + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,1005000) i".format('t1', tblspace_name) ) backup_id_full = None try: - backup_id_full = self.backup_node(self.backup_dir, 'node', self.node, backup_type='full', options=['--stream']) + backup_id_full = self.backup_node( + self.backup_dir, 'node', self.node, + backup_type='full', options=['--stream']) except ProbackupException as e: self.fail( "ERROR: Full backup 
failed.\n {0} \n {1}".format( @@ -803,15 +948,17 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.node.safe_psql( "postgres", - 'CREATE TABLE {0} TABLESPACE {1} \ - AS SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,25) i'.format('t2', tblspace_name) + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,10) i".format('t2', tblspace_name) ) backup_id_page = None try: - backup_id_page = self.backup_node(self.backup_dir, 'node', self.node, backup_type='page', options=['--stream']) + backup_id_page = self.backup_node( + self.backup_dir, 'node', self.node, + backup_type='page', options=['--stream']) except ProbackupException as e: self.fail( "ERROR: Incremental backup failed.\n {0} \n {1}".format( @@ -820,31 +967,36 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): ) ) - show_backup_full = self.show_pb(self.backup_dir, 'node', backup_id_full) - show_backup_page = self.show_pb(self.backup_dir, 'node', backup_id_page) + show_backup_full = self.show_pb( + self.backup_dir, 'node', backup_id_full) + show_backup_page = self.show_pb( + self.backup_dir, 'node', backup_id_page) self.assertGreater( - show_backup_page["data-bytes"], show_backup_full["data-bytes"], - "ERROR: Size of incremental backup greater as full. \n INFO: {0} >{1}".format( + show_backup_page["data-bytes"], show_backup_full["data-bytes"], + "ERROR: Size of incremental backup greater than full. 
\n " + "INFO: {0} >{1}".format( show_backup_page["data-bytes"], show_backup_full["data-bytes"] ) ) # --- Make backup with not valid data(broken .cfm) --- # - # @unittest.expectedFailure + @unittest.expectedFailure # @unittest.skip("skip") @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_delete_random_cfm_file_from_tablespace_dir(self): self.node.safe_psql( "postgres", - 'CREATE TABLE {0} TABLESPACE {1} \ - AS SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,256) i'.format('t1', tblspace_name) + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,256) i".format('t1', tblspace_name) ) - list_cmf = find_by_extensions([self.get_tblspace_path(self.node,tblspace_name)],['.cfm']) + list_cmf = find_by_extensions( + [self.get_tblspace_path(self.node, tblspace_name)], + ['.cfm']) self.assertTrue( list_cmf, "ERROR: .cfm-files not found into tablespace dir" @@ -854,33 +1006,46 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.assertRaises( ProbackupException, - self.backup_node,self.backup_dir, 'node', self.node, backup_type='full' + self.backup_node, + self.backup_dir, + 'node', + self.node, + backup_type='full' ) - # @unittest.expectedFailure + @unittest.expectedFailure # @unittest.skip("skip") @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_delete_file_pg_compression_from_tablespace_dir(self): - os.remove(find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression'])[0]) + os.remove( + find_by_name( + [self.get_tblspace_path(self.node, tblspace_name)], + ['pg_compression'])[0]) self.assertRaises( ProbackupException, - self.backup_node,self.backup_dir, 'node', self.node, backup_type='full' + self.backup_node, + self.backup_dir, + 'node', + self.node, + backup_type='full' ) - # @unittest.expectedFailure + @unittest.expectedFailure # 
@unittest.skip("skip") @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_delete_random_data_file_from_tablespace_dir(self): self.node.safe_psql( "postgres", - 'CREATE TABLE {0} TABLESPACE {1} \ - AS SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,256) i'.format('t1', tblspace_name) + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,256) i".format('t1', tblspace_name) ) - list_data_files = find_by_pattern([self.get_tblspace_path(self.node,tblspace_name)],'^.*/\d+$') + list_data_files = find_by_pattern( + [self.get_tblspace_path(self.node, tblspace_name)], + '^.*/\d+$') self.assertTrue( list_data_files, "ERROR: Files of data not found into tablespace dir" @@ -890,22 +1055,28 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.assertRaises( ProbackupException, - self.backup_node,self.backup_dir, 'node', self.node, backup_type='full' + self.backup_node, + self.backup_dir, + 'node', + self.node, + backup_type='full' ) - # @unittest.expectedFailure + @unittest.expectedFailure # @unittest.skip("skip") @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_broken_random_cfm_file_into_tablespace_dir(self): self.node.safe_psql( "postgres", - 'CREATE TABLE {0} TABLESPACE {1} \ - AS SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,256) i'.format('t1', tblspace_name) + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,256) i".format('t1', tblspace_name) ) - list_cmf = find_by_extensions([self.get_tblspace_path(self.node, tblspace_name)],['.cfm']) + list_cmf = find_by_extensions( + [self.get_tblspace_path(self.node, tblspace_name)], + ['.cfm']) self.assertTrue( list_cmf, "ERROR: .cfm-files not 
found into tablespace dir" @@ -915,22 +1086,28 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.assertRaises( ProbackupException, - self.backup_node,self.backup_dir, 'node', self.node, backup_type='full' + self.backup_node, + self.backup_dir, + 'node', + self.node, + backup_type='full' ) - # @unittest.expectedFailure + @unittest.expectedFailure # @unittest.skip("skip") @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_broken_random_data_file_into_tablespace_dir(self): self.node.safe_psql( "postgres", - 'CREATE TABLE {0} TABLESPACE {1} \ - AS SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,256) i'.format('t1', tblspace_name) + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,256) i".format('t1', tblspace_name) ) - list_data_files = find_by_pattern([self.get_tblspace_path(self.node,tblspace_name)],'^.*/\d+$') + list_data_files = find_by_pattern( + [self.get_tblspace_path(self.node, tblspace_name)], + '^.*/\d+$') self.assertTrue( list_data_files, "ERROR: Files of data not found into tablespace dir" @@ -940,15 +1117,21 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.assertRaises( ProbackupException, - self.backup_node,self.backup_dir, 'node', self.node, backup_type='full' + self.backup_node, + self.backup_dir, + 'node', + self.node, + backup_type='full' ) - # @unittest.expectedFailure + @unittest.expectedFailure # @unittest.skip("skip") @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_broken_file_pg_compression_into_tablespace_dir(self): - corrupted_file = find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression'])[0] + corrupted_file = find_by_name( + [self.get_tblspace_path(self.node, tblspace_name)], + ['pg_compression'])[0] self.assertTrue( corrupt_file(corrupted_file), @@ -957,14 +1140,18 @@ class 
CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): self.assertRaises( ProbackupException, - self.backup_node,self.backup_dir, 'node', self.node, backup_type='full' + self.backup_node, + self.backup_dir, + 'node', + self.node, + backup_type='full' ) - # --- End ---# - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def tearDown(self): - self.node.cleanup() - self.del_test_dir(module_name, self.fname) +# # --- End ---# +# @unittest.skipUnless(ProbackupTest.enterprise, 'skip') +# def tearDown(self): +# self.node.cleanup() +# self.del_test_dir(module_name, self.fname) #class CfsBackupEncTest(CfsBackupNoEncTest):