
tests: new module "exclude.py" and minor fixes

Grigory Smolkin 2017-11-30 09:14:46 +03:00
parent b16dd21617
commit b269c84238
7 changed files with 203 additions and 46 deletions

tests/__init__.py

@@ -7,19 +7,24 @@ from . import init_test, option_test, show_test, \
    ptrack_vacuum_bits_frozen, ptrack_vacuum_bits_visibility, \
    ptrack_vacuum_full, ptrack_vacuum_truncate, pgpro560, pgpro589, \
    false_positive, replica, compression, page, ptrack, archive, \
    cfs_backup, cfs_restore, cfs_validate_backup
    exclude, cfs_backup, cfs_restore, cfs_validate_backup


def load_tests(loader, tests, pattern):
    suite = unittest.TestSuite()
    suite.addTests(loader.loadTestsFromModule(archive))
    suite.addTests(loader.loadTestsFromModule(backup_test))
    # suite.addTests(loader.loadTestsFromModule(cfs_backup))
    # suite.addTests(loader.loadTestsFromModule(cfs_restore))
    # suite.addTests(loader.loadTestsFromModule(cfs_validate_backup))
    # suite.addTests(loader.loadTestsFromModule(logging))
    suite.addTests(loader.loadTestsFromModule(compression))
    suite.addTests(loader.loadTestsFromModule(delete_test))
    suite.addTests(loader.loadTestsFromModule(exclude))
    suite.addTests(loader.loadTestsFromModule(false_positive))
    suite.addTests(loader.loadTestsFromModule(init_test))
    suite.addTests(loader.loadTestsFromModule(option_test))
    suite.addTests(loader.loadTestsFromModule(show_test))
    suite.addTests(loader.loadTestsFromModule(backup_test))
    suite.addTests(loader.loadTestsFromModule(delete_test))
    suite.addTests(loader.loadTestsFromModule(restore_test))
    suite.addTests(loader.loadTestsFromModule(validate_test))
    suite.addTests(loader.loadTestsFromModule(retention_test))
    suite.addTests(loader.loadTestsFromModule(page))
    suite.addTests(loader.loadTestsFromModule(ptrack))
    suite.addTests(loader.loadTestsFromModule(ptrack_clean))
    suite.addTests(loader.loadTestsFromModule(ptrack_cluster))

@@ -31,15 +36,12 @@ def load_tests(loader, tests, pattern):
    suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_full))
    suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_truncate))
    suite.addTests(loader.loadTestsFromModule(replica))
    suite.addTests(loader.loadTestsFromModule(restore_test))
    suite.addTests(loader.loadTestsFromModule(retention_test))
    suite.addTests(loader.loadTestsFromModule(show_test))
    suite.addTests(loader.loadTestsFromModule(validate_test))
    suite.addTests(loader.loadTestsFromModule(pgpro560))
    suite.addTests(loader.loadTestsFromModule(pgpro589))
    suite.addTests(loader.loadTestsFromModule(false_positive))
    suite.addTests(loader.loadTestsFromModule(compression))
    suite.addTests(loader.loadTestsFromModule(page))
    suite.addTests(loader.loadTestsFromModule(archive))
    # suite.addTests(loader.loadTestsFromModule(cfs_backup))
    # suite.addTests(loader.loadTestsFromModule(cfs_restore))
    # suite.addTests(loader.loadTestsFromModule(cfs_validate_backup))
    return suite
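
A note on the hunk above: the suite is assembled through unittest's standard load_tests protocol, so the new exclude module only runs once it is registered here. A minimal sketch, not part of the commit, of driving just the new module through the same loader machinery (assumes the tests package is importable from the repository root):

import unittest

# Load only the new module via the standard unittest loader;
# 'tests.exclude' is the module registered in the hunk above.
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('tests.exclude')
unittest.TextTestRunner(verbosity=2).run(suite)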

tests/backup_test.py

@@ -93,7 +93,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        # Clean after yourself
        self.del_test_dir(module_name, fname)

    #@unittest.skip("skip")
    # @unittest.skip("skip")
    def test_incremental_backup_without_full(self):
        """page-level backup without validated full backup"""
        fname = self.id().split('.')[3]

@@ -134,7 +134,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_incremental_backup_corrupt_full(self):
        """page-level backup with corrupted full backup"""
        fname = self.id().split('.')[3]

tests/cfs_backup.py

@@ -151,7 +151,7 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase):
            "ERROR: .cfm files not found in backup dir"
        )

    @unittest.expectedFailure
    # @unittest.expectedFailure
    # @unittest.skip("skip")
    # PGPRO-1018 invalid file size
    def test_fullbackup_after_create_table_stream(self):

@@ -193,7 +193,7 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase):
        )

    # --- Section: Incremental from empty tablespace --- #
    @unittest.expectedFailure
    # @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_fullbackup_empty_tablespace_ptrack_after_create_table(self):
        """

@@ -244,7 +244,7 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase):
            "ERROR: .cfm files not found in backup dir"
        )

    @unittest.expectedFailure
    # @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_fullbackup_empty_tablespace_ptrack_after_create_table_stream(self):
        """

@@ -406,7 +406,7 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase):
        )

    # --- Section: Incremental from fill tablespace --- #
    @unittest.expectedFailure
    # @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_fullbackup_after_create_table_ptrack_after_create_table(self):
        """

@@ -464,7 +464,7 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase):
            )
        )

    @unittest.expectedFailure
    # @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_fullbackup_after_create_table_ptrack_after_create_table_stream(self):
        """

@@ -822,7 +822,7 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase):
        )

    # --- Make backup with not valid data(broken .cfm) --- #
    @unittest.expectedFailure
    # @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_delete_random_cfm_file_from_tablespace_dir(self):
        self.node.safe_psql(

@@ -846,7 +846,7 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase):
            self.backup_node,self.backup_dir, 'node', self.node, backup_type='full'
        )

    @unittest.expectedFailure
    # @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_delete_file_pg_compression_from_tablespace_dir(self):
        os.remove(find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression'])[0])

@@ -856,7 +856,7 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase):
            self.backup_node,self.backup_dir, 'node', self.node, backup_type='full'
        )

    @unittest.expectedFailure
    # @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_delete_random_data_file_from_tablespace_dir(self):
        self.node.safe_psql(

@@ -880,7 +880,7 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase):
            self.backup_node,self.backup_dir, 'node', self.node, backup_type='full'
        )

    @unittest.expectedFailure
    # @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_broken_random_cfm_file_into_tablespace_dir(self):
        self.node.safe_psql(

@@ -904,7 +904,7 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase):
            self.backup_node,self.backup_dir, 'node', self.node, backup_type='full'
        )

    @unittest.expectedFailure
    # @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_broken_random_data_file_into_tablespace_dir(self):
        self.node.safe_psql(

@@ -928,7 +928,7 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase):
            self.backup_node,self.backup_dir, 'node', self.node, backup_type='full'
        )

    @unittest.expectedFailure
    # @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_broken_file_pg_compression_into_tablespace_dir(self):
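
The churn in this file swaps active @unittest.expectedFailure markers for commented-out copies next to # @unittest.skip("skip"). For reference, the two decorators behave differently when active; a standalone sketch using standard unittest, not code from this repo:

import unittest

class DecoratorDemo(unittest.TestCase):

    @unittest.expectedFailure
    def test_still_runs(self):
        # executed; a failure is reported as "expected failure",
        # a pass as "unexpected success"
        self.assertEqual(1, 0)

    @unittest.skip("skip")
    def test_never_runs(self):
        # never executed; reported as "skipped"
        self.fail("unreachable")

if __name__ == '__main__':
    unittest.main()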


@@ -1,15 +0,0 @@
class Foo(object):
    def __init__(self, *value1, **value2):
        # do something with the values
        print 'I think something is being called here'
        # print value1, value2


class MyFoo(Foo):
    def __init__(self, *args, **kwargs):
        # do something else, don't care about the args
        print args, kwargs
        super(MyFoo, self).__init__(*args, **kwargs)


foo = MyFoo('Python', 2.7, stack='overflow', ololo='lalala')

tests/exclude.py (new file)

@@ -0,0 +1,168 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException


module_name = 'exclude'


class ExcludeTest(ProbackupTest, unittest.TestCase):

    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_exclude_temp_tables(self):
        """make node without archiving, create temp table, take full backup, check that temp table is not present in backup catalogue"""
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'shared_buffers': '1GB',
                "fsync": "off", 'ptrack_enable': 'on'}
            )

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.start()

        conn = node.connect()
        with node.connect("postgres") as conn:

            conn.execute("create temp table test as select generate_series(0,50050000)::text")
            conn.commit()

            temp_schema_name = conn.execute("SELECT nspname FROM pg_namespace WHERE oid = pg_my_temp_schema()")[0][0]
            conn.commit()

            temp_toast_schema_name = "pg_toast_" + temp_schema_name.replace("pg_", "")
            conn.commit()

            conn.execute("create index test_idx on test (generate_series)")
            conn.commit()

            heap_path = conn.execute("select pg_relation_filepath('test')")[0][0]
            conn.commit()

            index_path = conn.execute("select pg_relation_filepath('test_idx')")[0][0]
            conn.commit()

            heap_oid = conn.execute("select 'test'::regclass::oid")[0][0]
            conn.commit()

            toast_path = conn.execute("select pg_relation_filepath('{0}.{1}')".format(temp_toast_schema_name, "pg_toast_" + str(heap_oid)))[0][0]
            conn.commit()

            toast_idx_path = conn.execute("select pg_relation_filepath('{0}.{1}')".format(temp_toast_schema_name, "pg_toast_" + str(heap_oid) + "_index"))[0][0]
            conn.commit()

            temp_table_filename = os.path.basename(heap_path)
            temp_idx_filename = os.path.basename(index_path)
            temp_toast_filename = os.path.basename(toast_path)
            temp_idx_toast_filename = os.path.basename(toast_idx_path)

            self.backup_node(backup_dir, 'node', node, backup_type='full', options=['--stream'])

            for root, dirs, files in os.walk(backup_dir):
                for file in files:
                    if file in [temp_table_filename, temp_table_filename + ".1",
                            temp_idx_filename,
                            temp_idx_filename + ".1",
                            temp_toast_filename,
                            temp_toast_filename + ".1",
                            temp_idx_toast_filename,
                            temp_idx_toast_filename + ".1"]:
                        self.assertEqual(1, 0, "Found temp table file in backup catalogue.\n Filepath: {0}".format(file))

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_exclude_unlogged_tables(self):
        """make node without archiving, create unlogged table, take full backup, check that unlogged table data files are not present in backup catalogue while their init forks are"""
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', "shared_buffers": "1GB", "fsync": "off", 'ptrack_enable': 'on'}
            )

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.start()

        conn = node.connect()
        with node.connect("postgres") as conn:

            conn.execute("create unlogged table test as select generate_series(0,50050000)::text")
            conn.commit()

            conn.execute("create index test_idx on test (generate_series)")
            conn.commit()

            heap_path = conn.execute("select pg_relation_filepath('test')")[0][0]
            conn.commit()

            index_path = conn.execute("select pg_relation_filepath('test_idx')")[0][0]
            conn.commit()
            index_init_path = index_path + "_init"

            heap_oid = conn.execute("select 'test'::regclass::oid")[0][0]
            conn.commit()

            toast_path = conn.execute("select pg_relation_filepath('{0}.{1}')".format("pg_toast", "pg_toast_" + str(heap_oid)))[0][0]
            conn.commit()
            toast_init_path = toast_path + "_init"

            toast_idx_path = conn.execute("select pg_relation_filepath('{0}.{1}')".format("pg_toast", "pg_toast_" + str(heap_oid) + "_index"))[0][0]
            conn.commit()
            toast_index_idx_path = toast_idx_path + "_init"

            unlogged_heap_filename = os.path.basename(heap_path)
            unlogged_heap_init_filename = unlogged_heap_filename + "_init"
            unlogged_idx_filename = os.path.basename(index_path)
            unlogged_idx_init_filename = unlogged_idx_filename + "_init"
            unlogged_toast_filename = os.path.basename(toast_path)
            unlogged_toast_init_filename = unlogged_toast_filename + "_init"
            unlogged_idx_toast_filename = os.path.basename(toast_idx_path)
            unlogged_idx_toast_init_filename = unlogged_idx_toast_filename + "_init"

            self.backup_node(backup_dir, 'node', node, backup_type='full', options=['--stream'])

            found_unlogged_heap_init = False
            found_unlogged_idx_init = False
            found_unlogged_toast = False
            found_unlogged_idx_toast_init = False

            for root, dirs, files in os.walk(backup_dir):
                for file in files:
                    if file in [unlogged_heap_filename, unlogged_heap_filename + ".1",
                            unlogged_idx_filename,
                            unlogged_idx_filename + ".1",
                            unlogged_toast_filename,
                            unlogged_toast_filename + ".1",
                            unlogged_idx_toast_filename,
                            unlogged_idx_toast_filename + ".1"]:
                        self.assertTrue(False, "Found unlogged table file in backup catalogue.\n Filepath: {0}".format(file))

                    if file == unlogged_heap_init_filename:
                        found_unlogged_heap_init = True

                    if file == unlogged_idx_init_filename:
                        found_unlogged_idx_init = True

                    if file == unlogged_toast_init_filename:
                        found_unlogged_toast = True

                    if file == unlogged_idx_toast_init_filename:
                        found_unlogged_idx_toast_init = True

            self.assertTrue(found_unlogged_heap_init, "{0} is not found in backup catalogue".format(unlogged_heap_init_filename))
            self.assertTrue(found_unlogged_idx_init, "{0} is not found in backup catalogue".format(unlogged_idx_init_filename))
            self.assertTrue(found_unlogged_toast, "{0} is not found in backup catalogue".format(unlogged_toast_filename))
            self.assertTrue(found_unlogged_idx_toast_init, "{0} is not found in backup catalogue".format(unlogged_idx_toast_init_filename))

        # Clean after yourself
        self.del_test_dir(module_name, fname)
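
Both tests lean on PostgreSQL's on-disk naming rules: pg_relation_filepath() returns the main-fork path, relations larger than 1 GB grow numbered segment files (.1, .2, ...), and unlogged relations carry an extra _init fork that a backup must keep. A small standalone sketch of that filename arithmetic; the helper name is hypothetical and not part of this module:

import os

def expected_filenames(relpath, with_init_fork=False, max_segments=2):
    # Derive on-disk names from a pg_relation_filepath() result: the main
    # file, its 1 GB segment files, and optionally the unlogged _init fork.
    base = os.path.basename(relpath)
    names = [base] + ["{0}.{1}".format(base, n) for n in range(1, max_segments)]
    if with_init_fork:
        names.append(base + "_init")
    return names

# expected_filenames('base/16384/16385', with_init_fork=True)
# returns ['16385', '16385.1', '16385_init']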

tests/helpers/ptrack_helpers.py

@@ -469,6 +469,8 @@ class ProbackupTest(object):
        for i in header_split:
            if i == '':
                header_split.remove(i)
                continue
        header_split = [header_element.rstrip() for header_element in header_split]
        for backup_record in body:
            # split string in list with string for every backup record element
            backup_record_split = re.split(" +", backup_record)
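
An aside on the hunk above: calling header_split.remove(i) while iterating over header_split is the classic mutate-during-iteration pitfall in Python, since the iterator skips the element after each removal and consecutive empty strings can survive. A hedged one-line alternative, not what the commit does:

# Filter out empty entries and strip trailing whitespace in one pass,
# without mutating the list while iterating over it:
header_split = [h.rstrip() for h in header_split if h != '']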
@@ -669,7 +671,7 @@ class ProbackupTest(object):
        for file in original_pgdata['files']:
            if file in restored_pgdata['files']:
                if original_pgdata['files'][file] != restored_pgdata['files'][file]:
                    error_message += '\nChecksumm mismatch.\n File_old: {0}\n Checksumm_old: {1}\n File_new: {2}\n Checksumm_mew: {3}\n'.format(
                    error_message += '\nChecksumm mismatch.\n File_old: {0}\n Checksumm_old: {1}\n File_new: {2}\n Checksumm_new: {3}\n'.format(
                        os.path.join(original_pgdata['pgdata'], file),
                        original_pgdata['files'][file],
                        os.path.join(restored_pgdata['pgdata'], file),

tests/pgpro560.py

@@ -73,14 +73,14 @@ class CheckSystemID(ProbackupTest, unittest.TestCase):
        self.add_instance(backup_dir, 'node1', node1)

        try:
            self.backup_node(backup_dir, 'node1', node1, data_dir=node2.data_dir, options=['--stream'])
            self.backup_node(backup_dir, 'node1', node2, options=['--stream'])
            # we should die here because exception is what we expect to happen
            self.assertEqual(1, 0, "Expecting Error because of of SYSTEM ID mismatch.\n Output: {0} \n CMD: {1}".format(
            self.assertEqual(1, 0, "Expecting Error because of SYSTEM ID mismatch.\n Output: {0} \n CMD: {1}".format(
                repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                'ERROR: Backup data directory was initialized for system id' in e.message
                and 'but target backup directory system id is' in e.message,
                and 'but connected instance system id is' in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
        try:
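
For reference, the identifier behind this check, recorded in the backup catalog and compared against the connected instance, is readable from SQL since PostgreSQL 9.6. A hedged sketch in the same connection style as the tests above; node1 is assumed to be the testgres-style node used in this file:

with node1.connect("postgres") as conn:
    # pg_control_system() (PostgreSQL 9.6+) exposes the cluster's
    # system identifier, the same value this test's error message reports.
    sysid = conn.execute("SELECT system_identifier FROM pg_control_system()")[0][0]
    print(sysid)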