mirror of https://github.com/postgrespro/pg_probackup.git synced 2025-07-16 07:14:15 +02:00

[Issue #169] test coverage

Grigory Smolkin
2020-02-21 22:19:52 +03:00
parent ba8c4caae0
commit b15c7f1587
7 changed files with 667 additions and 127 deletions

View File

@@ -1023,6 +1023,11 @@ class BackupTest(ProbackupTest, unittest.TestCase):
         # FULL backup
         self.backup_node(backup_dir, 'node', node, options=['--stream'])

+        node.safe_psql(
+            "postgres",
+            "insert into t_heap select i"
+            " as id from generate_series(101,102) i")
+
         # PAGE backup
         gdb = self.backup_node(
             backup_dir, 'node', node, backup_type='page',
@@ -1039,11 +1044,10 @@ class BackupTest(ProbackupTest, unittest.TestCase):
         pgdata = self.pgdata_content(node.data_dir)

-        with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:
-            log_content = f.read()
-
-        self.assertTrue(
-            'LOG: File "{0}" is not found'.format(absolute_path) in log_content,
-            'File "{0}" should be deleted but it`s not'.format(absolute_path))
+        backup_id = self.show_pb(backup_dir, 'node')[1]['id']
+
+        filelist = self.get_backup_filelist(backup_dir, 'node', backup_id)
+        self.assertNotIn(relative_path, filelist)

         node.cleanup()
         self.restore_node(backup_dir, 'node', node, options=["-j", "4"])
@@ -1347,7 +1351,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
             backup_dir, 'node', node, gdb=True,
             options=['--stream', '--log-level-file=LOG'])

-        gdb.set_breakpoint('copy_file')
+        gdb.set_breakpoint('backup_non_data_file')
         gdb.run_until_break()

         gdb.continue_execution_until_break(20)
@@ -1385,7 +1389,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
             backup_dir, 'node', node, gdb=True,
             options=['--stream', '--log-level-file=LOG'])

-        gdb.set_breakpoint('copy_file')
+        gdb.set_breakpoint('backup_non_data_file')
         gdb.run_until_break()

         gdb.continue_execution_until_break(20)
@@ -1422,7 +1426,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
         gdb = self.backup_node(
             backup_dir, 'node', node, gdb=True, options=['--stream'])

-        gdb.set_breakpoint('copy_file')
+        gdb.set_breakpoint('backup_non_data_file')
         gdb.run_until_break()

         gdb.continue_execution_until_break(20)
@@ -1513,7 +1517,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
                     repr(self.output), self.cmd))
         except ProbackupException as e:
             self.assertIn(
-                'ERROR: cannot open file',
+                'ERROR: Cannot open file',
                 e.message,
                 '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                     repr(e.message), self.cmd))
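
Note: the rewritten assertion in the second hunk stops scraping pg_probackup.log and instead checks the file list stored in the backup's own metadata. The pattern, roughly (self.show_pb and self.get_backup_filelist are this suite's helpers; relative_path is the file expected to be excluded):

    # id of the PAGE backup (second entry in the show output)
    backup_id = self.show_pb(backup_dir, 'node')[1]['id']
    # file list recorded in the backup's metadata
    filelist = self.get_backup_filelist(backup_dir, 'node', backup_id)
    # the dropped file must be absent from the backup
    self.assertNotIn(relative_path, filelist)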

View File

@@ -85,8 +85,8 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
         pgbench = node.pgbench(
             stdout=subprocess.PIPE,
             stderr=subprocess.STDOUT,
-            options=["-c", "4", "-T", "20"]
-        )
+            options=["-c", "4", "-T", "20"])

         pgbench.wait()
         pgbench.stdout.close()
@@ -105,6 +105,44 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
         pgdata_restored = self.pgdata_content(node_restored.data_dir)
         self.compare_pgdata(pgdata, pgdata_restored)

+        node.safe_psql(
+            'postgres',
+            'create table tmp as select * from pgbench_accounts where aid < 1000')
+
+        node.safe_psql(
+            'postgres',
+            'delete from pgbench_accounts')
+
+        node.safe_psql(
+            'postgres',
+            'VACUUM')
+
+        self.backup_node(backup_dir, 'node', node, backup_type='page')
+
+        pgdata = self.pgdata_content(node.data_dir)
+
+        node_restored.cleanup()
+
+        self.restore_node(
+            backup_dir, 'node', node_restored, options=["-j", "4"])
+
+        pgdata_restored = self.pgdata_content(node_restored.data_dir)
+        self.compare_pgdata(pgdata, pgdata_restored)
+
+        node.safe_psql(
+            'postgres',
+            'insert into pgbench_accounts select * from pgbench_accounts')
+
+        self.backup_node(backup_dir, 'node', node, backup_type='page')
+
+        pgdata = self.pgdata_content(node.data_dir)
+
+        node_restored.cleanup()
+
+        self.restore_node(
+            backup_dir, 'node', node_restored, options=["-j", "4"])
+
+        pgdata_restored = self.pgdata_content(node_restored.data_dir)
+        self.compare_pgdata(pgdata, pgdata_restored)
+
         # Clean after yourself
         self.del_test_dir(module_name, fname)
@@ -118,8 +156,7 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
             base_dir=os.path.join(module_name, fname, 'node'),
             set_replication=True,
             initdb_params=['--data-checksums'],
-            pg_options={
-                'autovacuum': 'off'})
+            pg_options={'autovacuum': 'off'})

         self.init_pb(backup_dir, old_binary=True)
         self.show_pb(backup_dir)
@@ -189,8 +226,7 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
         pgbench.wait()
         pgbench.stdout.close()

-        self.backup_node(
-            backup_dir, 'node', node, backup_type='delta')
+        self.backup_node(backup_dir, 'node', node, backup_type='delta')

         if self.paranoia:
             pgdata = self.pgdata_content(node.data_dir)
@@ -204,6 +240,44 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
         pgdata_restored = self.pgdata_content(node_restored.data_dir)
         self.compare_pgdata(pgdata, pgdata_restored)

+        node.safe_psql(
+            'postgres',
+            'create table tmp as select * from pgbench_accounts where aid < 1000')
+
+        node.safe_psql(
+            'postgres',
+            'delete from pgbench_accounts')
+
+        node.safe_psql(
+            'postgres',
+            'VACUUM')
+
+        self.backup_node(backup_dir, 'node', node, backup_type='delta')
+
+        pgdata = self.pgdata_content(node.data_dir)
+
+        node_restored.cleanup()
+
+        self.restore_node(
+            backup_dir, 'node', node_restored, options=["-j", "4"])
+
+        pgdata_restored = self.pgdata_content(node_restored.data_dir)
+        self.compare_pgdata(pgdata, pgdata_restored)
+
+        node.safe_psql(
+            'postgres',
+            'insert into pgbench_accounts select * from pgbench_accounts')
+
+        self.backup_node(backup_dir, 'node', node, backup_type='delta')
+
+        pgdata = self.pgdata_content(node.data_dir)
+
+        node_restored.cleanup()
+
+        self.restore_node(
+            backup_dir, 'node', node_restored, options=["-j", "4"])
+
+        pgdata_restored = self.pgdata_content(node_restored.data_dir)
+        self.compare_pgdata(pgdata, pgdata_restored)
+
         # Clean after yourself
         self.del_test_dir(module_name, fname)
@@ -530,3 +604,87 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):

         # Clean after yourself
         self.del_test_dir(module_name, fname)
+
+    # @unittest.expectedFailure
+    # @unittest.skip("skip")
+    def test_backward_compatibility_merge_1(self):
+        """
+        Create node, take FULL and PAGE backups with old binary,
+        merge them with new binary.
+        old binary version =< 2.2.7
+        """
+        fname = self.id().split('.')[3]
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        node = self.make_simple_node(
+            base_dir=os.path.join(module_name, fname, 'node'),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'autovacuum': 'off'})
+
+        self.init_pb(backup_dir, old_binary=True)
+        self.add_instance(backup_dir, 'node', node, old_binary=True)
+
+        self.set_archiving(backup_dir, 'node', node, old_binary=True)
+        node.slow_start()
+
+        node.pgbench_init(scale=1)
+
+        # FULL backup with OLD binary
+        self.backup_node(
+            backup_dir, 'node', node,
+            old_binary=True)
+
+        pgbench = node.pgbench(
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            options=["-c", "4", "-T", "10"])
+        pgbench.wait()
+        pgbench.stdout.close()
+
+        # PAGE1 backup with OLD binary
+        backup_id = self.backup_node(
+            backup_dir, 'node', node,
+            backup_type='page', old_binary=True)
+
+        node.safe_psql(
+            'postgres',
+            'DELETE from pgbench_accounts')
+
+        node.safe_psql(
+            'postgres',
+            'VACUUM pgbench_accounts')
+
+        # PAGE2 backup with OLD binary
+        backup_id = self.backup_node(
+            backup_dir, 'node', node,
+            backup_type='page', old_binary=True)
+
+        # PAGE3 backup with OLD binary
+        backup_id = self.backup_node(
+            backup_dir, 'node', node,
+            backup_type='page', old_binary=True)
+
+        pgdata = self.pgdata_content(node.data_dir)
+
+        # merge chain created by old binary with new binary
+        output = self.merge_backup(
+            backup_dir, "node", backup_id)
+
+        # check that in-place merge is disabled
+        self.assertIn(
+            "WARNING: In-place merge is disabled "
+            "because of program versions mismatch", output)
+
+        # restore merged backup
+        node_restored = self.make_simple_node(
+            base_dir=os.path.join(module_name, fname, 'node_restored'))
+        node_restored.cleanup()
+
+        self.restore_node(
+            backup_dir, 'node', node_restored, options=["-j", "4"])
+
+        pgdata_restored = self.pgdata_content(node_restored.data_dir)
+        self.compare_pgdata(pgdata, pgdata_restored)
+
+        # Clean after yourself
+        self.del_test_dir(module_name, fname)
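
Note: the heart of the new cross-version test is the assertion that a chain taken by an old binary cannot be merged in place by the new one, and that pg_probackup says so explicitly. Reduced to its essence (merge_backup is the suite's wrapper around pg_probackup merge):

    output = self.merge_backup(backup_dir, "node", backup_id)
    # in-place merge requires matching program versions on both sides
    self.assertIn(
        "WARNING: In-place merge is disabled "
        "because of program versions mismatch", output)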

View File

@@ -833,6 +833,9 @@ class ProbackupTest(object):
         if backup_type:
             cmd_list += ['-b', backup_type]

+        if not old_binary:
+            cmd_list += ['--no-sync']
+
         return self.run_pb(cmd_list + options, asynchronous, gdb, old_binary, return_id)

     def checkdb_node(
@@ -889,6 +892,9 @@ class ProbackupTest(object):
         if backup_id:
             cmd_list += ['-i', backup_id]

+        if not old_binary:
+            cmd_list += ['--no-sync']
+
         return self.run_pb(cmd_list + options, old_binary=old_binary)

     def show_pb(
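
Note: both helper hunks add the same guard, because --no-sync (skip syncing backed-up files to disk, which speeds the tests up) only exists in the newer binary. In isolation:

    # old binaries would reject an unknown option, so only pass
    # --no-sync when the current binary is being exercised
    if not old_binary:
        cmd_list += ['--no-sync']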

View File

@@ -33,7 +33,7 @@ class LockingTest(ProbackupTest, unittest.TestCase):
         gdb = self.backup_node(
             backup_dir, 'node', node, gdb=True)

-        gdb.set_breakpoint('copy_file')
+        gdb.set_breakpoint('backup_non_data_file')
         gdb.run_until_break()

         gdb.continue_execution_until_break(20)
@@ -87,7 +87,7 @@ class LockingTest(ProbackupTest, unittest.TestCase):
         gdb = self.backup_node(
             backup_dir, 'node', node, gdb=True)

-        gdb.set_breakpoint('copy_file')
+        gdb.set_breakpoint('backup_non_data_file')
         gdb.run_until_break()

         gdb.continue_execution_until_break(20)
@@ -156,7 +156,7 @@ class LockingTest(ProbackupTest, unittest.TestCase):
         gdb = self.backup_node(
             backup_dir, 'node', node, gdb=True)

-        gdb.set_breakpoint('copy_file')
+        gdb.set_breakpoint('backup_non_data_file')
         gdb.run_until_break()

         gdb.continue_execution_until_break(20)
@@ -253,7 +253,7 @@ class LockingTest(ProbackupTest, unittest.TestCase):
         gdb = self.backup_node(
             backup_dir, 'node', node, gdb=True)

-        gdb.set_breakpoint('copy_file')
+        gdb.set_breakpoint('backup_non_data_file')
         gdb.run_until_break()

         gdb.continue_execution_until_break(20)
@@ -399,7 +399,7 @@ class LockingTest(ProbackupTest, unittest.TestCase):
                 restore_id) in e.message and
             'is using backup {0} and still is running'.format(
                 backup_id) in e.message and
-            'ERROR: Cannot lock backup directory' in e.message,
+            'ERROR: Cannot lock backup' in e.message,
             '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                 repr(e.message), self.cmd))
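
Note: these four hunks are the same rename applied to the locking tests' shared technique: suspend pg_probackup mid-backup under gdb so the in-progress backup keeps holding its lock while a concurrent command is exercised against it. In outline (the gdb methods are the suite's wrapper):

    gdb = self.backup_node(backup_dir, 'node', node, gdb=True)  # run backup under gdb
    gdb.set_breakpoint('backup_non_data_file')  # renamed from copy_file after refactoring
    gdb.run_until_break()
    gdb.continue_execution_until_break(20)      # backup now suspended mid-copy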

View File

@@ -31,7 +31,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         node.slow_start()

         # Do full backup
-        self.backup_node(backup_dir, "node", node)
+        self.backup_node(backup_dir, "node", node, options=['--compress'])
         show_backup = self.show_pb(backup_dir, "node")[0]

         self.assertEqual(show_backup["status"], "OK")
@@ -45,7 +45,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
             conn.commit()

         # Do first page backup
-        self.backup_node(backup_dir, "node", node, backup_type="page")
+        self.backup_node(backup_dir, "node", node, backup_type="page", options=['--compress'])
         show_backup = self.show_pb(backup_dir, "node")[1]

         # sanity check
@@ -60,7 +60,9 @@ class MergeTest(ProbackupTest, unittest.TestCase):
             conn.commit()

         # Do second page backup
-        self.backup_node(backup_dir, "node", node, backup_type="page")
+        self.backup_node(
+            backup_dir, "node", node,
+            backup_type="page", options=['--compress'])
         show_backup = self.show_pb(backup_dir, "node")[2]
         page_id = show_backup["id"]
@@ -1047,7 +1049,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True)

-        gdb.set_breakpoint('copy_file')
+        gdb.set_breakpoint('backup_non_data_file_internal')
         gdb.run_until_break()

         gdb.continue_execution_until_break(5)
@@ -1068,7 +1070,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         # Clean after yourself
         self.del_test_dir(module_name, fname)

-    @unittest.skip("skip")
+    # @unittest.skip("skip")
     def test_continue_failed_merge_with_corrupted_delta_backup(self):
         """
         Fail merge via gdb, corrupt DELTA backup, try to continue merge
@@ -1121,7 +1123,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         # Failed MERGE
         gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True)
-        gdb.set_breakpoint('copy_file')
+        gdb.set_breakpoint('backup_non_data_file_internal')
         gdb.run_until_break()
         gdb.continue_execution_until_break(2)
@@ -1158,7 +1160,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
                     repr(self.output), self.cmd))
         except ProbackupException as e:
             self.assertTrue(
-                "ERROR: Merging of backup {0} failed".format(
+                "ERROR: Backup {0} has status CORRUPT, merge is aborted".format(
                     backup_id) in e.message,
                 '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                     repr(e.message), self.cmd))
@@ -1217,8 +1219,12 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         gdb.run_until_break()

+        gdb._execute('thread apply all bt')
+
         gdb.continue_execution_until_break(20)

+        gdb._execute('thread apply all bt')
+
         gdb._execute('signal SIGKILL')

         print(self.show_pb(backup_dir, as_text=True, as_json=False))
@@ -1234,8 +1240,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
     def test_continue_failed_merge_3(self):
         """
-        Check that failed MERGE can`t be continued after target backup deleting
-        Create FULL and 2 PAGE backups
+        Check that failed MERGE cannot be continued if intermediate
+        backup is missing.
         """
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@@ -1297,14 +1303,14 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         gdb = self.merge_backup(backup_dir, "node", backup_id_merge, gdb=True)

-        gdb.set_breakpoint('copy_file')
+        gdb.set_breakpoint('backup_non_data_file_internal')
         gdb.run_until_break()

         gdb.continue_execution_until_break(2)

         gdb._execute('signal SIGKILL')

         print(self.show_pb(backup_dir, as_text=True, as_json=False))
-        print(os.path.join(backup_dir, "backups", "node", backup_id_delete))
+        # print(os.path.join(backup_dir, "backups", "node", backup_id_delete))

         # DELETE PAGE1
         shutil.rmtree(
@@ -1320,8 +1326,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
                     repr(self.output), self.cmd))
         except ProbackupException as e:
             self.assertTrue(
-                "ERROR: Parent full backup for the given backup {0} was not found".format(
-                    backup_id_merge) in e.message,
+                "ERROR: Incremental chain is broken, "
+                "merge is impossible to finish" in e.message,
                 '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                     repr(e.message), self.cmd))
@@ -1545,7 +1551,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
             backup_dir, 'backups',
             'node', full_id, 'database', fsm_path)

-        print(file_to_remove)
+        # print(file_to_remove)

         os.remove(file_to_remove)
@@ -1701,9 +1707,6 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         gdb.set_breakpoint('delete_backup_files')
         gdb.run_until_break()

-        gdb.set_breakpoint('parray_bsearch')
-        gdb.continue_execution_until_break()
-
         gdb.set_breakpoint('pgFileDelete')
         gdb.continue_execution_until_break(20)
@@ -1711,7 +1714,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         # backup half-merged
         self.assertEqual(
-            'MERGING', self.show_pb(backup_dir, 'node')[0]['status'])
+            'MERGED', self.show_pb(backup_dir, 'node')[0]['status'])

         self.assertEqual(
             full_id, self.show_pb(backup_dir, 'node')[0]['id'])
@@ -1731,9 +1734,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
                     repr(self.output), self.cmd))
         except ProbackupException as e:
             self.assertTrue(
-                "ERROR: Parent full backup for the given "
-                "backup {0} was not found".format(
-                    page_id_2) in e.message,
+                "ERROR: Full backup {0} has unfinished merge with backup {1}".format(
+                    full_id, page_id) in e.message,
                 '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                     repr(e.message), self.cmd))
@@ -1764,7 +1766,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         page_1 = self.backup_node(
             backup_dir, 'node', node, backup_type='page')

-        # Change FULL B backup status to ERROR
+        # Change PAGE1 backup status to ERROR
         self.change_backup_status(backup_dir, 'node', page_1, 'ERROR')

         pgdata = self.pgdata_content(node.data_dir)
@@ -1773,11 +1775,11 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         pgbench = node.pgbench(options=['-T', '10', '-c', '2', '--no-vacuum'])
         pgbench.wait()

-        # take PAGE backup
+        # take PAGE2 backup
         page_id = self.backup_node(
             backup_dir, 'node', node, backup_type='page')

-        # Change FULL B backup status to ERROR
+        # Change PAGE1 backup status to OK
         self.change_backup_status(backup_dir, 'node', page_1, 'OK')

         gdb = self.merge_backup(
@@ -1787,8 +1789,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         gdb.set_breakpoint('delete_backup_files')
         gdb.run_until_break()

-        gdb.set_breakpoint('parray_bsearch')
-        gdb.continue_execution_until_break()
+        # gdb.set_breakpoint('parray_bsearch')
+        # gdb.continue_execution_until_break()

         gdb.set_breakpoint('pgFileDelete')
         gdb.continue_execution_until_break(30)
@@ -1800,6 +1802,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         # restore
         node.cleanup()
         try:
+            #self.restore_node(backup_dir, 'node', node, backup_id=page_1)
             self.restore_node(backup_dir, 'node', node)
             self.assertEqual(
                 1, 0,
@@ -1815,6 +1818,158 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         self.del_test_dir(module_name, fname)

+    def test_failed_merge_after_delete_2(self):
+        """
+        """
+        fname = self.id().split('.')[3]
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        node = self.make_simple_node(
+            base_dir=os.path.join(module_name, fname, 'node'),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'autovacuum': 'off'})
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        # take FULL backup
+        full_id = self.backup_node(
+            backup_dir, 'node', node, options=['--stream'])
+
+        node.pgbench_init(scale=1)
+
+        page_1 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        # add data
+        pgbench = node.pgbench(options=['-T', '10', '-c', '2', '--no-vacuum'])
+        pgbench.wait()
+
+        # take PAGE2 backup
+        page_2 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        gdb = self.merge_backup(
+            backup_dir, 'node', page_2, gdb=True,
+            options=['--log-level-console=VERBOSE'])
+
+        gdb.set_breakpoint('pgFileDelete')
+        gdb.run_until_break()
+        gdb.continue_execution_until_break(2)
+        gdb._execute('signal SIGKILL')
+
+        self.delete_pb(backup_dir, 'node', backup_id=page_2)
+
+        # rerun merge
+        try:
+            #self.restore_node(backup_dir, 'node', node, backup_id=page_1)
+            self.merge_backup(backup_dir, 'node', page_1)
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because of backup is missing.\n "
+                "Output: {0} \n CMD: {1}".format(
+                    repr(self.output), self.cmd))
+        except ProbackupException as e:
+            self.assertIn(
+                "ERROR: Full backup {0} has unfinished merge "
+                "with backup {1}".format(full_id, page_2),
+                e.message,
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                    repr(e.message), self.cmd))

+        self.del_test_dir(module_name, fname)
+
+    def test_failed_merge_after_delete_3(self):
+        """
+        """
+        fname = self.id().split('.')[3]
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        node = self.make_simple_node(
+            base_dir=os.path.join(module_name, fname, 'node'),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'autovacuum': 'off'})
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        # add database
+        node.safe_psql(
+            'postgres',
+            'CREATE DATABASE testdb')
+
+        dboid = node.safe_psql(
+            "postgres",
+            "select oid from pg_database where datname = 'testdb'").rstrip()
+
+        # take FULL backup
+        full_id = self.backup_node(
+            backup_dir, 'node', node, options=['--stream'])
+
+        # drop database
+        node.safe_psql(
+            'postgres',
+            'DROP DATABASE testdb')
+
+        # take PAGE backup
+        page_id = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        # create database
+        node.safe_psql(
+            'postgres',
+            'create DATABASE testdb')
+
+        page_id_2 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        gdb = self.merge_backup(
+            backup_dir, 'node', page_id,
+            gdb=True, options=['--log-level-console=verbose'])
+
+        gdb.set_breakpoint('delete_backup_files')
+        gdb.run_until_break()
+
+        gdb.set_breakpoint('pgFileDelete')
+        gdb.continue_execution_until_break(20)
+
+        gdb._execute('signal SIGKILL')
+
+        # backup half-merged
+        self.assertEqual(
+            'MERGED', self.show_pb(backup_dir, 'node')[0]['status'])
+
+        self.assertEqual(
+            full_id, self.show_pb(backup_dir, 'node')[0]['id'])
+
+        db_path = os.path.join(
+            backup_dir, 'backups', 'node', full_id)
+
+        # FULL backup is missing now
+        shutil.rmtree(db_path)
+
+        try:
+            self.merge_backup(
+                backup_dir, 'node', page_id_2,
+                options=['--log-level-console=verbose'])
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because of missing parent.\n "
+                "Output: {0} \n CMD: {1}".format(
+                    repr(self.output), self.cmd))
+        except ProbackupException as e:
+            self.assertTrue(
+                "ERROR: Failed to find parent full backup for {0}".format(
+                    page_id_2) in e.message,
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                    repr(e.message), self.cmd))
+
+        self.del_test_dir(module_name, fname)
+
     # @unittest.skip("skip")
     def test_merge_backup_from_future(self):
         """
@@ -2085,8 +2240,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
                     repr(self.output), self.cmd))
         except ProbackupException as e:
             self.assertTrue(
-                "ERROR: Parent full backup for the given "
-                "backup {0} was not found".format(
+                "ERROR: Failed to find parent full backup for {0}".format(
                     page_id_a3) in e.message,
                 '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                     repr(e.message), self.cmd))
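
Note: several of these merge tests now expect a MERGED status rather than MERGING after the process is killed. As these tests use it, MERGING appears while file transfer is still in progress, whereas MERGED marks a merge whose data transfer completed and only cleanup/renaming remained, so a retry can finish it. The check itself, roughly:

    # after killing the merge between file-deletion breakpoints,
    # the FULL backup is expected to be left in MERGED state
    self.assertEqual(
        'MERGED', self.show_pb(backup_dir, 'node')[0]['status'])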

View File

@@ -928,7 +928,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
             pg_options={
                 'autovacuum': 'off',
                 'checkpoint_timeout': '1h',
-                'wal_level': 'replica'})
+                'wal_level': 'replica',
+                'shared_buffers': '128MB'})

         if self.get_version(master) < self.version_to_num('9.6.0'):
             self.del_test_dir(module_name, fname)
@@ -966,14 +967,14 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
         self.switch_wal_segment(master)
         self.switch_wal_segment(master)

-        self.wait_until_replica_catch_with_master(master, replica)
-
         master.safe_psql(
             'postgres',
             'CREATE TABLE t1 AS '
             'SELECT i, repeat(md5(i::text),5006056) AS fat_attr '
             'FROM generate_series(0,10) i')

+        self.wait_until_replica_catch_with_master(master, replica)
+
         output = self.backup_node(
             backup_dir, 'replica', replica,
             options=[

View File

@@ -585,46 +585,22 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
             options=['--retention-window=1', '--expired', '--merge-expired'])

         self.assertIn(
-            "Merge incremental chain between FULL backup {0} and backup {1}".format(
+            "Merge incremental chain between full backup {0} and backup {1}".format(
                 backup_id_a, page_id_a2),
             output)

         self.assertIn(
-            "Merging backup {0} with backup {1}".format(
-                page_id_a1, backup_id_a), output)
-
-        self.assertIn(
-            "Rename {0} to {1}".format(
-                backup_id_a, page_id_a1), output)
-
-        self.assertIn(
-            "Merging backup {0} with backup {1}".format(
-                page_id_a2, page_id_a1), output)
-
-        self.assertIn(
-            "Rename {0} to {1}".format(
-                page_id_a1, page_id_a2), output)
+            "Rename merged full backup {0} to {1}".format(
+                backup_id_a, page_id_a2), output)

         self.assertIn(
-            "Merge incremental chain between FULL backup {0} and backup {1}".format(
+            "Merge incremental chain between full backup {0} and backup {1}".format(
                 backup_id_b, page_id_b2),
             output)

         self.assertIn(
-            "Merging backup {0} with backup {1}".format(
-                page_id_b1, backup_id_b), output)
-
-        self.assertIn(
-            "Rename {0} to {1}".format(
-                backup_id_b, page_id_b1), output)
-
-        self.assertIn(
-            "Merging backup {0} with backup {1}".format(
-                page_id_b2, page_id_b1), output)
-
-        self.assertIn(
-            "Rename {0} to {1}".format(
-                page_id_b1, page_id_b2), output)
+            "Rename merged full backup {0} to {1}".format(
+                backup_id_b, page_id_b2), output)

         self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2)
@@ -979,64 +955,295 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
         output = self.delete_expired(
             backup_dir, 'node',
             options=[
-                '--retention-window=1', '--expired',
+                '--retention-window=1', '--delete-expired',
+                '--merge-expired', '--log-level-console=log'])
+
+        self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2)
+
+        # Merging chain A
+        self.assertIn(
+            "Merge incremental chain between full backup {0} and backup {1}".format(
+                backup_id_a, page_id_a3),
+            output)
+
+        self.assertIn(
+            "INFO: Rename merged full backup {0} to {1}".format(
+                backup_id_a, page_id_a3), output)
+
+        # self.assertIn(
+        #     "WARNING: Backup {0} has multiple valid descendants. "
+        #     "Automatic merge is not possible.".format(
+        #         page_id_a1), output)
+
+        self.assertIn(
+            "LOG: Consider backup {0} for purge".format(
+                page_id_a2), output)
+
+        # Merge chain B
+        self.assertIn(
+            "Merge incremental chain between full backup {0} and backup {1}".format(
+                backup_id_b, page_id_b3),
+            output)
+
+        self.assertIn(
+            "INFO: Rename merged full backup {0} to {1}".format(
+                backup_id_b, page_id_b3), output)
+
+        self.assertIn(
+            "Delete: {0}".format(page_id_a2), output)
+
+        self.assertEqual(
+            self.show_pb(backup_dir, 'node')[1]['id'],
+            page_id_b3)
+
+        self.assertEqual(
+            self.show_pb(backup_dir, 'node')[0]['id'],
+            page_id_a3)
+
+        self.assertEqual(
+            self.show_pb(backup_dir, 'node')[1]['backup-mode'],
+            'FULL')
+
+        self.assertEqual(
+            self.show_pb(backup_dir, 'node')[0]['backup-mode'],
+            'FULL')
+
+        # Clean after yourself
+        self.del_test_dir(module_name, fname)
+
+    # @unittest.skip("skip")
+    def test_basic_window_merge_multiple_descendants_1(self):
+        """
+        PAGEb3
+          |                 PAGEa3
+        -----------------------------retention window
+        PAGEb2               /
+          |       PAGEa2    /
+        PAGEb1       \     /
+          |           PAGEa1
+        FULLb          |
+                      FULLa
+        """
+        fname = self.id().split('.')[3]
+        node = self.make_simple_node(
+            base_dir=os.path.join(module_name, fname, 'node'),
+            initdb_params=['--data-checksums'],
+            pg_options={'autovacuum': 'off'})
+
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        node.pgbench_init(scale=3)
+
+        # Take FULL BACKUPs
+        backup_id_a = self.backup_node(backup_dir, 'node', node)
+        # pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
+        # pgbench.wait()
+
+        backup_id_b = self.backup_node(backup_dir, 'node', node)
+        # pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
+        # pgbench.wait()
+
+        # Change FULLb backup status to ERROR
+        self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
+
+        page_id_a1 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        # pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
+        # pgbench.wait()
+
+        # Change FULLb to OK
+        self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
+
+        # Change PAGEa1 to ERROR
+        self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR')
+
+        # PAGEa1 ERROR
+        # FULLb OK
+        # FULLa OK
+
+        page_id_b1 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        # PAGEb1 OK
+        # PAGEa1 ERROR
+        # FULLb OK
+        # FULLa OK
+
+        # pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
+        # pgbench.wait()
+
+        # Change PAGEa1 to OK
+        self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK')
+
+        # Change PAGEb1 and FULLb to ERROR
+        self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
+        self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
+
+        # PAGEb1 ERROR
+        # PAGEa1 OK
+        # FULLb ERROR
+        # FULLa OK
+
+        page_id_a2 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        # pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
+        # pgbench.wait()
+
+        # PAGEa2 OK
+        # PAGEb1 ERROR
+        # PAGEa1 OK
+        # FULLb ERROR
+        # FULLa OK
+
+        # Change PAGEb1 and FULLb to OK
+        self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
+        self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
+
+        # Change PAGEa2 and FULLa to ERROR
+        self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR')
+        self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR')
+
+        # PAGEa2 ERROR
+        # PAGEb1 OK
+        # PAGEa1 OK
+        # FULLb OK
+        # FULLa ERROR
+
+        page_id_b2 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        # pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
+        # pgbench.wait()
+
+        # PAGEb2 OK
+        # PAGEa2 ERROR
+        # PAGEb1 OK
+        # PAGEa1 OK
+        # FULLb OK
+        # FULLa ERROR
+
+        # Change PAGEb2 and PAGEb1 to ERROR
+        self.change_backup_status(backup_dir, 'node', page_id_b2, 'ERROR')
+        self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
+
+        # and FULL stuff
+        self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK')
+        self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
+
+        # PAGEb2 ERROR
+        # PAGEa2 ERROR
+        # PAGEb1 ERROR
+        # PAGEa1 OK
+        # FULLb ERROR
+        # FULLa OK
+
+        page_id_a3 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        # pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
+        # pgbench.wait()
+
+        # PAGEa3 OK
+        # PAGEb2 ERROR
+        # PAGEa2 ERROR
+        # PAGEb1 ERROR
+        # PAGEa1 OK
+        # FULLb ERROR
+        # FULLa OK
+
+        # Change PAGEa3 to ERROR
+        self.change_backup_status(backup_dir, 'node', page_id_a3, 'ERROR')
+
+        # Change PAGEb2, PAGEb1 and FULLb to OK
+        self.change_backup_status(backup_dir, 'node', page_id_b2, 'OK')
+        self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
+        self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
+
+        page_id_b3 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        # PAGEb3 OK
+        # PAGEa3 ERROR
+        # PAGEb2 OK
+        # PAGEa2 ERROR
+        # PAGEb1 OK
+        # PAGEa1 OK
+        # FULLb OK
+        # FULLa OK
+
+        # Change PAGEa3, PAGEa2 and PAGEb1 status to OK
+        self.change_backup_status(backup_dir, 'node', page_id_a3, 'OK')
+        self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK')
+        self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
+
+        # PAGEb3 OK
+        # PAGEa3 OK
+        # PAGEb2 OK
+        # PAGEa2 OK
+        # PAGEb1 OK
+        # PAGEa1 OK
+        # FULLb OK
+        # FULLa OK
+
+        # Check that page_id_a3 and page_id_a2 are both direct descendants of page_id_a1
+        self.assertEqual(
+            self.show_pb(
+                backup_dir, 'node', backup_id=page_id_a3)['parent-backup-id'],
+            page_id_a1)
+
+        self.assertEqual(
+            self.show_pb(
+                backup_dir, 'node', backup_id=page_id_a2)['parent-backup-id'],
+            page_id_a1)
+
+        # Purge backups
+        backups = os.path.join(backup_dir, 'backups', 'node')
+        for backup in os.listdir(backups):
+            if backup in [page_id_a3, page_id_b3, 'pg_probackup.conf']:
+                continue
+
+            with open(
+                    os.path.join(
+                        backups, backup, "backup.control"), "a") as conf:
+                conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format(
+                    datetime.now() - timedelta(days=3)))
+
+        output = self.delete_expired(
+            backup_dir, 'node',
+            options=[
+                '--retention-window=1',
                 '--merge-expired', '--log-level-console=log'])

         self.assertEqual(len(self.show_pb(backup_dir, 'node')), 3)

         # Merging chain A
         self.assertIn(
-            "Merge incremental chain between FULL backup {0} and backup {1}".format(
+            "Merge incremental chain between full backup {0} and backup {1}".format(
                 backup_id_a, page_id_a3),
             output)

         self.assertIn(
-            "Merging backup {0} with backup {1}".format(
-                page_id_a1, backup_id_a), output)
-
-        self.assertIn(
-            "INFO: Rename {0} to {1}".format(
-                backup_id_a, page_id_a1), output)
+            "INFO: Rename merged full backup {0} to {1}".format(
+                backup_id_a, page_id_a3), output)
+
+        # self.assertIn(
+        #     "WARNING: Backup {0} has multiple valid descendants. "
+        #     "Automatic merge is not possible.".format(
+        #         page_id_a1), output)
+
+        self.assertIn(
+            "WARNING: Backup {0} has multiple valid descendants. "
+            "Automatic merge is not possible.".format(
+                page_id_a1), output)

         # Merge chain B
         self.assertIn(
-            "Merge incremental chain between FULL backup {0} and backup {1}".format(
-                backup_id_b, page_id_b3),
-            output)
+            "Merge incremental chain between full backup {0} and backup {1}".format(
+                backup_id_b, page_id_b3), output)

         self.assertIn(
-            "Merging backup {0} with backup {1}".format(
-                page_id_b1, backup_id_b), output)
-
-        self.assertIn(
-            "INFO: Rename {0} to {1}".format(
-                backup_id_b, page_id_b1), output)
-
-        self.assertIn(
-            "Merging backup {0} with backup {1}".format(
-                page_id_b2, page_id_b1), output)
-
-        self.assertIn(
-            "INFO: Rename {0} to {1}".format(
-                page_id_b1, page_id_b2), output)
-
-        self.assertIn(
-            "Merging backup {0} with backup {1}".format(
-                page_id_b3, page_id_b2), output)
-
-        self.assertIn(
-            "INFO: Rename {0} to {1}".format(
-                page_id_b2, page_id_b3), output)
+            "INFO: Rename merged full backup {0} to {1}".format(
+                backup_id_b, page_id_b3), output)
+
+        # this backup deleted because it is not guarded by retention
+        self.assertIn(
+            "INFO: Delete: {0}".format(
+                page_id_a1), output)

         self.assertEqual(
             self.show_pb(backup_dir, 'node')[2]['id'],
@@ -1048,7 +1255,7 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
         self.assertEqual(
             self.show_pb(backup_dir, 'node')[0]['id'],
-            page_id_a1)
+            page_id_a2)

         self.assertEqual(
             self.show_pb(backup_dir, 'node')[2]['backup-mode'],
@@ -1056,11 +1263,17 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
         self.assertEqual(
             self.show_pb(backup_dir, 'node')[1]['backup-mode'],
-            'PAGE')
+            'FULL')

         self.assertEqual(
             self.show_pb(backup_dir, 'node')[0]['backup-mode'],
-            'FULL')
+            'PAGE')

+        output = self.delete_expired(
+            backup_dir, 'node',
+            options=[
+                '--retention-window=1',
+                '--delete-expired', '--log-level-console=log'])
+
         # Clean after yourself
         self.del_test_dir(module_name, fname)
@@ -1596,7 +1809,7 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
         # create pair of MERGING backup as a result of failed merge
         gdb = self.merge_backup(
             backup_dir, 'node', delta_id, gdb=True)
-        gdb.set_breakpoint('copy_file')
+        gdb.set_breakpoint('backup_non_data_file')
         gdb.run_until_break()
         gdb.continue_execution_until_break(2)
         gdb._execute('signal SIGKILL')
@@ -2491,10 +2704,14 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
         self.assertTrue(show_tli4_before)
         self.assertTrue(show_tli5_before)

+        sleep(5)
+
         output = self.delete_pb(
             backup_dir, 'node',
             options=['--delete-wal', '--wal-depth=2', '--log-level-console=verbose'])

+        # print(output)
+
         show_tli1_after = self.show_archive(backup_dir, 'node', tli=1)
         show_tli2_after = self.show_archive(backup_dir, 'node', tli=2)
         show_tli3_after = self.show_archive(backup_dir, 'node', tli=3)
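
Note: the last hunk pauses before the purge, presumably to let archiving settle before WAL files are deleted. Condensed, the sequence being exercised (delete_pb and show_archive are suite helpers):

    sleep(5)  # let the archiver catch up before purging WAL
    output = self.delete_pb(
        backup_dir, 'node',
        options=['--delete-wal', '--wal-depth=2', '--log-level-console=verbose'])
    # per-timeline archive state is then compared before/after the purge
    show_tli1_after = self.show_archive(backup_dir, 'node', tli=1)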