
tests: minor fixes, new tests for ptrack

Grigory Smolkin 2018-01-17 20:51:43 +03:00
parent ecd6d1187d
commit d6a2d36403
6 changed files with 494 additions and 206 deletions


@@ -162,7 +162,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        except ProbackupException as e:
            self.assertTrue("INFO: Validate backups of the instance 'node'\n" in e.message
                and 'WARNING: Backup file "{0}" is not found\n'.format(file) in e.message
-               and "WARNING: Backup {0} is corrupted\n".format(backup_id) in e.message
+               and "WARNING: Backup {0} data files are corrupted\n".format(backup_id) in e.message
                and "INFO: Some backups are not valid\n" in e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(repr(e.message), self.cmd))
@@ -231,29 +231,94 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
-   def test_page_checksumm_fail(self):
+   def test_checksumm_fail_heal_via_ptrack(self):
        """make node, corrupt some page, check that backup failed"""
        fname = self.id().split('.')[3]
-       node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
+       node = self.make_simple_node(
+           base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
            )
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.start()

-       self.backup_node(backup_dir, 'node', node, backup_type="full", options=["-j", "4", "--stream"])
+       self.backup_node(
+           backup_dir, 'node', node,
+           backup_type="full", options=["-j", "4", "--stream"])

        node.safe_psql(
            "postgres",
-           "create table t_heap as select 1 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,1000) i")
+           "create table t_heap as select 1 as id, md5(i::text) as text, "
+           "md5(repeat(i::text,10))::tsvector as tsvector "
+           "from generate_series(0,1000) i")
        node.safe_psql(
            "postgres",
            "CHECKPOINT;")

-       heap_path = node.safe_psql("postgres", "select pg_relation_filepath('t_heap')").rstrip()
+       heap_path = node.safe_psql(
+           "postgres",
+           "select pg_relation_filepath('t_heap')").rstrip()
+
+       with open(os.path.join(node.data_dir, heap_path), "rb+", 0) as f:
+           f.seek(9000)
+           f.write(b"bla")
+           f.flush()
+           f.close
+
+       self.backup_node(
+           backup_dir, 'node', node, backup_type="full",
+           options=["-j", "4", "--stream", '--log-level-file=verbose'])
+
+       # open log file and check
+       with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:
+           log_content = f.read()
+           self.assertIn('block 1, try fetching via SQL', log_content)
+           self.assertIn('SELECT pg_ptrack_get_block', log_content)
+           f.close
+
+       self.assertTrue(
+           self.show_pb(backup_dir, 'node')[1]['Status'] == 'OK',
+           "Backup Status should be OK")
+
+       # Clean after yourself
+       # self.del_test_dir(module_name, fname)
+
+   # @unittest.skip("skip")
+   def test_checksumm_fail_heal_via_ptrack_fail(self):
+       """make node, corrupt some page, check that backup failed"""
+       fname = self.id().split('.')[3]
+       node = self.make_simple_node(
+           base_dir="{0}/{1}/node".format(module_name, fname),
+           set_replication=True,
+           initdb_params=['--data-checksums'],
+           pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
+           )
+       backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+       self.init_pb(backup_dir)
+       self.add_instance(backup_dir, 'node', node)
+       node.start()
+
+       self.backup_node(
+           backup_dir, 'node', node, backup_type="full",
+           options=["-j", "4", "--stream"])
+
+       node.safe_psql(
+           "postgres",
+           "create table t_heap as select 1 as id, md5(i::text) as text, "
+           "md5(repeat(i::text,10))::tsvector as tsvector "
+           "from generate_series(0,1000) i")
+       node.safe_psql(
+           "postgres",
+           "CHECKPOINT;")
+
+       heap_path = node.safe_psql(
+           "postgres",
+           "select pg_relation_filepath('t_heap')").rstrip()
+
        node.stop()
        with open(os.path.join(node.data_dir, heap_path), "rb+", 0) as f:
@@ -264,17 +329,27 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        node.start()

        try:
-           self.backup_node(backup_dir, 'node', node, backup_type="full", options=["-j", "4", "--stream"])
+           self.backup_node(
+               backup_dir, 'node', node,
+               backup_type="full", options=["-j", "4", "--stream"])
            # we should die here because exception is what we expect to happen
-           self.assertEqual(1, 0, "Expecting Error because of page corruption in PostgreSQL instance.\n Output: {0} \n CMD: {1}".format(
+           self.assertEqual(
+               1, 0,
+               "Expecting Error because of page "
+               "corruption in PostgreSQL instance.\n"
+               " Output: {0} \n CMD: {1}".format(
                repr(self.output), self.cmd))
        except ProbackupException as e:
-           self.assertIn("ERROR: File", e.message,
-               "\n Unexpected Error Message: {0}\n CMD: {1}".format(repr(e.message), self.cmd))
-           self.assertIn("blknum", e.message,
-               "\n Unexpected Error Message: {0}\n CMD: {1}".format(repr(e.message), self.cmd))
-           self.assertIn("have wrong checksum", e.message,
-               "\n Unexpected Error Message: {0}\n CMD: {1}".format(repr(e.message), self.cmd))
+           self.assertTrue(
+               "WARNING: File" in e.message and
+               "blknum" in e.message and
+               "have wrong checksum" in e.message,
+               "\n Unexpected Error Message: {0}\n CMD: {1}".format(
+                   repr(e.message), self.cmd))
+
+       self.assertTrue(
+           self.show_pb(backup_dir, 'node')[1]['Status'] == 'ERROR',
+           "Backup Status should be ERROR")

        # Clean after yourself
-       self.del_test_dir(module_name, fname)
+       # self.del_test_dir(module_name, fname)

@@ -59,5 +59,15 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
pg_probackup del-instance -B backup-dir
                 --instance=instance_name

+pg_probackup archive-push -B backup-dir --instance=instance_name
+                --wal-file-path=wal-file-path
+                --wal-file-name=wal-file-name
+                [--compress [--compress-level=compress-level]]
+                [--overwrite]
+
+pg_probackup archive-get -B backup-dir --instance=instance_name
+                --wal-file-path=wal-file-path
+                --wal-file-name=wal-file-name
+
Read the website for details. <https://github.com/postgrespro/pg_probackup>
Report bugs to <https://github.com/postgrespro/pg_probackup/issues>.

@@ -858,7 +858,7 @@ class ProbackupTest(object):
            'postmaster.pid', 'postmaster.opts',
            'pg_internal.init', 'postgresql.auto.conf',
            'backup_label', 'tablespace_map', 'recovery.conf',
-           'ptrack_control', 'ptrack_init'
+           'ptrack_control', 'ptrack_init', 'pg_control'
        ]
        suffixes_to_ignore = (
            '_ptrack'
@@ -1119,7 +1119,10 @@ class GDBobj(ProbackupTest):
                continue
            if line.startswith('*stopped,reason="breakpoint-hit"'):
                continue
-           if line.startswith('*stopped,reason="exited-normally"'):
+           if (
+               line.startswith('*stopped,reason="exited-normally"') or
+               line == '*stopped\n'
+           ):
                return
        raise GdbException(
            'Failed to continue execution until exit.\n'
@@ -1164,11 +1167,10 @@ class GDBobj(ProbackupTest):
        self.proc.stdin.flush()

        while True:
-           # sleep(1)
            line = self.proc.stdout.readline()
            output += [line]
            if self.verbose:
-               print(line)
+               print(repr(line))
            if line == '^done\n' or line.startswith('*stopped'):
                break
            if running and line.startswith('*running'):

@@ -96,7 +96,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
        node_restored.start()

        # Clean after yourself
-       # self.del_test_dir(module_name, fname)
+       self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_page_stream(self):

@@ -85,21 +85,15 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
        node.start()

        # FULL BACKUP
-       # print('START')
-       # print(node.safe_psql('postgres', "select pg_ptrack_control_lsn()"))
        self.backup_node(backup_dir, 'node', node, options=['--stream'])
-       # print('AFTER FULL')
-       # print(node.safe_psql('postgres', "select pg_ptrack_control_lsn()"))

        # DISABLE PTRACK
        node.safe_psql('postgres', "alter system set ptrack_enable to off")
        node.restart()
-       # print('DISABLED')
-       # print(node.safe_psql('postgres', "select pg_ptrack_control_lsn()"))

        # ENABLE PTRACK
        node.safe_psql('postgres', "alter system set ptrack_enable to on")
        node.restart()
-       # print('ENABLED')
-       # print(node.safe_psql('postgres', "select pg_ptrack_control_lsn()"))

        # PTRACK BACKUP
        try:
@@ -165,25 +159,26 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
            backup_dir, 'node', node, backup_type='ptrack',
            options=['--stream', '--log-level-file=verbose']
            )
+       pgdata = self.pgdata_content(node.data_dir)

        self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack',
            options=['--stream', '--log-level-file=verbose']
            )
-       pgdata = self.pgdata_content(node.data_dir)

        self.restore_node(
            backup_dir, 'node', node_restored, options=["-j", "4"])

+       # Physical comparison
+       if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
+           self.compare_pgdata(pgdata, pgdata_restored)

        node_restored.append_conf(
            "postgresql.auto.conf", "port = {0}".format(node_restored.port))
        node_restored.start()

-       # Physical comparison
-       self.compare_pgdata(pgdata, pgdata_restored)

        # Clean after yourself
        self.del_test_dir(module_name, fname)
@@ -258,7 +253,7 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
            backup_dir, 'node', node, backup_type='ptrack',
            options=['--log-level-file=verbose']
            )
+       if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        old_tablespace = self.get_tblspace_path(node, 'somedata')
@@ -269,9 +264,10 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
            options=["-j", "4", "-T", "{0}={1}".format(
                old_tablespace, new_tablespace)]
            )
-       pgdata_restored = self.pgdata_content(node_restored.data_dir)

        # Physical comparison
+       if self.paranoia:
+           pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        node_restored.append_conf(
@@ -346,6 +342,7 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
            options=['--log-level-file=verbose']
            )
+       if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        old_tablespace = self.get_tblspace_path(node, 'somedata')
@@ -356,8 +353,10 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
            options=["-j", "4", "-T", "{0}={1}".format(
                old_tablespace, new_tablespace)]
            )
-       pgdata_restored = self.pgdata_content(node_restored.data_dir)

        # Physical comparison
+       if self.paranoia:
+           pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        node_restored.append_conf(
@@ -394,46 +393,51 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
        node_restored.cleanup()
        node.start()

+       self.backup_node(backup_dir, 'node', node)

        node.safe_psql(
            "postgres",
            "create table t_heap as select i"
            " as id from generate_series(0,1) i"
        )
-       self.backup_node(backup_dir, 'node', node)

        self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack',
            options=['--stream', '--log-level-file=verbose']
            )

-       # node.safe_psql(
-       #     "postgres",
-       #     "update t_heap set id = 100500")
-       #
-       # self.backup_node(
-       #     backup_dir, 'node', node,
-       #     backup_type='ptrack', options=['--stream']
-       # )
+       node.safe_psql(
+           "postgres",
+           "update t_heap set id = 100500")
+
+       self.backup_node(
+           backup_dir, 'node', node,
+           backup_type='ptrack', options=['--stream']
+       )

+       if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        result = node.safe_psql("postgres", "SELECT * FROM t_heap")

        self.restore_node(
            backup_dir, 'node', node_restored, options=["-j", "4"])

+       # Physical comparison
+       if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
+           self.compare_pgdata(pgdata, pgdata_restored)

        node_restored.append_conf(
            "postgresql.auto.conf", "port = {0}".format(node_restored.port))
        node_restored.start()

        # Logical comparison
        self.assertEqual(
            result,
            node_restored.safe_psql("postgres", "SELECT * FROM t_heap")
        )

-       # Physical comparison
-       self.compare_pgdata(pgdata, pgdata_restored)

        # Clean after yourself
        self.del_test_dir(module_name, fname)
@@ -485,12 +489,17 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
            backup_dir, 'node', node,
            backup_type='ptrack', options=['--stream']
            )
+       if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        result = node.safe_psql("postgres", "SELECT * FROM t_heap")
        node.cleanup()
        self.restore_node(backup_dir, 'node', node, options=["-j", "4"])

+       # Physical comparison
+       if self.paranoia:
            pgdata_restored = self.pgdata_content(node.data_dir)
+           self.compare_pgdata(pgdata, pgdata_restored)

        node.start()
        # Logical comparison
@@ -498,8 +507,6 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
            result,
            node.safe_psql("postgres", "SELECT * FROM t_heap")
        )

-       # Physical comparison
-       self.compare_pgdata(pgdata, pgdata_restored)

        # Clean after yourself
        self.del_test_dir(module_name, fname)
@@ -553,6 +560,8 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
            node, backup_type='ptrack',
            options=['--stream', '--log-level-file=verbose']
            )
+       if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        # Drop Node
@@ -588,7 +597,11 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd)
        )
+       if self.paranoia:
            pgdata_restored = self.pgdata_content(node.data_dir)
+           self.compare_pgdata(pgdata, pgdata_restored)

        node.start()
        while node.safe_psql(
            "postgres", "select pg_is_in_recovery()") == 't\n':
@@ -596,9 +609,6 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
        ptrack_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap")
        self.assertEqual(ptrack_result, ptrack_result_new)

-       if self.paranoia:
-           self.compare_pgdata(pgdata, pgdata_restored)

        # Clean after yourself
        self.del_test_dir(module_name, fname)
@@ -653,6 +663,7 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
            backup_dir, 'node', node, backup_type='ptrack')
        ptrack_target_time = self.show_pb(
            backup_dir, 'node', ptrack_backup_id)['recovery-time']
+       if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        # Drop Node
@@ -688,7 +699,11 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd)
        )
+       if self.paranoia:
            pgdata_restored = self.pgdata_content(node.data_dir)
+           self.compare_pgdata(pgdata, pgdata_restored)

        node.start()
        while node.safe_psql(
            "postgres", "select pg_is_in_recovery()") == 't\n':
@@ -696,9 +711,6 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
        ptrack_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap")
        self.assertEqual(ptrack_result, ptrack_result_new)

-       if self.paranoia:
-           self.compare_pgdata(pgdata, pgdata_restored)

        node.cleanup()

        # Clean after yourself
@@ -712,10 +724,17 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
        self.maxDiff = None
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
-       node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
+       node = self.make_simple_node(
+           base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
-           pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s', 'ptrack_enable': 'on', 'autovacuum': 'off'}
+           pg_options={
+               'wal_level':
+               'replica',
+               'max_wal_senders': '2',
+               'checkpoint_timeout': '30s',
+               'ptrack_enable': 'on',
+               'autovacuum': 'off'}
            )

        self.init_pb(backup_dir)
@@ -725,34 +744,57 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
        # FULL BACKUP
        node.safe_psql(
            "postgres",
-           "create table t_heap as select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
-       node.safe_psql("postgres", "SELECT * FROM t_heap")
-       backup_id = self.backup_node(backup_dir, 'node', node, backup_type='full', options=["--stream"])
-       start_lsn_full = self.show_pb(backup_dir, 'node', backup_id)['start-lsn']
+           "create table t_heap as select i as id, md5(i::text) as text, "
+           "md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
+       node.safe_psql(
+           "postgres",
+           "SELECT * FROM t_heap")
+       backup_id = self.backup_node(
+           backup_dir, 'node', node,
+           backup_type='full', options=["--stream"])
+       start_lsn_full = self.show_pb(
+           backup_dir, 'node', backup_id)['start-lsn']

        # PTRACK BACKUP
        node.safe_psql(
            "postgres",
-           "insert into t_heap select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(100,200) i")
+           "insert into t_heap select i as id, md5(i::text) as text, "
+           "md5(i::text)::tsvector as tsvector "
+           "from generate_series(100,200) i")
        node.safe_psql("postgres", "SELECT * FROM t_heap")
-       backup_id = self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream"])
-       start_lsn_ptrack = self.show_pb(backup_dir, 'node', backup_id)['start-lsn']
+       backup_id = self.backup_node(
+           backup_dir, 'node', node,
+           backup_type='ptrack', options=["--stream"])
+       start_lsn_ptrack = self.show_pb(
+           backup_dir, 'node', backup_id)['start-lsn']

        self.delete_pb(backup_dir, 'node', backup_id)

        # SECOND PTRACK BACKUP
        node.safe_psql(
            "postgres",
-           "insert into t_heap select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(200,300) i")
+           "insert into t_heap select i as id, md5(i::text) as text, "
+           "md5(i::text)::tsvector as tsvector "
+           "from generate_series(200,300) i")

        try:
-           self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream"])
+           self.backup_node(
+               backup_dir, 'node', node,
+               backup_type='ptrack', options=["--stream"])
            # we should die here because exception is what we expect to happen
-           self.assertEqual(1, 0, "Expecting Error because of LSN mismatch from ptrack_control and previous backup start_lsn.\n Output: {0} \n CMD: {1}".format(
-               repr(self.output), self.cmd))
+           self.assertEqual(
+               1, 0,
+               "Expecting Error because of LSN mismatch from ptrack_control "
+               "and previous backup start_lsn.\n"
+               " Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd))
        except ProbackupException as e:
-           self.assertTrue('ERROR: LSN from ptrack_control' in e.message,
-               '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
+           self.assertTrue(
+               'ERROR: LSN from ptrack_control' in e.message,
+               '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                   repr(e.message), self.cmd))

        # Clean after yourself
        self.del_test_dir(module_name, fname)
@@ -763,10 +805,16 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
        self.maxDiff = None
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
-       node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
+       node = self.make_simple_node(
+           base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
-           pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s', 'ptrack_enable': 'on', 'autovacuum': 'off'}
+           pg_options={
+               'wal_level': 'replica',
+               'max_wal_senders': '2',
+               'checkpoint_timeout': '30s',
+               'ptrack_enable': 'on',
+               'autovacuum': 'off'}
            )

        self.init_pb(backup_dir)
@@ -777,16 +825,20 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
        # FULL BACKUP
        node.safe_psql(
            "postgres",
-           "create table t_heap as select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
+           "create table t_heap as select i as id, md5(i::text) as text, "
+           "md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
        node.safe_psql("postgres", "SELECT * FROM t_heap")
        self.backup_node(backup_dir, 'node', node)

        # PAGE BACKUP
        node.safe_psql(
            "postgres",
-           "insert into t_heap select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(100,200) i")
+           "insert into t_heap select i as id, md5(i::text) as text, "
+           "md5(i::text)::tsvector as tsvector "
+           "from generate_series(100,200) i")
        node.safe_psql("postgres", "SELECT * FROM t_heap")
-       backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
+       backup_id = self.backup_node(
+           backup_dir, 'node', node, backup_type='page')

        self.delete_pb(backup_dir, 'node', backup_id)
        # sys.exit(1)
@@ -794,16 +846,24 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
        # PTRACK BACKUP
        node.safe_psql(
            "postgres",
-           "insert into t_heap select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(200,300) i")
+           "insert into t_heap select i as id, md5(i::text) as text, "
+           "md5(i::text)::tsvector as tsvector "
+           "from generate_series(200,300) i")

        try:
            self.backup_node(backup_dir, 'node', node, backup_type='ptrack')
            # we should die here because exception is what we expect to happen
-           self.assertEqual(1, 0, "Expecting Error because of LSN mismatch from ptrack_control and previous backup start_lsn.\n Output: {0} \n CMD: {1}".format(
+           self.assertEqual(
+               1, 0,
+               "Expecting Error because of LSN mismatch from ptrack_control "
+               "and previous backup start_lsn.\n "
+               "Output: {0}\n CMD: {1}".format(
                repr(self.output), self.cmd))
        except ProbackupException as e:
-           self.assertTrue('ERROR: LSN from ptrack_control' in e.message,
-               '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
+           self.assertTrue(
+               'ERROR: LSN from ptrack_control' in e.message,
+               '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                   repr(e.message), self.cmd))

        # Clean after yourself
        self.del_test_dir(module_name, fname)
@@ -814,10 +874,16 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
        self.maxDiff = None
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
-       node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
+       node = self.make_simple_node(
+           base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
-           pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s', 'ptrack_enable': 'on', 'autovacuum': 'off'}
+           pg_options={
+               'wal_level': 'replica',
+               'max_wal_senders': '2',
+               'checkpoint_timeout': '30s',
+               'ptrack_enable': 'on', 'autovacuum': 'off'
+               }
            )

        self.init_pb(backup_dir)
@@ -850,16 +916,28 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
        # PTRACK BACKUP
        node.safe_psql(
            "postgres",
-           "insert into t_heap select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(200,300) i")
+           "insert into t_heap select i as id, md5(i::text) as text, "
+           "md5(i::text)::tsvector as tsvector "
+           "from generate_series(200,300) i")

        try:
-           self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream"])
+           self.backup_node(
+               backup_dir, 'node', node,
+               backup_type='ptrack', options=["--stream"])
            # we should die here because exception is what we expect to happen
-           self.assertEqual(1, 0, "Expecting Error because of LSN mismatch from ptrack_control and previous backup start_lsn.\n Output: {0} \n CMD: {1}".format(
-               repr(self.output), self.cmd))
+           self.assertEqual(
+               1, 0,
+               "Expecting Error because of LSN mismatch from ptrack_control "
+               "and previous backup start_lsn.\n "
+               "Output: {0} \n CMD: {1}".format(
+                   repr(self.output), self.cmd)
+               )
        except ProbackupException as e:
-           self.assertTrue('ERROR: LSN from ptrack_control' in e.message
-               and 'Create new full backup before an incremental one' in e.message,
-               '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
+           self.assertTrue(
+               "ERROR: LSN from ptrack_control" in e.message and
+               "Create new full backup before "
+               "an incremental one" in e.message,
+               '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                   repr(e.message), self.cmd))

        # Clean after yourself
        self.del_test_dir(module_name, fname)
@@ -870,10 +948,18 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
        self.maxDiff = None
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
-       node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
+       node = self.make_simple_node(
+           base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
-           pg_options={'wal_level': 'replica', 'max_wal_size': '10GB', 'max_wal_senders': '2', 'checkpoint_timeout': '5min', 'ptrack_enable': 'on', 'autovacuum': 'off'}
+           pg_options={
+               'wal_level': 'replica',
+               'max_wal_size': '10GB',
+               'max_wal_senders': '2',
+               'checkpoint_timeout': '5min',
+               'ptrack_enable': 'on',
+               'autovacuum': 'off'
+               }
            )

        self.init_pb(backup_dir)
@@ -882,61 +968,97 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
        node.start()

        # FULL BACKUP
-       node.safe_psql("postgres",
-           "create table t_heap as select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
+       node.safe_psql(
+           "postgres",
+           "create table t_heap as select i as id, md5(i::text) as text, "
+           "md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
        node.safe_psql("postgres", "SELECT * FROM t_heap")
-       self.backup_node(backup_dir, 'node', node, options=["--stream", "--log-level-file=verbose"])
-       #sys.exit(1)
+       self.backup_node(
+           backup_dir, 'node', node,
+           options=["--stream", "--log-level-file=verbose"])

        # CREATE DATABASE DB1
        node.safe_psql("postgres", "create database db1")
-       node.safe_psql("db1", "create table t_heap as select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
+       node.safe_psql(
+           "db1",
+           "create table t_heap as select i as id, md5(i::text) as text, "
+           "md5(i::text)::tsvector as tsvector from generate_series(0,100) i")

        # PTRACK BACKUP
-       backup_id = self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream", "--log-level-file=verbose"])
+       backup_id = self.backup_node(
+           backup_dir, 'node', node,
+           backup_type='ptrack',
+           options=["--stream", "--log-level-file=verbose"]
+       )
+       if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        # RESTORE
-       node_restored = self.make_simple_node(base_dir="{0}/{1}/node_restored".format(module_name, fname))
+       node_restored = self.make_simple_node(
+           base_dir="{0}/{1}/node_restored".format(module_name, fname)
+       )
        node_restored.cleanup()

+       self.restore_node(
+           backup_dir, 'node', node_restored,
+           backup_id=backup_id, options=["-j", "4"])

        # COMPARE PHYSICAL CONTENT
-       self.restore_node(backup_dir, 'node', node_restored, backup_id=backup_id, options=["-j", "4"])
-       pgdata_restored = self.pgdata_content(node_restored.data_dir)
        if self.paranoia:
+           pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        # START RESTORED NODE
-       node_restored.append_conf("postgresql.auto.conf", "port = {0}".format(node_restored.port))
+       node_restored.append_conf(
+           "postgresql.auto.conf", "port = {0}".format(node_restored.port))
        node_restored.start()

        # DROP DATABASE DB1
        node.safe_psql(
            "postgres", "drop database db1")

        # SECOND PTRACK BACKUP
-       backup_id = self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream"])
+       backup_id = self.backup_node(
+           backup_dir, 'node', node,
+           backup_type='ptrack', options=["--stream"]
+       )
+       if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        # RESTORE SECOND PTRACK BACKUP
        node_restored.cleanup()
-       self.restore_node(backup_dir, 'node', node_restored, backup_id=backup_id, options=["-j", "4"])
-
-       # START RESTORED NODE
-       node_restored.append_conf("postgresql.auto.conf", "port = {0}".format(node_restored.port))
-       node_restored.start()
+       self.restore_node(
+           backup_dir, 'node', node_restored,
+           backup_id=backup_id, options=["-j", "4"]
+       )

        # COMPARE PHYSICAL CONTENT
-       pgdata_restored = self.pgdata_content(node_restored.data_dir)
        if self.paranoia:
+           pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

+       # START RESTORED NODE
+       node_restored.append_conf(
+           "postgresql.auto.conf", "port = {0}".format(node_restored.port))
+       node_restored.start()

        try:
            node_restored.safe_psql('db1', 'select 1')
            # we should die here because exception is what we expect to happen
-           self.assertEqual(1, 0, "Expecting Error because we are connecting to deleted database.\n Output: {0} \n CMD: {1}".format(
-               repr(self.output), self.cmd))
+           self.assertEqual(
+               1, 0,
+               "Expecting Error because we are connecting to deleted database"
+               "\n Output: {0} \n CMD: {1}".format(
+                   repr(self.output), self.cmd)
+           )
        except ClusterException as e:
-           self.assertTrue('FATAL: database "db1" does not exist' in e.message,
-               '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
+           self.assertTrue(
+               'FATAL: database "db1" does not exist' in e.message,
+               '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                   repr(e.message), self.cmd)
+           )

        # Clean after yourself
        self.del_test_dir(module_name, fname)
@@ -990,6 +1112,7 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
            backup_type='ptrack',
            options=["--stream", "--log-level-file=verbose"]
            )
+       if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        # node.stop()
        # node.cleanup()
@@ -1016,12 +1139,15 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
        )

        # GET RESTORED PGDATA AND COMPARE
+       if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
+           self.compare_pgdata(pgdata, pgdata_restored)

        # START RESTORED NODE
        node_restored.append_conf(
            'postgresql.auto.conf', 'port = {0}'.format(node_restored.port))
        node_restored.start()
+       time.sleep(5)

        while node_restored.safe_psql(
            "postgres", "select pg_is_in_recovery()") == 't\n':
            time.sleep(1)
@@ -1029,8 +1155,6 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
            "postgres", "select * from t_heap")
        self.assertEqual(result, result_new, 'lost some data after restore')

-       if self.paranoia:
-           self.compare_pgdata(pgdata, pgdata_restored)

        # Clean after yourself
        self.del_test_dir(module_name, fname)
@@ -1043,10 +1167,16 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
        self.maxDiff = None
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
-       node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
+       node = self.make_simple_node(
+           base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
-           pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s', 'ptrack_enable': 'on', 'autovacuum': 'off'}
+           pg_options={
+               'wal_level': 'replica',
+               'max_wal_senders': '2',
+               'checkpoint_timeout': '30s',
+               'ptrack_enable': 'on',
+               'autovacuum': 'off'}
            )

        self.init_pb(backup_dir)
@@ -1060,26 +1190,37 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
        self.create_tblspace_in_node(node, 'somedata')

        # ALTER DATABASE
-       node.safe_psql("template1",
+       node.safe_psql(
+           "template1",
            "alter database postgres set tablespace somedata")
-       #sys.exit(1)

        # PTRACK BACKUP
-       self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream", '--log-level-file=verbose'])
+       self.backup_node(
+           backup_dir, 'node', node, backup_type='ptrack',
+           options=["--stream", '--log-level-file=verbose'])
+       if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)
        node.stop()

        # RESTORE
-       node_restored = self.make_simple_node(base_dir="{0}/{1}/node_restored".format(module_name, fname))
+       node_restored = self.make_simple_node(
+           base_dir="{0}/{1}/node_restored".format(module_name, fname))
        node_restored.cleanup()
-       self.restore_node(backup_dir, 'node', node_restored, options=["-j", "4",
-           "-T", "{0}={1}".format(self.get_tblspace_path(node,'somedata'), self.get_tblspace_path(node_restored,'somedata'))])
+       self.restore_node(
+           backup_dir, 'node',
+           node_restored,
+           options=[
+               "-j", "4",
+               "-T", "{0}={1}".format(
+                   self.get_tblspace_path(node, 'somedata'),
+                   self.get_tblspace_path(node_restored, 'somedata'))])

-       # GET PHYSICAL CONTENT
-       pgdata_restored = self.pgdata_content(node_restored.data_dir)
-       # COMPARE PHYSICAL CONTENT
+       # GET PHYSICAL CONTENT and COMPARE PHYSICAL CONTENT
        if self.paranoia:
+           pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        # START RESTORED NODE
        node_restored.start()
@@ -1092,10 +1233,16 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
        self.maxDiff = None
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
-       node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
+       node = self.make_simple_node(
+           base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
-           pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s', 'ptrack_enable': 'on', 'autovacuum': 'off'}
+           pg_options={
+               'wal_level': 'replica',
+               'max_wal_senders': '2',
+               'checkpoint_timeout': '30s',
+               'ptrack_enable': 'on',
+               'autovacuum': 'off'}
            )

        self.init_pb(backup_dir)
@@ -1107,7 +1254,9 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
        # CREATE TABLE
        node.safe_psql(
            "postgres",
-           "create table t_heap as select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
+           "create table t_heap as select i as id, md5(i::text) as text, "
+           "md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
        result = node.safe_psql("postgres", "select * from t_heap")
        # FULL BACKUP
        self.backup_node(backup_dir, 'node', node, options=["--stream"])
@@ -1116,19 +1265,25 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
        node.safe_psql(
            "postgres", "alter table t_heap set tablespace somedata")
        # PTRACK BACKUP
-       self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream"])
+       self.backup_node(
+           backup_dir, 'node', node,
+           backup_type='ptrack', options=["--stream"])

        # Move table back to default tablespace
        node.safe_psql(
            "postgres", "alter table t_heap set tablespace pg_default")
        # SECOND PTRACK BACKUP
-       self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream"])
+       self.backup_node(
+           backup_dir, 'node', node,
+           backup_type='ptrack', options=["--stream"])

        # DROP TABLESPACE 'somedata'
        node.safe_psql(
            "postgres", "drop tablespace somedata")
        # THIRD PTRACK BACKUP
-       self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream"])
+       self.backup_node(
+           backup_dir, 'node', node,
+           backup_type='ptrack', options=["--stream"])

        tblspace = self.get_tblspace_path(node, 'somedata')
        node.cleanup()
@@ -1136,9 +1291,16 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
        self.restore_node(backup_dir, 'node', node, options=["-j", "4"])
        node.start()

-       tblspc_exist = node.safe_psql("postgres", "select exists(select 1 from pg_tablespace where spcname = 'somedata')")
+       tblspc_exist = node.safe_psql(
+           "postgres",
+           "select exists(select 1 from "
+           "pg_tablespace where spcname = 'somedata')")
+
        if tblspc_exist.rstrip() == 't':
-           self.assertEqual(1, 0, "Expecting Error because tablespace 'somedata' should not be present")
+           self.assertEqual(
+               1, 0,
+               "Expecting Error because "
+               "tablespace 'somedata' should not be present")

        result_new = node.safe_psql("postgres", "select * from t_heap")
        self.assertEqual(result, result_new)
@@ -1152,10 +1314,12 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
        self.maxDiff = None
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
-       node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
+       node = self.make_simple_node(
+           base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
-           pg_options={'wal_level': 'replica', 'max_wal_senders': '2',
+           pg_options={
+               'wal_level': 'replica', 'max_wal_senders': '2',
                'checkpoint_timeout': '30s', 'ptrack_enable': 'on',
                'autovacuum': 'off'}
            )
@@ -1170,7 +1334,9 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
        # CREATE TABLE
        node.safe_psql(
            "postgres",
-           "create table t_heap as select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
+           "create table t_heap as select i as id, md5(i::text) as text, "
+           "md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
        result = node.safe_psql("postgres", "select * from t_heap")
        # FULL BACKUP
        self.backup_node(backup_dir, 'node', node, options=["--stream"])
@@ -1182,35 +1348,41 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
        result = node.safe_psql("postgres", "select * from t_heap")

        # FIRTS PTRACK BACKUP
-       self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream", "--log-level-file=verbose"])
+       self.backup_node(
+           backup_dir, 'node', node, backup_type='ptrack',
+           options=["--stream", "--log-level-file=verbose"])

        # GET PHYSICAL CONTENT FROM NODE
+       if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        # Restore ptrack backup
-       restored_node = self.make_simple_node(base_dir="{0}/{1}/restored_node".format(module_name, fname))
+       restored_node = self.make_simple_node(
+           base_dir="{0}/{1}/restored_node".format(module_name, fname))
        restored_node.cleanup()
-       tblspc_path_new = self.get_tblspace_path(restored_node, 'somedata_restored')
+       tblspc_path_new = self.get_tblspace_path(
+           restored_node, 'somedata_restored')
        self.restore_node(backup_dir, 'node', restored_node, options=[
            "-j", "4", "-T", "{0}={1}".format(tblspc_path, tblspc_path_new)])

-       # GET PHYSICAL CONTENT FROM RESTORED NODE
+       # GET PHYSICAL CONTENT FROM RESTORED NODE and COMPARE PHYSICAL CONTENT
+       if self.paranoia:
            pgdata_restored = self.pgdata_content(restored_node.data_dir)
+           self.compare_pgdata(pgdata, pgdata_restored)

        # START RESTORED NODE
-       restored_node.append_conf("postgresql.auto.conf", "port = {0}".format(restored_node.port))
+       restored_node.append_conf(
+           "postgresql.auto.conf", "port = {0}".format(restored_node.port))
        restored_node.start()
-       while restored_node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
+       while restored_node.safe_psql(
+               "postgres", "select pg_is_in_recovery()") == 't\n':
            time.sleep(1)

        # COMPARE LOGICAL CONTENT
-       result_new = restored_node.safe_psql("postgres", "select * from t_heap")
+       result_new = restored_node.safe_psql(
+           "postgres", "select * from t_heap")
        self.assertEqual(result, result_new)

-       # COMPARE PHYSICAL CONTENT
-       if self.paranoia:
-           self.compare_pgdata(pgdata, pgdata_restored)

        restored_node.cleanup()
        shutil.rmtree(tblspc_path_new, ignore_errors=True)
@@ -1218,29 +1390,35 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
        node.safe_psql(
            "postgres", "alter table t_heap set tablespace pg_default")

        # SECOND PTRACK BACKUP
-       self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream", "--log-level-file=verbose"])
+       self.backup_node(
+           backup_dir, 'node', node, backup_type='ptrack',
+           options=["--stream", "--log-level-file=verbose"])
+       if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        # Restore second ptrack backup and check table consistency
        self.restore_node(backup_dir, 'node', restored_node, options=[
            "-j", "4", "-T", "{0}={1}".format(tblspc_path, tblspc_path_new)])

-       # GET PHYSICAL CONTENT FROM RESTORED NODE
+       # GET PHYSICAL CONTENT FROM RESTORED NODE and COMPARE PHYSICAL CONTENT
+       if self.paranoia:
            pgdata_restored = self.pgdata_content(restored_node.data_dir)
+           self.compare_pgdata(pgdata, pgdata_restored)

        # START RESTORED NODE
-       restored_node.append_conf("postgresql.auto.conf", "port = {0}".format(restored_node.port))
+       restored_node.append_conf(
+           "postgresql.auto.conf", "port = {0}".format(restored_node.port))
        restored_node.start()
-       while restored_node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
+       while restored_node.safe_psql(
+               "postgres",
+               "select pg_is_in_recovery()") == 't\n':
            time.sleep(1)

-       result_new = restored_node.safe_psql("postgres", "select * from t_heap")
+       result_new = restored_node.safe_psql(
+           "postgres", "select * from t_heap")
        self.assertEqual(result, result_new)

-       if self.paranoia:
-           # COMPARE PHYSICAL CONTENT
-           self.compare_pgdata(pgdata, pgdata_restored)

        # Clean after yourself
        self.del_test_dir(module_name, fname)
@@ -1249,12 +1427,15 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
        """Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup"""
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
-       node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
+       node = self.make_simple_node(
+           base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
-           pg_options={'wal_level': 'replica', 'max_wal_senders': '2',
+           pg_options={
+               'wal_level': 'replica', 'max_wal_senders': '2',
                'ptrack_enable': 'on', 'fsync': 'off', 'shared_buffers': '1GB',
-               'maintenance_work_mem': '1GB', 'autovacuum': 'off', 'full_page_writes': 'off'}
+               'maintenance_work_mem': '1GB', 'autovacuum': 'off',
+               'full_page_writes': 'off'}
            )

        self.init_pb(backup_dir)
@@ -1280,44 +1461,64 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
        pgbench.wait()
        node.safe_psql("postgres", "checkpoint")

-       idx_ptrack['new_size'] = self.get_fork_size(node, 'pgbench_accounts')
-       idx_ptrack['new_pages'] = self.get_md5_per_page_for_fork(idx_ptrack['path'], idx_ptrack['new_size'])
-       idx_ptrack['ptrack'] = self.get_ptrack_bits_per_page_for_fork(node, idx_ptrack['path'])
+       idx_ptrack['new_size'] = self.get_fork_size(
+           node,
+           'pgbench_accounts'
+       )
+       idx_ptrack['new_pages'] = self.get_md5_per_page_for_fork(
+           idx_ptrack['path'],
+           idx_ptrack['new_size']
+       )
+       idx_ptrack['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
+           node,
+           idx_ptrack['path']
+       )
        self.check_ptrack_sanity(idx_ptrack)

        # GET LOGICAL CONTENT FROM NODE
        result = node.safe_psql("postgres", "select * from pgbench_accounts")
        # FIRTS PTRACK BACKUP
-       self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--log-level-file=verbose"])
+       self.backup_node(
+           backup_dir, 'node', node, backup_type='ptrack',
+           options=["--log-level-file=verbose"]
+       )
        # GET PHYSICAL CONTENT FROM NODE
        pgdata = self.pgdata_content(node.data_dir)

        # RESTORE NODE
-       restored_node = self.make_simple_node(base_dir="{0}/{1}/restored_node".format(module_name, fname))
+       restored_node = self.make_simple_node(
+           base_dir="{0}/{1}/restored_node".format(module_name, fname))
        restored_node.cleanup()
        tblspc_path = self.get_tblspace_path(node, 'somedata')
-       tblspc_path_new = self.get_tblspace_path(restored_node, 'somedata_restored')
+       tblspc_path_new = self.get_tblspace_path(
+           restored_node,
+           'somedata_restored'
+       )
        self.restore_node(backup_dir, 'node', restored_node, options=[
            "-j", "4", "-T", "{0}={1}".format(tblspc_path, tblspc_path_new)])

        # GET PHYSICAL CONTENT FROM NODE_RESTORED
+       if self.paranoia:
            pgdata_restored = self.pgdata_content(restored_node.data_dir)
+           self.compare_pgdata(pgdata, pgdata_restored)

        # START RESTORED NODE
-       restored_node.append_conf("postgresql.auto.conf", "port = {0}".format(restored_node.port))
+       restored_node.append_conf(
+           "postgresql.auto.conf", "port = {0}".format(restored_node.port))
        restored_node.start()
-       while restored_node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
+       while restored_node.safe_psql(
+               "postgres",
+               "select pg_is_in_recovery()") == 't\n':
            time.sleep(1)

-       result_new = restored_node.safe_psql("postgres", "select * from pgbench_accounts")
+       result_new = restored_node.safe_psql(
+           "postgres",
+           "select * from pgbench_accounts"
+       )

        # COMPARE RESTORED FILES
        self.assertEqual(result, result_new, 'data is lost')

-       if self.paranoia:
-           self.compare_pgdata(pgdata, pgdata_restored)

        # Clean after yourself
        self.del_test_dir(module_name, fname)

@@ -125,7 +125,7 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
        self.backup_node(backup_dir, 'node', node)
        node.safe_psql(
            "postgres",
-           "insert into t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,100500) i")
+           "insert into t_heap select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,100500) i")
        self.backup_node(backup_dir, 'node', node)