mirror of https://github.com/postgrespro/pg_probackup.git synced 2025-07-17 07:22:20 +02:00

tests: new tests for compressed archive, pg_receivexlog support added

Author: Grigory Smolkin
Date: 2017-12-19 13:17:58 +03:00
parent 269812aafc
commit 0a073b4c63
3 changed files with 215 additions and 78 deletions
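Context for this change: until now these tests delivered WAL to the backup catalog only through archive_command (pg_probackup archive-push); this commit adds tests where WAL is streamed into the catalog with pg_receivexlog (renamed pg_receivewal in PostgreSQL 10), optionally gzip-compressed. A minimal sketch of that delivery method outside the test harness (paths and port are illustrative, not from the commit):

    # Sketch: stream WAL into the catalog's per-instance wal directory.
    import os
    import subprocess

    backup_dir = '/tmp/backup'                        # hypothetical catalog location
    wal_dir = os.path.join(backup_dir, 'wal', 'node')
    receiver = subprocess.Popen(
        ['pg_receivewal',                             # 'pg_receivexlog' before PostgreSQL 10
         '-p', '5432', '--synchronous',
         '-Z', '9',                                   # gzip compression, PostgreSQL 10+ only
         '-D', wal_dir],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)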

View File

@@ -33,24 +33,27 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
             "create table t_heap as select 1 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,100) i")
         result = node.safe_psql("postgres", "SELECT * FROM t_heap")
-        self.backup_node(backup_dir, 'node', node)
+        self.backup_node(backup_dir, 'node', node, options=["--log-level-file=verbose"])
         node.cleanup()

         self.restore_node(backup_dir, 'node', node)
         node.start()
+        while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
+            sleep(1)

         # Recreate backup catalogue
         self.init_pb(backup_dir)
         self.add_instance(backup_dir, 'node', node)

         # Make backup
-        sleep(5)
-        self.backup_node(backup_dir, 'node', node)
+        self.backup_node(backup_dir, 'node', node, options=["--log-level-file=verbose"])
         node.cleanup()

         # Restore Database
         self.restore_node(backup_dir, 'node', node)
         node.start()
+        while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
+            sleep(1)

         self.assertEqual(result, node.safe_psql("postgres", "SELECT * FROM t_heap"),
             'data after restore not equal to original data')
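The wait loops added above are the recurring pattern in this file for blocking until a restored node leaves recovery. A hypothetical shared helper (the name is ours, not the commit's; node is a testgres node as used throughout these tests):

    from time import sleep

    def wait_until_recovered(node, timeout=60):
        # Poll pg_is_in_recovery() once a second until recovery ends or we time out.
        for _ in range(timeout):
            if node.safe_psql("postgres", "select pg_is_in_recovery()") != 't\n':
                return
            sleep(1)
        raise AssertionError('node still in recovery after {0}s'.format(timeout))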
@@ -78,14 +81,13 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
             "postgres",
             "create table t_heap as select 1 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,100) i")
         backup_id = self.backup_node(backup_dir, 'node', node)
-        recovery_time = self.show_pb(backup_dir, 'node', backup_id)["recovery-time"]
         node.safe_psql(
             "postgres",
             "insert into t_heap select 100501 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,1) i")

         # SECOND TIMELINE
         node.cleanup()
-        self.restore_node(backup_dir, 'node', node, options=["--time={0}".format(recovery_time)])
+        self.restore_node(backup_dir, 'node', node)
         node.start()
         while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
             sleep(1)
@@ -105,7 +107,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         # THIRD TIMELINE
         node.cleanup()
-        self.restore_node(backup_dir, 'node', node, options=["--time={0}".format(recovery_time)])
+        self.restore_node(backup_dir, 'node', node)
         node.start()
         while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
             sleep(1)

@@ -124,7 +126,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         # FOURTH TIMELINE
         node.cleanup()
-        self.restore_node(backup_dir, 'node', node, options=["--time={0}".format(recovery_time)])
+        self.restore_node(backup_dir, 'node', node)
         node.start()
         while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
             sleep(1)

@@ -134,7 +136,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         # FIFTH TIMELINE
         node.cleanup()
-        self.restore_node(backup_dir, 'node', node, options=["--time={0}".format(recovery_time)])
+        self.restore_node(backup_dir, 'node', node)
         node.start()
         while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
             sleep(1)

@@ -144,7 +146,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         # SIXTH TIMELINE
         node.cleanup()
-        self.restore_node(backup_dir, 'node', node, options=["--time={0}".format(recovery_time)])
+        self.restore_node(backup_dir, 'node', node)
         node.start()
         while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
             sleep(1)
@@ -184,7 +186,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         node.append_conf('postgresql.auto.conf', "archive_command = '{0} %p %f'".format(archive_script_path))
         node.start()
         try:
-            self.backup_node(backup_dir, 'node', node, options=["--stream"])
+            self.backup_node(backup_dir, 'node', node, options=[
+                "--archive-timeout=60", "--log-level-file=verbose", "--stream"])
             # we should die here because exception is what we expect to happen
             self.assertEqual(1, 0, "Expecting Error because pg_stop_backup failed to answer.\n Output: {0} \n CMD: {1}".format(
                 repr(self.output), self.cmd))
@@ -193,6 +196,11 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
                 and "cancel it" in e.message,
                 '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))

+        log_file = os.path.join(node.logs_dir, 'postgresql.log')
+        with open(log_file, 'r') as f:
+            log_content = f.read()
+            self.assertNotIn('FailedAssertion', log_content, 'PostgreSQL crashed because of a failed assert')
+
         # Clean after yourself
         self.del_test_dir(module_name, fname)
@@ -211,9 +219,16 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         self.set_archiving(backup_dir, 'node', node)

         wals_dir = os.path.join(backup_dir, 'wal', 'node')
-        file = os.path.join(wals_dir, '000000010000000000000001')
+        if self.archive_compress:
+            file = os.path.join(wals_dir, '000000010000000000000001.gz')
+        else:
+            file = os.path.join(wals_dir, '000000010000000000000001')
         with open(file, 'a') as f:
-            pass
+            f.write(b"blablablaadssaaaaaaaaaaaaaaa")
+            f.flush()
+            f.close()

         node.start()
         node.safe_psql(
             "postgres",
@@ -224,7 +239,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         self.assertTrue('LOG: archive command failed with exit code 1' in log_content
             and 'DETAIL: The failed archive command was:' in log_content
             and 'INFO: pg_probackup archive-push from' in log_content
-            and "ERROR: file '{0}', already exists.".format(file) in log_content,
+            and 'ERROR: WAL segment "{0}" already exists.'.format(file) in log_content,
             'Expecting error messages about failed archive_command'
             )
         self.assertFalse('pg_probackup archive-push completed successfully' in log_content)
@@ -250,7 +265,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
             set_replication=True,
             initdb_params=['--data-checksums'],
-            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s'}
+            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s', 'max_wal_size': '1GB'}
             )
         self.init_pb(backup_dir)
         # ADD INSTANCE 'MASTER'
@@ -305,7 +320,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
             "insert into t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(512,768) i")
         before = master.safe_psql("postgres", "SELECT * FROM t_heap")
         backup_id = self.backup_node(backup_dir, 'replica', replica, backup_type='page', options=['--archive-timeout=30',
-            '--master-host=localhost', '--master-db=postgres','--master-port={0}'.format(master.port)])
+            '--log-level-file=verbose', '--master-host=localhost', '--master-db=postgres','--master-port={0}'.format(master.port)])
         self.validate_pb(backup_dir, 'replica')
         self.assertEqual('OK', self.show_pb(backup_dir, 'replica', backup_id)['status'])
@@ -319,11 +334,11 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         self.assertEqual(before, after)

         # Clean after yourself
-        self.del_test_dir(module_name, fname)
+        # self.del_test_dir(module_name, fname)

     # @unittest.expectedFailure
     # @unittest.skip("skip")
-    def test_master_and_replica_concurrent_archiving(self):
+    def test_master_and_replica_parallel_archiving(self):
         """make node 'master' with archiving, take archive backup and turn it into replica, set replica with archiving, make archive backup from replica, make archive backup from master"""
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@@ -343,7 +358,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         master.psql(
             "postgres",
-            "create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
+            "create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,10000) i")

         # TAKE FULL ARCHIVE BACKUP FROM MASTER
         self.backup_node(backup_dir, 'master', master)
@@ -370,7 +385,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         self.assertEqual(before, after)

         # TAKE FULL ARCHIVE BACKUP FROM REPLICA
-        backup_id = self.backup_node(backup_dir, 'replica', replica, options=['--archive-timeout=30',
+        backup_id = self.backup_node(backup_dir, 'replica', replica, options=['--archive-timeout=20', '--log-level-file=verbose',
             '--master-host=localhost', '--master-db=postgres','--master-port={0}'.format(master.port)])
         self.validate_pb(backup_dir, 'replica')
         self.assertEqual('OK', self.show_pb(backup_dir, 'replica', backup_id)['status'])
@@ -380,10 +395,79 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         self.validate_pb(backup_dir, 'master')
         self.assertEqual('OK', self.show_pb(backup_dir, 'master', backup_id)['status'])

+        # Clean after yourself
+        # self.del_test_dir(module_name, fname)
+
     # @unittest.expectedFailure
-    @unittest.skip("skip")
-    def test_archive_compress(self):
-        """Test compression"""
+    # @unittest.skip("skip")
+    def test_master_and_replica_concurrent_archiving(self):
+        """make node 'master' with archiving, take archive backup and turn it into replica, set replica with archiving, make archive backup from replica, make archive backup from master"""
+        fname = self.id().split('.')[3]
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s'}
+            )
+        replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
+        replica.cleanup()
+
+        self.init_pb(backup_dir)
+        # ADD INSTANCE 'MASTER'
+        self.add_instance(backup_dir, 'master', master)
+        self.set_archiving(backup_dir, 'master', master)
+        master.start()
+
+        master.psql(
+            "postgres",
+            "create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,10000) i")
+
+        # TAKE FULL ARCHIVE BACKUP FROM MASTER
+        self.backup_node(backup_dir, 'master', master)
+        # GET LOGICAL CONTENT FROM MASTER
+        before = master.safe_psql("postgres", "SELECT * FROM t_heap")
+        # GET PHYSICAL CONTENT FROM MASTER
+        pgdata_master = self.pgdata_content(master.data_dir)
+
+        # Settings for Replica
+        self.restore_node(backup_dir, 'master', replica)
+        # CHECK PHYSICAL CORRECTNESS on REPLICA
+        pgdata_replica = self.pgdata_content(replica.data_dir)
+        self.compare_pgdata(pgdata_master, pgdata_replica)
+
+        self.set_replica(master, replica, synchronous=True)
+        # ADD INSTANCE REPLICA
+        # self.add_instance(backup_dir, 'replica', replica)
+        # SET ARCHIVING FOR REPLICA
+        # self.set_archiving(backup_dir, 'replica', replica, replica=True)
+        replica.start()
+
+        # CHECK LOGICAL CORRECTNESS on REPLICA
+        after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
+        self.assertEqual(before, after)
+
+        master.psql(
+            "postgres",
+            "insert into t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,10000) i")
+
+        # TAKE FULL ARCHIVE BACKUP FROM REPLICA
+        backup_id = self.backup_node(backup_dir, 'master', replica, options=['--archive-timeout=30',
+            '--master-host=localhost', '--master-db=postgres','--master-port={0}'.format(master.port)])
+        self.validate_pb(backup_dir, 'master')
+        self.assertEqual('OK', self.show_pb(backup_dir, 'master', backup_id)['status'])
+
+        # TAKE FULL ARCHIVE BACKUP FROM MASTER
+        backup_id = self.backup_node(backup_dir, 'master', master)
+        self.validate_pb(backup_dir, 'master')
+        self.assertEqual('OK', self.show_pb(backup_dir, 'master', backup_id)['status'])
+
+        # Clean after yourself
+        # self.del_test_dir(module_name, fname)
+
+    # @unittest.expectedFailure
+    # @unittest.skip("skip")
+    def test_archive_pg_receivexlog(self):
+        """Test backup with pg_receivexlog WAL delivery method"""
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
@@ -393,51 +477,49 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
             )
         self.init_pb(backup_dir)
         self.add_instance(backup_dir, 'node', node)
-        self.set_archiving(backup_dir, 'node', node)
         node.start()
+        if self.get_version(node) < 100000:
+            pg_receivexlog_path = node.get_bin_path('pg_receivexlog')
+        else:
+            pg_receivexlog_path = node.get_bin_path('pg_receivewal')

-        self.backup_node(backup_dir, 'node', node)
+        pg_receivexlog = self.run_binary([pg_receivexlog_path, '-p', str(node.port), '--synchronous',
+            '-D', os.path.join(backup_dir, 'wal', 'node')], async=True)
+        if pg_receivexlog.returncode:
+            self.assertFalse(True, 'Failed to start pg_receivexlog: {0}'.format(pg_receivexlog.communicate()[1]))

         node.safe_psql(
             "postgres",
-            "create table t_heap as select 1 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,10000) i")
+            "create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,10000) i")
-        result = node.safe_psql("postgres", "SELECT * FROM t_heap")
-        self.backup_node(backup_dir, 'node', node, backup_type='page')
-        node.cleanup()
-        self.restore_node(backup_dir, 'node', node)
-        node.start()
-        self.assertEqual(result, node.safe_psql("postgres", "SELECT * FROM t_heap"),
-            'data after restore not equal to original data')

         self.backup_node(backup_dir, 'node', node)

+        #PAGE
         node.safe_psql(
             "postgres",
+            "insert into t_heap select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(10000,20000) i")
+        self.backup_node(backup_dir, 'node', node,backup_type='page')

         result = node.safe_psql("postgres", "SELECT * FROM t_heap")
-        self.backup_node(backup_dir, 'node', node, backup_type='page')
+        self.validate_pb(backup_dir)

+        # Check data correctness
         node.cleanup()
         self.restore_node(backup_dir, 'node', node)
         node.start()
         self.assertEqual(result, node.safe_psql("postgres", "SELECT * FROM t_heap"),
             'data after restore not equal to original data')

         # Clean after yourself
-        # self.del_test_dir(module_name, fname)
+        pg_receivexlog.kill()
+        self.del_test_dir(module_name, fname)

     # @unittest.expectedFailure
-    @unittest.skip("skip")
-    def test_archive_pg_receivexlog(self):
-        """Description in jira issue PGPRO-434"""
+    # @unittest.skip("skip")
+    def test_archive_pg_receivexlog_compression_pg10(self):
+        """Test backup with pg_receivewal compressed WAL delivery method"""
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
@@ -447,55 +529,41 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
             )
         self.init_pb(backup_dir)
         self.add_instance(backup_dir, 'node', node)
-        self.set_archiving(backup_dir, 'node', node)
         node.start()
+        if self.get_version(node) < 100000:
+            self.assertFalse(True, 'You need PostgreSQL 10 for this test')
+        else:
+            pg_receivexlog_path = node.get_bin_path('pg_receivewal')
+        pg_receivexlog = self.run_binary([pg_receivexlog_path, '-p', str(node.port), '--synchronous',
+            '-Z', '9', '-D', os.path.join(backup_dir, 'wal', 'node')], async=True)
+        if pg_receivexlog.returncode:
+            self.assertFalse(True, 'Failed to start pg_receivexlog: {0}'.format(pg_receivexlog.communicate()[1]))

         node.safe_psql(
             "postgres",
-            "create table t_heap as select 1 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,100) i")
+            "create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,10000) i")
-        result = node.safe_psql("postgres", "SELECT * FROM t_heap")

         self.backup_node(backup_dir, 'node', node)
-        node.cleanup()
-        self.restore_node(backup_dir, 'node', node, backup_type='page')
-        node.start()
-        self.assertEqual(result, node.safe_psql("postgres", "SELECT * FROM t_heap"),
-            'data after restore not equal to original data')
-
-        # Clean after yourself
-        # self.del_test_dir(module_name, fname)
-
-    # @unittest.expectedFailure
-    @unittest.skip("skip")
-    def test_archive_pg_receivexlog_compression_pg_10(self):
-        """Description in jira issue PGPRO-434"""
-        fname = self.id().split('.')[3]
-        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
-        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
-            set_replication=True,
-            initdb_params=['--data-checksums'],
-            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s'}
-            )
-        self.init_pb(backup_dir)
-        self.add_instance(backup_dir, 'node', node)
-        self.set_archiving(backup_dir, 'node', node)
-        node.start()
+        #PAGE
         node.safe_psql(
             "postgres",
-            "create table t_heap as select 1 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,100) i")
+            "insert into t_heap select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(10000,20000) i")
+        self.backup_node(backup_dir, 'node', node,backup_type='page')

         result = node.safe_psql("postgres", "SELECT * FROM t_heap")
-        self.backup_node(backup_dir, 'node', node)
+        self.validate_pb(backup_dir)

+        # Check data correctness
         node.cleanup()
         self.restore_node(backup_dir, 'node', node)
         node.start()
         self.assertEqual(result, node.safe_psql("postgres", "SELECT * FROM t_heap"),
             'data after restore not equal to original data')

         # Clean after yourself
-        # self.del_test_dir(module_name, fname)
+        pg_receivexlog.kill()
+        self.del_test_dir(module_name, fname)
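One detail both new tests share: the streaming receiver would outlive the test unless it is explicitly killed, which is why kill() precedes del_test_dir. A hedged sketch of that teardown with a guaranteed cleanup (the try/finally is ours; the commit calls kill() inline):

    pg_receivexlog = self.run_binary([pg_receivexlog_path, '-p', str(node.port),
        '--synchronous', '-D', wal_dir], async=True)
    try:
        # ... take backups, restore, compare results (test body) ...
        pass
    finally:
        pg_receivexlog.kill()   # stop streaming before tearing down directories
        pg_receivexlog.wait()   # reap the child process
    self.del_test_dir(module_name, fname)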

View File

@@ -374,6 +374,24 @@ class ProbackupTest(object):
         except subprocess.CalledProcessError as e:
             raise ProbackupException(e.output.decode("utf-8"), self.cmd)

+    def run_binary(self, command, async=False):
+        try:
+            if async:
+                if self.verbose:
+                    print(command)
+                return subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=self.test_env)
+            else:
+                if self.verbose:
+                    print(command)
+                self.output = subprocess.check_output(
+                    command,
+                    stderr=subprocess.STDOUT,
+                    env=self.test_env
+                    ).decode("utf-8")
+                return self.output
+        except subprocess.CalledProcessError as e:
+            raise ProbackupException(e.output.decode("utf-8"), command)
+
     def init_pb(self, backup_dir):
         shutil.rmtree(backup_dir, ignore_errors=True)
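A usage sketch for the new helper: with async=True it returns the subprocess.Popen object immediately and the caller checks returncode and terminates it; without it, it blocks and returns the decoded output. Names below are illustrative, and note that async became a reserved word in Python 3.7, so on newer interpreters the parameter would need renaming:

    # Blocking call: returns stdout as text or raises ProbackupException.
    output = self.run_binary(['pg_probackup', 'show', '-B', backup_dir])

    # Background call: returns a Popen; a set returncode means it already died at startup.
    proc = self.run_binary(['pg_receivewal', '-D', wal_dir, '--synchronous'], async=True)
    if proc.returncode:
        self.fail('receiver died at startup: {0}'.format(proc.communicate()[1]))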
@@ -643,6 +661,9 @@ class ProbackupTest(object):
         else:
             node.safe_psql("postgres", "select pg_switch_xlog()")

+    def get_version(self, node):
+        return testgres.get_config()["VERSION_NUM"]
+
     def del_test_dir(self, module_name, fname):
         """ Del testdir and optimistically try to del module dir"""
         try:
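As committed, get_version ignores its node argument and reports VERSION_NUM from the testgres build configuration, i.e. the version of the binaries under test rather than of a particular node. An equivalent per-node probe, if one were needed (a sketch, not the commit's code):

    def get_server_version(node):
        # server_version_num is an integer-like string, e.g. '100001' for 10.1
        return int(node.safe_psql("postgres", "show server_version_num").strip())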

View File

@@ -102,3 +102,51 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
         # Clean after yourself
         self.del_test_dir(module_name, fname)

+    # @unittest.skip("123")
+    def test_retention_wal(self):
+        """purge backups using window-based retention policy"""
+        fname = self.id().split('.')[3]
+        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica'}
+            )
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.start()
+
+        node.safe_psql(
+            "postgres",
+            "create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,100500) i")
+
+        # Take FULL BACKUP
+        self.backup_node(backup_dir, 'node', node)
+        node.safe_psql(
+            "postgres",
+            "insert into t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,100500) i")
+        self.backup_node(backup_dir, 'node', node)
+
+        backups = os.path.join(backup_dir, 'backups', 'node')
+        days_delta = 5
+        for backup in os.listdir(backups):
+            if backup == 'pg_probackup.conf':
+                continue
+            with open(os.path.join(backups, backup, "backup.control"), "a") as conf:
+                conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format(
+                    datetime.now() - timedelta(days=days_delta)))
+                days_delta -= 1
+
+        # Make the backup to be kept
+        self.backup_node(backup_dir, 'node', node, backup_type="page")
+
+        self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4)
+
+        # Purge backups
+        self.delete_expired(backup_dir, 'node')
+        self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2)
+
+        # Clean after yourself
+        self.del_test_dir(module_name, fname)
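The new test ages its backups artificially by appending a back-dated recovery_time to each backup.control; delete-expired then applies the window policy. The decision it exercises reduces to a check like the following (a simplified model for illustration, not pg_probackup's implementation):

    from datetime import datetime, timedelta

    def keep_backup(recovery_time, window_days, now=None):
        # A backup survives window-based retention if its recovery point
        # still falls inside the retention window.
        now = now or datetime.now()
        return recovery_time >= now - timedelta(days=window_days)

    # With a 2-day window, backups back-dated 5 and 4 days ago are purged,
    # matching the test's expectation of 4 backups shrinking to 2.
    assert not keep_backup(datetime.now() - timedelta(days=5), 2)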