1
0
mirror of https://github.com/postgrespro/pg_probackup.git synced 2024-11-28 09:33:54 +02:00

pgpro589 added

This commit is contained in:
Grigory Smolkin 2017-05-18 13:01:30 +03:00
parent ff03146366
commit 2b02bd4094
9 changed files with 228 additions and 124 deletions

View File

@ -5,13 +5,16 @@ from . import init_test, option_test, show_test, \
retention_test, ptrack_clean, ptrack_cluster, \
ptrack_move_to_tablespace, ptrack_recovery, ptrack_vacuum, \
ptrack_vacuum_bits_frozen, ptrack_vacuum_bits_visibility, \
ptrack_vacuum_full, ptrack_vacuum_truncate, pgpro561, pgpro668
ptrack_vacuum_full, ptrack_vacuum_truncate, common_archive_test, \
pgpro561, pgpro688, pgpro702, pgpro589
def load_tests(loader, tests, pattern):
suite = unittest.TestSuite()
suite.addTests(loader.loadTestsFromModule(pgpro561))
suite.addTests(loader.loadTestsFromModule(pgpro668))
suite.addTests(loader.loadTestsFromModule(pgpro589))
suite.addTests(loader.loadTestsFromModule(pgpro688))
suite.addTests(loader.loadTestsFromModule(pgpro702))
suite.addTests(loader.loadTestsFromModule(init_test))
suite.addTests(loader.loadTestsFromModule(option_test))
suite.addTests(loader.loadTestsFromModule(show_test))

View File

@ -57,3 +57,6 @@ class CommonArchiveDir(ProbackupTest, unittest.TestCase):
res = node1.safe_psql("postgres", "select last_failed_wal from pg_stat_get_archiver() where last_failed_wal is not NULL")
self.assertEqual(res, six.b(""), 'Restored Node1 failed to archive segment {0} due to having the same archive command as Master'.format(res.rstrip()))
master.stop()
node1.stop()

60
tests/pgpro589.py Normal file
View File

@ -0,0 +1,60 @@
import unittest
import os
import six
from .ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack
from datetime import datetime, timedelta
from testgres import stop_all
import subprocess
from sys import exit
class LsnCheck(ProbackupTest, unittest.TestCase):
def __init__(self, *args, **kwargs):
super(LsnCheck, self).__init__(*args, **kwargs)
# @classmethod
# def tearDownClass(cls):
# stop_all()
# @unittest.expectedFailure
def test_pgpro589(self):
"""
https://jira.postgrespro.ru/browse/PGPRO-589
make node without archive support, make backup which should fail
check that no files where copied to backup catalogue
EXPECTED TO FAIL
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="tmp_dirs/pgpro589/{0}/node".format(fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
node.start()
node.pgbench_init(scale=5)
pgbench = node.pgbench(
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
options=["-c", "4", "-T", "10"]
)
pgbench.wait()
pgbench.stdout.close()
path = node.safe_psql("postgres", "select pg_relation_filepath('pgbench_accounts')").rstrip()
self.assertEqual(self.init_pb(node), six.b(""))
proc = self.backup_pb(
node, backup_type='full', options=['--archive-timeout=1'], async=True)
content = proc.stderr.read()
self.assertEqual(True, 'wait for LSN' in repr(content),
'No Wait for LSN')
self.assertEqual(True, 'could not be archived' in repr(content),
'No Fail Archiving Message')
id = self.show_pb(node)[0]['ID']
self.assertEqual('ERROR', self.show_pb(node, id=id)['status'], 'Backup should have ERROR status')
#print self.backup_dir(node)
file = os.path.join(self.backup_dir(node), 'backups', id, 'database', path)
self.assertEqual(False, os.path.isfile(file),
'\n Start LSN was not found in archive but datafiles where copied to backup catalogue.\n For example: {0}\n It is not optimal'.format(file))

View File

@ -1,110 +0,0 @@
import unittest
import os
import six
from .ptrack_helpers import ProbackupTest, ProbackupException
from datetime import datetime, timedelta
from testgres import stop_all
import subprocess
from sys import exit
class RecoveryWithTimeTarget(ProbackupTest, unittest.TestCase):
    # NOTE(review): this whole file is deleted in this commit; its tests are
    # superseded by pgpro688.py and the new PITR tests in the restore suite.

    def __init__(self, *args, **kwargs):
        super(RecoveryWithTimeTarget, self).__init__(*args, **kwargs)

    # @classmethod
    # def tearDownClass(cls):
    #     stop_all()

    def test_archive_node_backup_stream_restore_to_recovery_time(self):
        """
        make node with archiving, make stream backup,
        get Recovery Time, try to make pitr to Recovery Time
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(base_dir="tmp_dirs/pgpro668/{0}".format(fname),
            set_archiving=True,
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
            )
        node.start()

        self.assertEqual(self.init_pb(node), six.b(""))
        id = self.backup_pb(node, backup_type='full', options=["--stream"])
        # 'recovery-time' of the backup is the PITR target below.
        recovery_time = self.show_pb(node, id=id)['recovery-time']

        # Hard stop and wipe the data directory before restoring.
        node.pg_ctl('stop', {'-m': 'immediate', '-D': '{0}'.format(node.data_dir)})
        node.cleanup()

        self.restore_pb(node, options=['--time="{0}"'.format(recovery_time)])
        node.start({"-t": "600"})
        # The restored node must come up and report a running status.
        self.assertEqual(True, node.status())

    def test_validate_to_recovery_time(self):
        """
        make node with archiving, make stream backup,
        get Recovery Time, validate to Recovery Time
        EXPECT VALIDATE TO FAIL
        Waiting PGPRO-688
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(base_dir="tmp_dirs/pgpro668/{0}".format(fname),
            set_archiving=True,
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
            )
        node.start()

        self.assertEqual(self.init_pb(node), six.b(""))
        id = self.backup_pb(node, backup_type='full', options=["--stream"])
        recovery_time = self.show_pb(node, id=id)['recovery-time']

        # Optional
        #node.psql("postgres", "select pg_create_restore_point('123')")
        #node.psql("postgres", "select txid_current()")
        #node.psql("postgres", "select pg_switch_xlog()")

        # NOTE(review): asserts validation SUCCESS even though the docstring
        # says validate is expected to fail — tracked by PGPRO-688.
        self.assertIn(six.b("INFO: backup validation completed successfully on"),
            self.validate_pb(node, options=["--time='{0}'".format(recovery_time)]))
        ####
        node.pg_ctl('stop', {'-m': 'immediate', '-D': '{0}'.format(node.data_dir)})
        node.cleanup()
        self.restore_pb(node, options=['--time="{0}"'.format(recovery_time)])
        node.start({"-t": "600"})
        self.assertEqual(True, node.status())

    def test_archive_node_backup_stream_additional_commit_pitr(self):
        """
        make node with archiving, make stream backup, create table t_heap,
        try to make pitr to Recovery Time, check that t_heap do not exists
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(base_dir="tmp_dirs/pgpro668/{0}".format(fname),
            set_archiving=True,
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
            )
        node.start()

        self.assertEqual(self.init_pb(node), six.b(""))
        id = self.backup_pb(node, backup_type='full', options=["--stream"])
        # Created AFTER the backup: must be absent once we PITR back.
        node.psql("postgres", "create table t_heap(a int)")
        node.pg_ctl('stop', {'-m': 'immediate', '-D': '{0}'.format(node.data_dir)})
        node.cleanup()
        recovery_time = self.show_pb(node, id=id)['recovery-time']
        self.restore_pb(node,
            options=["-j", "4", '--time="{0}"'.format(recovery_time)]
            )
        node.start({"-t": "600"})
        # psql returns (ret, stdout, stderr); the error text lands in res[2].
        res = node.psql("postgres", 'select * from t_heap')
        self.assertEqual(True, 'does not exist' in res[2])

# Need test for validate time with autonomous backup without archiving
# We need to forbid validation of autonomous backup by time or xid
# if archiving is not set

45
tests/pgpro688.py Normal file
View File

@ -0,0 +1,45 @@
import unittest
import os
import six
from .ptrack_helpers import ProbackupTest, ProbackupException
from datetime import datetime, timedelta
from testgres import stop_all
import subprocess
from sys import exit
class ValidateTime(ProbackupTest, unittest.TestCase):
    """Validate an archive backup by its recovery time (PGPRO-688)."""

    def __init__(self, *args, **kwargs):
        super(ValidateTime, self).__init__(*args, **kwargs)

    # @classmethod
    # def tearDownClass(cls):
    #     stop_all()

    def test_validate_recovery_time(self):
        """
        make node with archiving, make backup,
        get Recovery Time, validate to Recovery Time
        EXPECT VALIDATE TO FAIL
        Waiting PGPRO-688
        """
        fname = self.id().split('.')[3]
        # base_dir fixed from "tmp_dirs/pgpro668/..." — stale copy-paste from
        # the deleted pgpro668.py that would collide with its tmp dirs.
        node = self.make_simple_node(
            base_dir="tmp_dirs/pgpro688/{0}".format(fname),
            set_archiving=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
        )
        node.start()

        self.assertEqual(self.init_pb(node), six.b(""))
        # 'backup_id' instead of 'id' to avoid shadowing the builtin.
        backup_id = self.backup_pb(node, backup_type='full')
        recovery_time = self.show_pb(node, id=backup_id)['recovery-time']

        # Optional
        #node.psql("postgres", "select pg_create_restore_point('123')")
        #node.psql("postgres", "select txid_current()")
        #node.psql("postgres", "select pg_switch_xlog()")

        # NOTE(review): asserts validation SUCCESS even though the docstring
        # says validate is expected to fail — tracked by PGPRO-688.
        self.assertIn(six.b("INFO: backup validation completed successfully on"),
            self.validate_pb(node, options=["--time='{0}'".format(recovery_time)]))
        ####

39
tests/pgpro702.py Normal file
View File

@ -0,0 +1,39 @@
import unittest
import os
import six
from .ptrack_helpers import ProbackupTest, ProbackupException
from datetime import datetime, timedelta
from testgres import stop_all
import subprocess
from sys import exit
class ValidateTime(ProbackupTest, unittest.TestCase):
    """Validate a stream backup by its recovery time (PGPRO-702)."""

    def __init__(self, *args, **kwargs):
        super(ValidateTime, self).__init__(*args, **kwargs)

    # @classmethod
    # def tearDownClass(cls):
    #     stop_all()

    def test_validate_recovery_time(self):
        """
        make node without archiving, make stream backup,
        get Recovery Time, validate to Recovery Time
        EXPECT VALIDATE TO FAIL
        """
        fname = self.id().split('.')[3]
        # Replication is enabled for --stream, but archiving is deliberately
        # off: validating such a backup by time should not be possible.
        node = self.make_simple_node(
            base_dir="tmp_dirs/pgpro702/{0}".format(fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'})
        node.start()

        self.assertEqual(self.init_pb(node), six.b(""))
        backup_id = self.backup_pb(node, backup_type='full', options=["--stream"])
        recovery_time = self.show_pb(node, id=backup_id)['recovery-time']

        validate_output = self.validate_pb(
            node, options=["--time='{0}'".format(recovery_time)])
        self.assertIn(
            six.b("INFO: backup validation completed successfully on"),
            validate_output)

View File

@ -292,17 +292,27 @@ class ProbackupTest(object):
success = False
self.assertEqual(success, True)
def run_pb(self, command):
def run_pb(self, command, async=False):
try:
# print [self.probackup_path] + command
output = subprocess.check_output(
[self.probackup_path] + command,
stderr=subprocess.STDOUT,
env=self.test_env
)
if async is True:
return subprocess.Popen(
[self.probackup_path] + command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self.test_env
)
else:
output = subprocess.check_output(
[self.probackup_path] + command,
stderr=subprocess.STDOUT,
env=self.test_env
)
if command[0] == 'backup':
if '-q' in command or '--quiet' in command:
return None
elif '-v' in command or '--verbose' in command:
return output
else:
# return backup ID
return output.split()[2]
@ -322,7 +332,7 @@ class ProbackupTest(object):
def clean_pb(self, node):
shutil.rmtree(self.backup_dir(node), ignore_errors=True)
def backup_pb(self, node=None, data_dir=None, backup_dir=None, backup_type="full", options=[]):
def backup_pb(self, node=None, data_dir=None, backup_dir=None, backup_type="full", options=[], async=False):
if data_dir is None:
data_dir = node.data_dir
if backup_dir is None:
@ -338,7 +348,7 @@ class ProbackupTest(object):
if backup_type:
cmd_list += ["-b", backup_type]
return self.run_pb(cmd_list + options)
return self.run_pb(cmd_list + options, async)
def restore_pb(self, node=None, backup_dir=None, data_dir=None, id=None, options=[]):
if data_dir is None:

View File

@ -702,14 +702,66 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.restore_pb(node,
options=["-T", "%s=%s" % (tblspc_path, tblspc_path_new)])
# )
# Check tables
# Check tables
node.start()
count = node.execute("postgres", "SELECT count(*) FROM tbl")
self.assertEqual(count[0][0], 4)
count = node.execute("postgres", "SELECT count(*) FROM tbl1")
self.assertEqual(count[0][0], 4)
node.stop()
def test_archive_node_backup_stream_restore_to_recovery_time(self):
"""
make node with archiving, make stream backup,
get Recovery Time, try to make PITR to Recovery Time
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="tmp_dirs/pgpro668/{0}".format(fname),
set_archiving=True,
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
)
node.start()
self.assertEqual(self.init_pb(node), six.b(""))
id = self.backup_pb(node, backup_type='full', options=["--stream"])
recovery_time = self.show_pb(node, id=id)['recovery-time']
node.pg_ctl('stop', {'-m': 'immediate', '-D': '{0}'.format(node.data_dir)})
node.cleanup()
self.restore_pb(node, options=['--time="{0}"'.format(recovery_time)])
node.start({"-t": "600"})
self.assertEqual(True, node.status())
node.stop()
def test_archive_node_backup_stream_additional_commit_pitr(self):
"""
make node with archiving, make stream backup, create table t_heap,
try to make pitr to Recovery Time, check that t_heap do not exists
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="tmp_dirs/restore/{0}".format(fname),
set_archiving=True,
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
)
node.start()
self.assertEqual(self.init_pb(node), six.b(""))
id = self.backup_pb(node, backup_type='full', options=["--stream"])
node.psql("postgres", "create table t_heap(a int)")
node.pg_ctl('stop', {'-m': 'immediate', '-D': '{0}'.format(node.data_dir)})
node.cleanup()
recovery_time = self.show_pb(node, id=id)['recovery-time']
self.restore_pb(node,
options=["-j", "4", '--time="{0}"'.format(recovery_time)]
)
node.start({"-t": "600"})
res = node.psql("postgres", 'select * from t_heap')
self.assertEqual(True, 'does not exist' in res[2])
node.stop()

View File

@ -31,6 +31,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
)
node.start()
node.pgbench_init(scale=2)
pgbench = node.pgbench(
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
@ -67,6 +68,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
with open(os.path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
backup_log.write(self.backup_pb(node, options=["--verbose"]))
node.pgbench_init(scale=2)
pgbench = node.pgbench(
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,