mirror of https://github.com/postgrespro/pg_probackup.git synced 2024-11-28 09:33:54 +02:00

PGPRO-688 tests added

This commit is contained in:
Grigory Smolkin 2017-05-15 02:43:05 +03:00
parent 6db02b6401
commit 442cef1ca5
5 changed files with 174 additions and 33 deletions

View File

@@ -5,11 +5,12 @@ from . import init_test, option_test, show_test, \
     retention_test, ptrack_clean, ptrack_cluster, \
     ptrack_move_to_tablespace, ptrack_recovery, ptrack_vacuum, \
     ptrack_vacuum_bits_frozen, ptrack_vacuum_bits_visibility, \
-    ptrack_vacuum_full, ptrack_vacuum_truncate
+    ptrack_vacuum_full, ptrack_vacuum_truncate, pgpro668


 def load_tests(loader, tests, pattern):
     suite = unittest.TestSuite()
+    suite.addTests(loader.loadTestsFromModule(pgpro668))
     suite.addTests(loader.loadTestsFromModule(init_test))
     suite.addTests(loader.loadTestsFromModule(option_test))
     suite.addTests(loader.loadTestsFromModule(show_test))
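For context, not part of the commit: load_tests() above is the standard unittest hook for building the package's suite, so the added loadTestsFromModule(pgpro668) call is all that is needed for the new tests to run with the rest. A minimal sketch of kicking off that suite, assuming the repository root is the current directory and the tests package is importable:

# Discovery on the "tests" package calls its load_tests() hook, which now adds
# the pgpro668 module before the other test modules.
import unittest

suite = unittest.defaultTestLoader.discover('tests')
unittest.TextTestRunner(verbosity=2).run(suite)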

View File

@@ -114,10 +114,13 @@ class BackupTest(ProbackupTest, unittest.TestCase):
     # @unittest.skip("123")
     def test_ptrack_threads(self):
         """ptrack multi thread backup mode"""
-        node = self.make_bnode(
-            base_dir="tmp_dirs/backup/ptrack_threads_4",
-            options={"ptrack_enable": "on", 'max_wal_senders': '2'}
-        )
+        fname = self.id().split('.')[3]
+        print '{0} started'.format(fname)
+        node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
+            set_archiving=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica', "ptrack_enable": "on", 'max_wal_senders': '2'}
+            )

         node.start()
         self.assertEqual(self.init_pb(node), six.b(""))
@@ -143,8 +146,6 @@ class BackupTest(ProbackupTest, unittest.TestCase):
             initdb_params=['--data-checksums'],
             pg_options={'wal_level': 'replica', 'ptrack_enable': 'on', 'max_wal_senders': '2'}
             )
-        # node.append_conf("pg_hba.conf", "local replication all trust")
-        # node.append_conf("pg_hba.conf", "host replication all 127.0.0.1/32 trust")

         node.start()
         self.assertEqual(self.init_pb(node), six.b(""))
@@ -165,5 +166,4 @@ class BackupTest(ProbackupTest, unittest.TestCase):
             ))
         self.assertEqual(self.show_pb(node)[1]['Status'], six.b("OK"))
         node.stop()

-
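An aside, not part of the commit: make_simple_node appears to take the settings that make_bnode previously received through its options dict and to apply each pg_options pair as a postgresql.conf setting, much as the commented-out make_bnode helper further below did for options. A trivial, hypothetical sketch of what the dict above amounts to as configuration:

# Hypothetical illustration: each pg_options key/value pair is assumed to become
# one postgresql.conf line; the dict is copied from the hunk above.
pg_options = {'wal_level': 'replica', 'ptrack_enable': 'on', 'max_wal_senders': '2'}
for name, value in pg_options.items():
    print("%s = %s" % (name, value))
# prints, in some order: wal_level = replica, ptrack_enable = on, max_wal_senders = 2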

tests/pgpro668.py (new file, 112 lines added)
View File

@@ -0,0 +1,112 @@
import unittest
import os
import six
from .ptrack_helpers import ProbackupTest, ProbackupException
from datetime import datetime, timedelta
from testgres import stop_all
import subprocess
from sys import exit


class SomeTest(ProbackupTest, unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super(SomeTest, self).__init__(*args, **kwargs)

    # @classmethod
    # def tearDownClass(cls):
    #     stop_all()

    def test_archive_node_backup_stream_restore_to_recovery_time(self):
        """
        make node with archiving, make stream backup,
        get Recovery Time, try to make pitr to Recovery Time
        """
        fname = self.id().split('.')[3]
        print '{0} started'.format(fname)
        node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
            set_archiving=True,
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
            )

        node.start()
        self.assertEqual(self.init_pb(node), six.b(""))
        id = self.backup_pb(node, backup_type='full', options=["--stream"])
        recovery_time = self.show_pb(node, id=id)['recovery-time']

        node.pg_ctl('stop', {'-m': 'immediate', '-D': '{0}'.format(node.data_dir)})
        node.cleanup()

        self.restore_pb(node, options=['--time="{0}"'.format(recovery_time)])
        node.start({"-t": "600"})
        self.assertEqual(True, node.status())

    def test_validate_to_recovery_time(self):
        """
        make node with archiving, make stream backup,
        get Recovery Time, validate to Recovery Time
        Should fail. Waiting PGPRO-688
        """
        fname = self.id().split('.')[3]
        print '{0} started'.format(fname)
        node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
            set_archiving=True,
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
            )

        node.start()
        self.assertEqual(self.init_pb(node), six.b(""))
        id = self.backup_pb(node, backup_type='full', options=["--stream"])
        recovery_time = self.show_pb(node, id=id)['recovery-time']

        # Optional
        #node.psql("postgres", "select pg_create_restore_point('123')")
        #node.psql("postgres", "select txid_current()")
        #node.psql("postgres", "select pg_switch_xlog()")

        self.assertIn(six.b("INFO: backup validation completed successfully on"),
            self.validate_pb(node, options=["--time='{0}'".format(recovery_time)]))
        ####
        node.pg_ctl('stop', {'-m': 'immediate', '-D': '{0}'.format(node.data_dir)})
        node.cleanup()

        self.restore_pb(node, options=['--time="{0}"'.format(recovery_time)])
        node.start({"-t": "600"})
        self.assertEqual(True, node.status())

    def test_archive_node_backup_stream_additional_commit_pitr(self):
        """
        make node with archiving, make stream backup, create table t_heap,
        try to make pitr to Recovery Time, check that t_heap do not exists
        """
        fname = self.id().split('.')[3]
        print '{0} started'.format(fname)
        node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
            set_archiving=True,
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
            )

        node.start()
        self.assertEqual(self.init_pb(node), six.b(""))
        id = self.backup_pb(node, backup_type='full', options=["--stream"])
        node.psql("postgres", "create table t_heap(a int)")

        node.pg_ctl('stop', {'-m': 'immediate', '-D': '{0}'.format(node.data_dir)})
        node.cleanup()
        recovery_time = self.show_pb(node, id=id)['recovery-time']
        self.restore_pb(node,
            options=["-j", "4", '--time="{0}"'.format(recovery_time)]
            )

        node.start({"-t": "600"})
        res = node.psql("postgres", 'select * from t_heap')
        self.assertEqual(True, 'does not exist' in res[2])

# Need test for validate time with autonomous backup without archiving
# We need to forbid validation of autonomous backup by time or xid
# if archiving is not set
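To make the reasoning of the last test explicit (an illustration, not part of the commit; the timestamps are invented): t_heap is created only after the full backup, so its commit lies after the backup's recovery time, and a restore with --time set to that recovery time must stop before the table exists.

# Invented timestamps, for illustration only; in the test both values come from
# show_pb() and the server, not from literals.
from datetime import datetime

recovery_time  = datetime.strptime('2017-05-15 02:43:05', '%Y-%m-%d %H:%M:%S')
t_heap_created = datetime.strptime('2017-05-15 02:44:10', '%Y-%m-%d %H:%M:%S')

# Recovery stops at recovery_time, which precedes the CREATE TABLE commit,
# so after the restore 'select * from t_heap' is expected to fail.
assert t_heap_created > recovery_time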

View File

@@ -152,25 +152,25 @@ class ProbackupTest(object):
     def backup_dir(self, node):
         return os.path.abspath("%s/backup" % node.base_dir)

-    def make_bnode(self, base_dir=None, allows_streaming=False, options={}):
-        real_base_dir = os.path.join(self.dir_path, base_dir)
-        shutil.rmtree(real_base_dir, ignore_errors=True)
-
-        node = get_new_node('test', base_dir=real_base_dir)
-        node.init(allows_streaming=allows_streaming)
-
-        if not allows_streaming:
-            node.append_conf("postgresql.auto.conf", "wal_level = hot_standby")
-        node.append_conf("postgresql.auto.conf", "archive_mode = on")
-        node.append_conf(
-            "postgresql.auto.conf",
-            """archive_command = 'cp "%%p" "%s/%%f"'""" % os.path.abspath(self.arcwal_dir(node))
-        )
-
-        for key, value in six.iteritems(options):
-            node.append_conf("postgresql.conf", "%s = %s" % (key, value))
-
-        return node
+#    def make_bnode(self, base_dir=None, allows_streaming=False, options={}):
+#        real_base_dir = os.path.join(self.dir_path, base_dir)
+#        shutil.rmtree(real_base_dir, ignore_errors=True)
+#
+#        node = get_new_node('test', base_dir=real_base_dir)
+#        node.init(allows_streaming=allows_streaming)
+#
+#        if not allows_streaming:
+#            node.append_conf("postgresql.auto.conf", "wal_level = hot_standby")
+#        node.append_conf("postgresql.auto.conf", "archive_mode = on")
+#        node.append_conf(
+#            "postgresql.auto.conf",
+#            """archive_command = 'cp "%%p" "%s/%%f"'""" % os.path.abspath(self.arcwal_dir(node))
+#        )
+#
+#        for key, value in six.iteritems(options):
+#            node.append_conf("postgresql.conf", "%s = %s" % (key, value))
+#
+#        return node

     # def print_started(self, fname):
     #     print
@@ -318,7 +318,7 @@ class ProbackupTest(object):

     def run_pb(self, command):
         try:
-            # print [self.probackup_path] + command
+            print [self.probackup_path] + command
             output = subprocess.check_output(
                 [self.probackup_path] + command,
                 stderr=subprocess.STDOUT,
@@ -417,12 +417,12 @@ class ProbackupTest(object):
         body = body[::-1]
         # split string in list with string for every header element
         header_split = re.split(" +", header)
-        # CRUNCH, remove last item, because it empty, like that ''
+        # CRUNCH, remove last item, because it`s empty, like that ''
         header_split.pop()
         for backup_record in body:
             # split string in list with string for every backup record element
             backup_record_split = re.split(" +", backup_record)
-            # CRUNCH, remove last item, because it empty, like that ''
+            # CRUNCH, remove last item, because it`s empty, like that ''
             backup_record_split.pop()
             if len(header_split) != len(backup_record_split):
                 print warning.format(
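Aside, not part of the commit: the parsing shown in this hunk splits the show output's header and each record on runs of spaces and drops the trailing empty element that re.split leaves behind. A self-contained sketch of that technique, with made-up sample rows:

# Sample rows are invented for illustration; the real text comes from show_pb().
import re

header = "ID       Mode  Status  "
record = "QWERTY1  FULL  OK      "

header_split = re.split(" +", header)
header_split.pop()            # drop the trailing '' produced by the trailing spaces
record_split = re.split(" +", record)
record_split.pop()

print(dict(zip(header_split, record_split)))   # e.g. {'ID': 'QWERTY1', 'Mode': 'FULL', 'Status': 'OK'}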

View File

@@ -5,6 +5,7 @@ from .ptrack_helpers import ProbackupTest, ProbackupException
 from datetime import datetime, timedelta
 from testgres import stop_all
 import subprocess
+from sys import exit


 class ValidateTest(ProbackupTest, unittest.TestCase):
@@ -19,6 +20,34 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
     # except:
     #     pass

+    # @unittest.skip("123")
+    def test_validate_time(self):
+        """recovery to latest from full backup"""
+        fname = self.id().split('.')[3]
+        print '\n {0} started'.format(fname)
+        node = self.make_simple_node(base_dir="tmp_dirs/validate/{0}".format(fname),
+            set_archiving=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica'}
+            )
+
+        node.start()
+        pgbench = node.pgbench(
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            options=["-c", "4", "-T", "10"]
+            )
+        pgbench.wait()
+        pgbench.stdout.close()
+
+        self.assertEqual(self.init_pb(node), six.b(""))
+        id = self.backup_pb(node)
+        recovery_time = self.show_pb(node, id=id)['recovery-time']
+
+        self.assertIn(six.b("INFO: backup validation completed successfully on"),
+            self.validate_pb(node, options=["--time='{0}'".format(recovery_time)]))
+        node.stop()
+
     # @unittest.skip("123")
     def test_validate_wal_1(self):
         """recovery to latest from full backup"""
@@ -176,6 +205,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
             )
         node.stop()

+    # @unittest.skip("123")
     def test_validate_wal_lost_segment_2(self):
         """Loose segment located between backups """
         fname = self.id().split('.')[3]
@@ -224,12 +254,10 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
             )
         self.delete_pb(node, id=self.show_pb(node)[1]['ID'])

         ##### Hole Smokes, Batman! We just lost a wal segment and know nothing about it
         ##### We need archive-push ASAP
         self.backup_pb(node, backup_type='full')
-        self.assertEqual(False,
-            'validation completed successfully' in self.validate_pb(node))
+        self.assertEqual(False, 'validation completed successfully' in self.validate_pb(node))

         ########
         node.stop()
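A closing aside, not part of the commit: the assertEqual(False, ... in ...) pattern in the last hunk can also be spelled with assertNotIn, which reports both operands when it fails. A self-contained sketch, with an invented output string standing in for what validate_pb() would return:

import unittest

class NegativeValidationCheck(unittest.TestCase):
    def test_validation_must_not_succeed(self):
        # The output string is invented; in the real test it comes from validate_pb().
        output = b"ERROR: there are not enough WAL records to restore"
        # Same intent as assertEqual(False, 'validation completed successfully' in output),
        # but the failure message shows both the needle and the haystack.
        self.assertNotIn(b"validation completed successfully", output)

if __name__ == '__main__':
    unittest.main()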