Mirror of https://github.com/postgrespro/pg_probackup.git

validate rework, self.cmd and self.output added

Grigory Smolkin 2017-05-25 12:53:33 +03:00
parent 777ab09f29
commit b1e849a588
14 changed files with 230 additions and 92 deletions
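The core of the rework: run_pb() now records the executed command line in self.cmd and the captured output in self.output, so failing assertions can report both. A minimal sketch of the pattern, assuming a pg_probackup binary on PATH (the class name and binary path here are illustrative, not part of the commit):

import subprocess

class ProbackupRunner(object):  # illustrative stand-in for ProbackupTest
    probackup_path = 'pg_probackup'  # assumed binary location

    def run_pb(self, command):
        # remember the exact command line for failure messages
        self.cmd = ' '.join(map(str, [self.probackup_path] + command))
        # remember the output too; check_output raises CalledProcessError
        # on non-zero exit, which the tests wrap in ProbackupException
        self.output = subprocess.check_output(
            [self.probackup_path] + command,
            stderr=subprocess.STDOUT)
        return self.output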

.gitignore

@@ -29,6 +29,7 @@
/tests/__pycache__/
/tests/tmp_dirs/
/tests/*pyc
/helpers/*pyc
# Extra files
/datapagemap.c

tests/helpers/__init__.py

@@ -0,0 +1,2 @@
__all__ = ['ptrack_helpers', 'expected_errors']
#from . import *

tests/helpers/ptrack_helpers.py

@@ -136,7 +136,8 @@ class ProbackupTest(object):
self.test_env["LC_MESSAGES"] = "C"
self.test_env["LC_TIME"] = "C"
self.dir_path = os.path.dirname(os.path.realpath(__file__))
self.helpers_path = os.path.dirname(os.path.realpath(__file__))
self.dir_path = os.path.abspath(os.path.join(self.helpers_path, os.pardir))
try:
os.makedirs(os.path.join(self.dir_path, "tmp_dirs"))
except:
@@ -213,12 +214,16 @@ class ProbackupTest(object):
os.close(file)
return md5_per_page
def get_ptrack_bits_per_page_for_fork(self, file, size):
def get_ptrack_bits_per_page_for_fork(self, node, file, size):
if self.get_pgpro_edition(node) == 'enterprise':
header_size = 48
else:
header_size = 24
ptrack_bits_for_fork = []
byte_size = os.path.getsize(file + '_ptrack')
byte_size_minus_header = byte_size - 24
byte_size_minus_header = byte_size - header_size
file = os.open(file + '_ptrack', os.O_RDONLY)
os.lseek(file, 24, 0)
os.lseek(file, header_size, 0)
lot_of_bytes = os.read(file, byte_size_minus_header)
for byte in lot_of_bytes:
byte_inverted = bin(ord(byte))[2:].rjust(8, '0')[::-1]
@@ -295,7 +300,7 @@ class ProbackupTest(object):
def run_pb(self, command, async=False):
try:
#print [self.probackup_path] + command
self.cmd = ' '.join(map(str, [self.probackup_path] + command))
if async is True:
return subprocess.Popen(
[self.probackup_path] + command,
@@ -304,23 +309,21 @@
env=self.test_env
)
else:
output = subprocess.check_output(
self.output = subprocess.check_output(
[self.probackup_path] + command,
stderr=subprocess.STDOUT,
env=self.test_env
)
if command[0] == 'backup':
if '-q' in command or '--quiet' in command:
return None
elif '-v' in command or '--verbose' in command:
return output
else:
# return backup ID
for line in output.splitlines():
if 'INFO: Backup' and 'completed' in line:
return line.split()[2]
# return backup ID
for line in self.output.splitlines():
if 'INFO: Backup' in line and 'completed' in line:
return line.split()[2]
# backup_id = line.split()[2]
# return {'cmd': cmd, 'output': output, 'backup_id': backup_id}
else:
return output
return self.output
# return {'cmd': cmd, 'output': output}
except subprocess.CalledProcessError as e:
raise ProbackupException(e.output, e.cmd)
@@ -481,25 +484,34 @@ class ProbackupTest(object):
out_dict[key.strip()] = value.strip(" '").replace("'\n", "")
return out_dict
def set_archiving_conf(self, node, archive_dir):
def set_archiving_conf(self, node, archive_dir=False, replica=False):
if not archive_dir:
archive_dir = self.arcwal_dir(node)
if replica:
archive_mode = 'always'
node.append_conf('postgresql.auto.conf', 'hot_standby = on')
else:
archive_mode = 'on'
node.append_conf(
"postgresql.auto.conf",
"wal_level = archive"
)
node.append_conf(
"postgresql.auto.conf",
"archive_mode = on"
"archive_mode = {0}".format(archive_mode)
)
if os.name == 'posix':
node.append_conf(
"postgresql.auto.conf",
"archive_command = 'test ! -f {0}/%f && cp %p {0}/%f'".format(archive_dir)
)
elif os.name == 'nt':
node.append_conf(
"postgresql.auto.conf",
"archive_command = 'copy %p {0}\\%f'".format(archive_dir)
)
#elif os.name == 'nt':
# node.append_conf(
# "postgresql.auto.conf",
# "archive_command = 'copy %p {0}\\%f'".format(archive_dir)
# )
def wrong_wal_clean(self, node, wal_size):
wals_dir = os.path.join(self.backup_dir(node), "wal")
@@ -517,4 +529,9 @@ class ProbackupTest(object):
var = node.execute("postgres", "select setting from pg_settings where name = 'wal_block_size'")
return int(var[0][0])
# def ptrack_node(self, ptrack_enable=False, wal_level='minimal', max_wal_senders='2', allow_replication=True)
def get_pgpro_edition(self, node):
if node.execute("postgres", "select exists(select 1 from pg_proc where proname = 'pgpro_edition')")[0][0]:
var = node.execute("postgres", "select pgpro_edition()")
return str(var[0][0])
else:
return False
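For reference, the decoding that get_ptrack_bits_per_page_for_fork() performs, pulled out into a standalone sketch (Python 2, like the tests; the 24-byte vanilla / 48-byte enterprise header split follows the diff above, and the function name is illustrative):

import os

def read_ptrack_bits(path, header_size=24):  # 48 on Postgres Pro Enterprise
    # the ptrack fork starts with a header; the per-page bits follow it
    byte_size = os.path.getsize(path) - header_size
    fd = os.open(path, os.O_RDONLY)
    try:
        os.lseek(fd, header_size, 0)
        raw = os.read(fd, byte_size)
    finally:
        os.close(fd)
    bits = []
    for byte in raw:
        # bin() prints the most significant bit first; reverse each byte
        # so that bits[n] corresponds to page n
        bits.extend(bin(ord(byte))[2:].rjust(8, '0')[::-1])
    return bits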

tests/ptrack_clean.py

@@ -45,7 +45,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['size'])
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
# Update everything, vacuum it and make PTRACK BACKUP
@@ -62,7 +62,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# # get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['size'])
# check that ptrack bits are cleaned
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
@@ -81,7 +81,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# # get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['size'])
# check that ptrack bits are cleaned
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])

tests/ptrack_cluster.py

@@ -63,7 +63,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
@@ -120,7 +120,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
@@ -177,7 +177,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
@@ -234,7 +234,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
@@ -291,7 +291,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])

tests/ptrack_move_to_tablespace.py

@@ -50,7 +50,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['size'])
# check that ptrack has correct bits after recovery
self.check_ptrack_recovery(idx_ptrack[i])

tests/ptrack_recovery.py

@@ -52,7 +52,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
for i in idx_ptrack:
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['size'])
# check that ptrack has correct bits after recovery
self.check_ptrack_recovery(idx_ptrack[i])

tests/ptrack_vacuum.py

@@ -51,7 +51,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
self.backup_pb(node, backup_type='full', options=['-j100', '--stream'])
for i in idx_ptrack:
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])
# Delete some rows, vacuum it and make checkpoint
@@ -69,7 +69,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])

tests/ptrack_vacuum_bits_frozen.py

@@ -62,7 +62,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])

tests/ptrack_vacuum_bits_visibility.py

@@ -62,7 +62,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])

tests/ptrack_vacuum_full.py

@@ -65,7 +65,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# compare pages and check ptrack sanity, the most important part
self.check_ptrack_sanity(idx_ptrack[i])

tests/ptrack_vacuum_truncate.py

@@ -64,7 +64,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])

tests/replica.py

@@ -13,16 +13,15 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
def __init__(self, *args, **kwargs):
super(ReplicaTest, self).__init__(*args, **kwargs)
@classmethod
def tearDownClass(cls):
stop_all()
# @classmethod
# def tearDownClass(cls):
# stop_all()
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_make_simple_replica(self):
def test_replica_stream_full_backup(self):
"""
make node with archiving, make stream backup,
get Recovery Time, try to make pitr to Recovery Time
make full stream backup from replica
"""
fname = self.id().split('.')[3]
master = self.make_simple_node(base_dir="tmp_dirs/replica/{0}/master".format(fname),
@@ -55,9 +54,69 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
slave.append_conf('recovery.conf',
"primary_conninfo = 'user=gsmol port={0} sslmode=prefer sslcompression=1'".format(master.port))
slave.start({"-t": "600"})
# Replica Ready
# Check replica
after = slave.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
# master.execute("postgres", "checkpoint")
master.execute("postgres", "create table t1(a int)")
# Make backup from replica
self.assertEqual(self.init_pb(slave), six.b(""))
self.backup_pb(slave, backup_type='full', options=['--stream'])
@unittest.skip("skip")
def test_replica_archive_full_backup(self):
"""
make full archive backup from replica
"""
fname = self.id().split('.')[3]
master = self.make_simple_node(base_dir="tmp_dirs/replica/{0}/master".format(fname),
set_archiving=True,
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
)
master.append_conf('postgresql.auto.conf', 'archive_timeout = 10')
master.start()
slave = self.make_simple_node(base_dir="tmp_dirs/replica/{0}/slave".format(fname))
slave_port = slave.port
slave.cleanup()
self.assertEqual(self.init_pb(master), six.b(""))
self.backup_pb(master, backup_type='full', options=['--stream'])
master.psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
before = master.execute("postgres", "SELECT * FROM t_heap")
id = self.backup_pb(master, backup_type='page', options=['--stream'])
self.restore_pb(backup_dir=self.backup_dir(master), data_dir=slave.data_dir)
# Settings for Replica
slave.append_conf('postgresql.auto.conf', 'port = {0}'.format(slave.port))
slave.append_conf('postgresql.auto.conf', 'hot_standby = on')
# Set Archiving for replica
self.set_archiving_conf(slave, replica=True)
slave.append_conf('recovery.conf', "standby_mode = 'on'")
slave.append_conf('recovery.conf',
"primary_conninfo = 'user=gsmol port={0} sslmode=prefer sslcompression=1'".format(master.port))
slave.start({"-t": "600"})
# Replica Started
# master.execute("postgres", "checkpoint")
# Check replica
after = slave.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
# Make backup from replica
self.assertEqual(self.init_pb(slave), six.b(""))
self.backup_pb(slave, backup_type='full', options=['--archive-timeout=30'])
self.validate_pb(slave)
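A note on set_archiving_conf(slave, replica=True) used above: on a standby, archive_mode = 'on' archives nothing, while 'always' (PostgreSQL 9.5+) makes the standby archive the WAL it receives. Per the helper change above, the call is expected to append roughly the following (the archive directory here is illustrative):

slave.append_conf('postgresql.auto.conf', 'hot_standby = on')
slave.append_conf('postgresql.auto.conf', 'wal_level = archive')
slave.append_conf('postgresql.auto.conf', 'archive_mode = always')
slave.append_conf('postgresql.auto.conf',
    "archive_command = 'test ! -f /wal_archive/%f && cp %p /wal_archive/%f'")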

tests/validate_test.py

@@ -1,11 +1,13 @@
import unittest
import os
import six
from .ptrack_helpers import ProbackupTest, ProbackupException
from helpers.ptrack_helpers import ProbackupTest, ProbackupException
#from helpers.expected_errors import satisfying_full_backup_not_found, wal_corruption
from datetime import datetime, timedelta
from testgres import stop_all
import subprocess
from sys import exit
import re
class ValidateTest(ProbackupTest, unittest.TestCase):
@@ -17,10 +19,14 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
def tearDownClass(cls):
stop_all()
#@unittest.skip("skip")
#@unittest.expectedFailure
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_validate_wal_unreal_values(self):
"""recovery to latest from full backup"""
"""
make node with archiving
make archive backup
validate to both real and unreal values
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="tmp_dirs/validate/{0}".format(fname),
set_archiving=True,
@@ -35,7 +41,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
con.execute("CREATE TABLE tbl0005 (a text)")
con.commit()
self.backup_pb(node)
backup_id = self.backup_pb(node)
node.pgbench_init(scale=2)
pgbench = node.pgbench(
@@ -47,32 +53,32 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
pgbench.wait()
pgbench.stdout.close()
backup_id = self.show_pb(node)[0]['ID']
target_time = self.show_pb(node)[0]['Recovery time']
after_backup_time = datetime.now()
target_time = self.show_pb(node, id=backup_id)['recovery-time']
after_backup_time = datetime.now().replace(second=0, microsecond=0)
# Validate to real time
self.assertIn(six.b("INFO: backup validation completed successfully on"),
self.validate_pb(node, options=["--time='{0}'".format(target_time)]))
# Validate to unreal time
unreal_time_1 = after_backup_time - timedelta(days=2)
try:
self.validate_pb(node, options=["--time='{:%Y-%m-%d %H:%M:%S}'".format(
after_backup_time - timedelta(days=2))])
# we should die here because exception is what we expect to happen
self.assertEqual(1, 0, "Error in validation is expected because of validation of unreal time")
self.validate_pb(node, options=["--time='{0}'".format(unreal_time_1)])
self.assertEqual(1, 0, "Expecting Error because of validation to unreal time.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException, e:
self.assertEqual(
e.message,
'ERROR: Full backup satisfying target options is not found.\n')
self.assertEqual(e.message, 'ERROR: Full backup satisfying target options is not found.\n',
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
# Validate to unreal time #2
unreal_time_2 = after_backup_time + timedelta(days=2)
try:
self.validate_pb(node, options=["--time='{:%Y-%m-%d %H:%M:%S}'".format(
after_backup_time + timedelta(days=2))])
self.assertEqual(1, 0, "Error in validation is expected because of validation unreal time")
self.validate_pb(node, options=["--time='{0}'".format(unreal_time_2)])
self.assertEqual(1, 0, "Expecting Error because of validation to unreal time.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException, e:
self.assertEqual(True, 'ERROR: not enough WAL records to time' in e.message)
self.assertTrue(re.match('WARNING: recovery can be done up to time [0-9-: ]+ and xid \d+\nERROR: not enough WAL records to time {0}\n\Z'.format(unreal_time_2), e.message),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
# Validate to real xid
target_xid = None
@@ -83,21 +89,31 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
node.execute("postgres", "SELECT pg_switch_xlog()")
self.assertIn(six.b("INFO: backup validation completed successfully on"),
self.validate_pb(node, options=["--xid=%s" % target_xid]))
self.validate_pb(node, options=["--xid={0}".format(target_xid)]))
# Validate to unreal xid
unreal_xid = int(target_xid) + 1000
try:
self.validate_pb(node, options=["--xid=%d" % (int(target_xid) + 1000)])
self.assertEqual(1, 0, "Error in validation is expected because of validation of unreal xid")
self.validate_pb(node, options=["--xid={0}".format(unreal_xid)])
self.assertEqual(1, 0, "Expecting Error because of validation to unreal xid.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException, e:
self.assertEqual(True, 'ERROR: not enough WAL records to xid' in e.message)
self.assertTrue(re.match('WARNING: recovery can be done up to time [0-9-: ]+ and xid \d+\nERROR: not enough WAL records to xid \d+\n\Z', e.message),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
# Validate with backup ID
self.assertIn(six.b("INFO: backup validation completed successfully on"),
self.validate_pb(node, backup_id))
def test_validate_broken_wal_1(self):
"""recovery to latest from full backup"""
# @unittest.skip("skip")
def test_validate_corrupt_wal_1(self):
"""
make node with archiving
make archive backup
corrupt all wal files
run validate, expecting error because of wal corruption
make sure that backup status is 'CORRUPT'
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="tmp_dirs/validate/{0}".format(fname),
set_archiving=True,
@@ -126,16 +142,25 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
# Simple validate
try:
self.validate_pb(node)
self.assertEqual(1, 0, "Expecting Error because of wal corruption. THIS IS BAD")
self.assertEqual(1, 0, "Expecting Error because of wal segments corruption.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException, e:
self.assertEqual(True, 'Possible WAL CORRUPTION' in e.message)
#TODO assert correct error message
self.assertTrue(re.match('Possible WAL CORRUPTION\Z', e.message),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
self.assertEqual('CORRUPT', self.show_pb(node, id=backup_id)['status'], 'Backup STATUS should be "CORRUPT"')
node.stop()
def test_validate_broken_wal_2(self):
"""recovery to latest from full backup"""
# @unittest.skip("skip")
def test_validate_corrupt_wal_2(self):
"""
make node with archiving
make archive backup
corrupt all wal files
run validate to real xid, expecting error because of wal corruption
make sure that backup status is 'CORRUPT'
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="tmp_dirs/validate/{0}".format(fname),
set_archiving=True,
@@ -167,19 +192,28 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
f.write(six.b("blablabla"))
f.close()
# Simple validate
# Validate to xid
try:
self.validate_pb(node, backup_id, options=['--xid=%s' % target_xid])
self.assertEqual(1, 0, "Expecting Error because of wal corruption. THIS IS BAD")
self.validate_pb(node, backup_id, options=['--xid={0}'.format(target_xid)])
self.assertEqual(1, 0, "Expecting Error because of wal segments corruption.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException, e:
self.assertEqual(True, 'Possible WAL CORRUPTION' in e.message)
#TODO assert correct error message
self.assertTrue(re.match('Possible WAL CORRUPTION\Z', e.message),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
self.assertEqual('CORRUPT', self.show_pb(node, id=backup_id)['status'], 'Backup STATUS should be "CORRUPT"')
node.stop()
@unittest.skip("skip")
# @unittest.skip("skip")
def test_validate_wal_lost_segment_1(self):
"""Loose segment which belong to some backup"""
"""
make node with archiving
make archive backup
delete from archive wal segment which belongs to previous backup
run validate, expecting error because of missing wal segment
make sure that backup status is 'CORRUPT'
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="tmp_dirs/validate/{0}".format(fname),
set_archiving=True,
@@ -202,18 +236,38 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
# Delete wal segment
wals_dir = os.path.join(self.backup_dir(node), "wal")
wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')]
os.remove(os.path.join(self.backup_dir(node), "wal", wals[1]))
file = os.path.join(self.backup_dir(node), "wal", wals[1])
os.remove(file)
try:
self.validate_pb(node)
self.assertEqual(1, 0, "Expecting Error because of wal segment disappearance")
self.assertEqual(1, 0, "Expecting Error because of wal segment disappearance.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException, e:
self.assertEqual(True, 'is absent' in e.message)
self.assertTrue(re.match('WARNING: WAL segment "{0}" is absent\nERROR: there are not enough WAL records to restore from [0-9a-fA-F\/]+ to [0-9a-fA-F\/]+\n\Z'.format(
file), e.message),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
self.assertEqual('CORRUPT', self.show_pb(node, id=backup_id)['status'], 'Backup STATUS should be "CORRUPT"')
self.assertEqual('CORRUPT', self.show_pb(node, id=backup_id)['status'], 'Backup {0} should have STATUS "CORRUPT"'.format(backup_id))
# Be paranoid and run validate again
try:
self.validate_pb(node)
self.assertEqual(1, 0, "Expecting Error because of backup corruption.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException, e:
self.assertTrue(re.match('ERROR: Backup {0} has status: CORRUPT\n\Z'.format(backup_id), e.message),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
node.stop()
# @unittest.skip("skip")
def test_validate_wal_lost_segment_2(self):
"""Loose segment located between backups"""
"""
make node with archiving
make archive backup
delete from archive wal segment which does NOT belong to previous backup
run validate, expecting error because of missing wal segment
make sure that backup status is 'ERROR'
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="tmp_dirs/validate/{0}".format(fname),
set_archiving=True,
@@ -239,12 +293,17 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
wals_dir = os.path.join(self.backup_dir(node), "wal")
wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')]
wals = map(int, wals)
os.remove(os.path.join(self.backup_dir(node), "wal", '0000000' + str(max(wals))))
file = os.path.join(self.backup_dir(node), "wal", '0000000' + str(max(wals)))
os.remove(file)
# Need more accurate error message about losing wal segment between backups
try:
self.backup_pb(node, backup_type='page')
self.assertEqual(1, 0, "Expecting Error in PAGE backup because of wal segment disappearance")
backup_id = self.backup_pb(node, backup_type='page')
self.assertEqual(1, 0, "Expecting Error because of wal segment disappearance.\n Output: {0} \n CMD: {1}".format(
self.output, self.cmd))
except ProbackupException, e:
self.assertEqual(True, 'is absent' in e.message)
self.assertTrue(re.match('INFO: wait for LSN [0-9a-fA-F\/]+ in archived WAL segment .*\nWARNING: could not read WAL record at [0-9a-fA-F\/]+\nERROR: WAL segment "{0}" is absent\n\Z'.format(
file), e.message),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
self.assertEqual('ERROR', self.show_pb(node)[1]['Status'], 'Backup should have STATUS "ERROR"')
node.stop()