mirror of https://github.com/postgrespro/pg_probackup.git (synced 2024-11-28 09:33:54 +02:00)

commit 36e64407fc (parent d470effef8): ptrack, validate, retention fixes
@@ -8,7 +8,6 @@ from . import init_test, option_test, show_test, \
     ptrack_vacuum_full, ptrack_vacuum_truncate
 
-
 
 def load_tests(loader, tests, pattern):
     suite = unittest.TestSuite()
     suite.addTests(loader.loadTestsFromModule(init_test))
@@ -17,8 +16,8 @@ def load_tests(loader, tests, pattern):
     suite.addTests(loader.loadTestsFromModule(backup_test))
     suite.addTests(loader.loadTestsFromModule(delete_test))
     suite.addTests(loader.loadTestsFromModule(restore_test))
-    # suite.addTests(loader.loadTestsFromModule(validate_test))
-    # suite.addTests(loader.loadTestsFromModule(retention_test))
+    suite.addTests(loader.loadTestsFromModule(validate_test))
+    suite.addTests(loader.loadTestsFromModule(retention_test))
     suite.addTests(loader.loadTestsFromModule(ptrack_clean))
     suite.addTests(loader.loadTestsFromModule(ptrack_cluster))
     suite.addTests(loader.loadTestsFromModule(ptrack_move_to_tablespace))
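Re-enabling validate_test and retention_test here routes them back through unittest's load_tests protocol: when a package defines load_tests, the runner uses the suite it returns instead of discovering tests on its own. A self-contained sketch of the same hook (illustrative module, not part of this repo):

    import sys
    import unittest

    class SmokeTest(unittest.TestCase):
        def test_truth(self):
            self.assertTrue(True)

    def load_tests(loader, tests, pattern):
        # unittest invokes this hook and runs the suite it returns
        suite = unittest.TestSuite()
        suite.addTests(loader.loadTestsFromModule(sys.modules[__name__]))
        return suite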
@@ -133,11 +133,10 @@ class OptionTest(ProbackupTest, unittest.TestCase):
         with open(path.join(self.backup_dir(node), "pg_probackup.conf"), "a") as conf:
             conf.write("retention-redundancy=1\n")
 
-        # TODO AFTER PGPRO-505
-        # self.assertEqual(
-        #     self.retention_show(node, ["--redundancy", "2"]),
-        #     six.b("# retention policy\nREDUNDANCY=2\n")
-        # )
+        self.assertEqual(
+            self.show_config(node)['retention-redundancy'],
+            six.b('1')
+            )
 
         # User cannot send --system-identifier parameter via command line
         try:
@@ -44,7 +44,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
             # get path to heap and index files
             idx_ptrack[i]['path'] = self.get_fork_path(node, i)
             # get ptrack for every idx
-            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
+            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                 idx_ptrack[i]['path'], idx_ptrack[i]['size'])
             self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
 
@@ -61,7 +61,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
             # update path to heap and index files in case they`ve changed
             idx_ptrack[i]['path'] = self.get_fork_path(node, i)
             # # get ptrack for every idx
-            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
+            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                 idx_ptrack[i]['path'], idx_ptrack[i]['size'])
             # check that ptrack bits are cleaned
             self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
@@ -80,7 +80,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
             # update path to heap and index files in case they`ve changed
             idx_ptrack[i]['path'] = self.get_fork_path(node, i)
             # # get ptrack for every idx
-            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
+            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                 idx_ptrack[i]['path'], idx_ptrack[i]['size'])
             # check that ptrack bits are cleaned
             self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
@@ -62,7 +62,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
             idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
             # get ptrack for every idx
-            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
+            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
 
             # compare pages and check ptrack sanity
@@ -121,7 +121,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
             idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
             # get ptrack for every idx
-            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
+            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
 
             # compare pages and check ptrack sanity
@@ -180,7 +180,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
             idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
             # get ptrack for every idx
-            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
+            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
 
             # compare pages and check ptrack sanity
@@ -239,7 +239,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
             idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
             # get ptrack for every idx
-            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
+            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
 
             # compare pages and check ptrack sanity
@@ -298,7 +298,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
             idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
             # get ptrack for every idx
-            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
+            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
 
             # compare pages and check ptrack sanity
@@ -172,6 +172,8 @@ class ProbackupTest(object):
 
         return node
 
+    # def print_started(self, fname):
+    #     print
 
     def make_simple_node(self, base_dir=None, set_replication=False,
             set_archiving=False, initdb_params=[], pg_options={}):
@@ -234,23 +236,21 @@ class ProbackupTest(object):
         os.close(file)
         return md5_per_page
 
-    def get_ptrack_bits_per_for_fork(self, file, size):
+    def get_ptrack_bits_per_page_for_fork(self, file, size):
+        ptrack_bits_for_fork = []
         byte_size = os.path.getsize(file + '_ptrack')
         byte_size_minus_header = byte_size - 24
         file = os.open(file + '_ptrack', os.O_RDONLY)
         os.lseek(file, 24, 0)
         lot_of_bytes = os.read(file, byte_size_minus_header)
-        ptrack_bits_per_for_fork = []
         for byte in lot_of_bytes:
             byte_inverted = bin(ord(byte))[2:].rjust(8, '0')[::-1]
             # byte_to_bits = (byte >> x) & 1 for x in range(7, -1, -1)
             for bit in byte_inverted:
-                while len(ptrack_bits_per_for_fork) != size:
-                    ptrack_bits_per_for_fork.append(int(bit))
-        # print 'Size: {}'.format(size)
-        # print ptrack_bits_per_for_fork
+                if len(ptrack_bits_for_fork) < size:
+                    ptrack_bits_for_fork.append(int(bit))
         os.close(file)
-        return ptrack_bits_per_for_fork
+        return ptrack_bits_for_fork
 
     def check_ptrack_sanity(self, idx_dict):
         success = True
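Besides the rename, this hunk fixes the accumulation loop: the old `while len(ptrack_bits_per_for_fork) != size` kept appending the same bit until the list reached `size`, so a single bit could smear across the rest of the fork, while the new `if len(ptrack_bits_for_fork) < size` appends each bit at most once. A standalone sketch of the corrected parsing, assuming the same 24-byte ptrack page header that the diff skips (Python 2, like the suite):

    import os

    PTRACK_HEADER_SIZE = 24  # offset taken from the diff's os.lseek(file, 24, 0)

    def read_ptrack_bits(ptrack_path, size):
        # Return `size` 0/1 flags, least-significant bit first within each byte.
        bits = []
        fd = os.open(ptrack_path, os.O_RDONLY)
        os.lseek(fd, PTRACK_HEADER_SIZE, 0)  # skip the page header
        raw = os.read(fd, os.path.getsize(ptrack_path) - PTRACK_HEADER_SIZE)
        os.close(fd)
        for byte in raw:
            # '0b101' -> '101' -> pad to 8 -> reverse, so index 0 is the LSB
            for bit in bin(ord(byte))[2:].rjust(8, '0')[::-1]:
                if len(bits) < size:
                    bits.append(int(bit))
        return bits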
@@ -284,7 +284,7 @@ class ProbackupTest(object):
                         PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum])
                     print idx_dict
                     if PageNum == 0 and idx_dict['type'] == 'spgist':
-                        print 'SPGIST is a special showflake, so don`t freat about losing ptrack for blknum 0'
+                        print 'SPGIST is a special snowflake, so don`t fret about losing ptrack for blknum 0'
                         continue
                     success = False
                 else:
@@ -468,19 +468,26 @@ class ProbackupTest(object):
         # print(cmd_list)
         return self.run_pb(cmd_list + options)
 
-    def retention_purge_pb(self, node, options=[]):
+    def delete_expired(self, node, options=[]):
         cmd_list = [
-            "retention", "purge",
+            "delete", "--expired",
             "-B", self.backup_dir(node),
         ]
         return self.run_pb(cmd_list + options)
 
-    def retention_show(self, node, options=[]):
+    def show_config(self, node):
+        out_dict = {}
         cmd_list = [
-            "config",
+            "show-config",
             "-B", self.backup_dir(node),
         ]
-        return self.run_pb(cmd_list + options)
+        res = self.run_pb(cmd_list).splitlines()
+        for line in res:
+            if not line.startswith('#'):
+                name, var = line.partition(" = ")[::2]
+                out_dict[name] = var
+        return out_dict
+
 
     def get_recovery_conf(self, node):
         out_dict = {}
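`show_config` replaces the raw `retention_show` wrapper: it runs `pg_probackup show-config -B <backup_dir>`, skips `#` comment lines, and splits each `name = value` line with `partition(" = ")[::2]`, which keeps the slices before and after the separator and drops the separator itself. A quick illustration of the parsing idiom (the sample lines are invented, not captured output):

    lines = [
        "# Backup instance information",
        "retention-redundancy = 1",
        "retention-window = 7",
    ]
    config = {}
    for line in lines:
        if not line.startswith('#'):
            # partition returns ('name', ' = ', 'value'); [::2] keeps name and value
            name, var = line.partition(" = ")[::2]
            config[name] = var
    print config  # {'retention-redundancy': '1', 'retention-window': '7'}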
@@ -48,7 +48,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
             # get path to heap and index files
             idx_ptrack[i]['path'] = self.get_fork_path(node, i)
             # get ptrack for every idx
-            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
+            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                 idx_ptrack[i]['path'], idx_ptrack[i]['size'])
             # check that ptrack has correct bits after recovery
             self.check_ptrack_recovery(idx_ptrack[i])
@@ -50,7 +50,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
 
         for i in idx_ptrack:
             # get ptrack for every idx
-            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
+            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                 idx_ptrack[i]['path'], idx_ptrack[i]['size'])
             # check that ptrack has correct bits after recovery
             self.check_ptrack_recovery(idx_ptrack[i])
@@ -1,21 +1,8 @@
 import unittest
 from sys import exit
 from testgres import get_new_node, stop_all
-#import os
-from os import path, open, lseek, read, close, O_RDONLY
 from .ptrack_helpers import ProbackupTest, idx_ptrack
-
-# res = node.execute('postgres', 'show fsync')
-# print res[0][0]
-# res = node.execute('postgres', 'show wal_level')
-# print res[0][0]
-# a = ProbackupTest
-# res = node.execute('postgres', 'select 1')`
-# self.assertEqual(len(res), 1)
-# self.assertEqual(res[0][0], 1)
-# node.stop()
-# a = self.backup_dir(node)
 
 
 class SimpleTest(ProbackupTest, unittest.TestCase):
     def __init__(self, *args, **kwargs):
@@ -63,7 +50,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
         self.init_pb(node)
         self.backup_pb(node, backup_type='full', options=['-j100', '--stream'])
         for i in idx_ptrack:
-            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
+            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                 idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
             self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])
 
@@ -81,7 +68,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
             idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
             # get ptrack for every idx
-            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
+            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
 
             # compare pages and check ptrack sanity
@@ -59,7 +59,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
             idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
             # get ptrack for every idx
-            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
+            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
 
             # compare pages and check ptrack sanity
@@ -59,7 +59,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
             idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
             # get ptrack for every idx
-            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
+            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
 
             # compare pages and check ptrack sanity
@@ -26,8 +26,9 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
         stop_all()
 
     def test_ptrack_vacuum_full(self):
-        print 'test_ptrack_vacuum_full started'
-        node = self.make_simple_node(base_dir="tmp_dirs/ptrack/test_ptrack_vacuum_full",
+        fname = self.id().split('.')[3]
+        print '{0} started'.format(fname)
+        node = self.make_simple_node(base_dir='tmp_dirs/ptrack/{0}'.format(fname),
             set_replication=True,
             initdb_params=['--data-checksums', '-A trust'],
             pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
@@ -73,7 +74,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
             idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
             # get ptrack for every idx
-            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
+            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
 
             # compare pages and check ptrack sanity, the most important part
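Deriving `fname` from `self.id()` keeps the log line and the `tmp_dirs` path in sync with the test method automatically. A sketch of what it yields (the dotted id below is illustrative; index 3 assumes a package.module.Class.method layout):

    # unittest.TestCase.id() returns the dotted path of the running test
    test_id = 'tests.ptrack_vacuum_full.SimpleTest.test_ptrack_vacuum_full'
    fname = test_id.split('.')[3]              # -> 'test_ptrack_vacuum_full'
    print 'tmp_dirs/ptrack/{0}'.format(fname)  # -> tmp_dirs/ptrack/test_ptrack_vacuum_full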
@@ -61,7 +61,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
             idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
             # get ptrack for every idx
-            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
+            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                 idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
 
             # compare pages and check ptrack sanity
@@ -14,10 +14,11 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
     def __init__(self, *args, **kwargs):
         super(RestoreTest, self).__init__(*args, **kwargs)
 
-    # @classmethod
-    # def tearDownClass(cls):
-    #     stop_all()
+    @classmethod
+    def tearDownClass(cls):
+        stop_all()
 
+    # @unittest.skip("123")
     def test_restore_full_to_latest(self):
         """recovery to latest from full backup"""
         fname = self.id().split('.')[3]
@@ -366,12 +367,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
             self.skipTest("ptrack not supported")
             return
 
-        # node.append_conf("pg_hba.conf", "local replication all trust")
-        # node.append_conf("pg_hba.conf", "host replication all 127.0.0.1/32 trust")
-        # node.append_conf("postgresql.conf", "ptrack_enable = on")
-        # node.append_conf("postgresql.conf", "max_wal_senders = 1")
-        # node.restart()
-
         with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
             backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose", "--stream"]))
 
@@ -400,11 +395,12 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
         node.stop()
 
     def test_restore_full_ptrack_under_load(self):
-        """recovery to latest from full + page backups with loads when ptrack backup do"""
+        """recovery to latest from full + ptrack backups with loads when ptrack backup do"""
         fname = self.id().split('.')[3]
         print '{0} started'.format(fname)
         node = self.make_simple_node(base_dir="tmp_dirs/restore/{0}".format(fname),
             set_archiving=True,
+            set_replication=True,
             initdb_params=['--data-checksums'],
             pg_options={'wal_level': 'replica', 'ptrack_enable': 'on', 'max_wal_senders': '2'}
             )
@@ -417,11 +413,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
             node.stop()
             self.skipTest("ptrack not supported")
             return
-
-        #node.append_conf("pg_hba.conf", "local replication all trust")
-        #node.append_conf("pg_hba.conf", "host replication all 127.0.0.1/32 trust")
-        #node.append_conf("postgresql.conf", "ptrack_enable = on")
-        #node.append_conf("postgresql.conf", "max_wal_senders = 1")
         node.restart()
 
         with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
@@ -439,8 +430,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
         pgbench.wait()
         pgbench.stdout.close()
 
-        node.execute("postgres", "SELECT pg_switch_xlog()")
-
         bbalance = node.execute("postgres", "SELECT sum(bbalance) FROM pgbench_branches")
         delta = node.execute("postgres", "SELECT sum(delta) FROM pgbench_history")
 
@@ -470,6 +459,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
         print '{0} started'.format(fname)
         node = self.make_simple_node(base_dir="tmp_dirs/restore/{0}".format(fname),
             set_archiving=True,
+            set_replication=True,
             initdb_params=['--data-checksums'],
             pg_options={'wal_level': 'replica', 'ptrack_enable': 'on', 'max_wal_senders': '2'}
             )
@@ -483,10 +473,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
             self.skipTest("ptrack not supported")
             return
 
-        #node.append_conf("pg_hba.conf", "local replication all trust")
-        #node.append_conf("pg_hba.conf", "host replication all 127.0.0.1/32 trust")
-        #node.append_conf("postgresql.conf", "ptrack_enable = on")
-        #node.append_conf("postgresql.conf", "max_wal_senders = 1")
         node.restart()
 
         pgbench = node.pgbench(
@@ -504,8 +490,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
         with open(path.join(node.logs_dir, "backup_2.log"), "wb") as backup_log:
             backup_log.write(self.backup_pb(node, backup_type="ptrack", options=["--verbose", "--stream"]))
 
-        node.execute("postgres", "SELECT pg_switch_xlog()")
-
         bbalance = node.execute("postgres", "SELECT sum(bbalance) FROM pgbench_branches")
         delta = node.execute("postgres", "SELECT sum(delta) FROM pgbench_history")
 
@@ -638,9 +622,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
             'ERROR: restore tablespace destination is not empty: "{0}"\n'.format(tblspc_path)
             )
 
-        # self.assertIn(six.b("ERROR: restore tablespace destination is not empty"),
-        #     self.restore_pb(node))
-
         # 3 - Restore using tablespace-mapping
         tblspc_path_new = path.join(node.base_dir, "tblspc_new")
         # TODO WAITING FIX FOR RESTORE
@@ -662,7 +643,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
 
         show_pb = self.show_pb(node)
         self.assertEqual(show_pb[1]['Status'], six.b("OK"))
-        self.assertEqual(show_pb[2]['Status'], six.b("OK"))#
+        self.assertEqual(show_pb[2]['Status'], six.b("OK"))
 
         node.stop()
         node.cleanup()
@@ -677,7 +658,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
         id = node.execute("postgres", "SELECT id FROM test OFFSET 1")
         self.assertEqual(id[0][0], 2)
 
-        #node.stop()
+        node.stop()
 
     def test_restore_with_tablespace_mapping_2(self):
         """recovery using tablespace-mapping option and page backup"""
@@ -728,7 +709,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
         node.cleanup()
 
         tblspc_path_new = path.join(node.base_dir, "tblspc_new")
-        print tblspc_path_new
         # exit(1)
         # TODO WAITING FIX FOR RESTORE
         # self.assertIn(six.b("INFO: restore complete."),
@@ -2,88 +2,104 @@ import unittest
 import os
 from datetime import datetime, timedelta
 from os import path, listdir
-from .pb_lib import ProbackupTest
+from .ptrack_helpers import ProbackupTest
 from testgres import stop_all
 
 
 class RetentionTest(ProbackupTest, unittest.TestCase):
 
     def __init__(self, *args, **kwargs):
         super(RetentionTest, self).__init__(*args, **kwargs)
 
     @classmethod
     def tearDownClass(cls):
         stop_all()
 
+    # @unittest.skip("123")
     def test_retention_redundancy_1(self):
         """purge backups using redundancy-based retention policy"""
-        node = self.make_bnode(base_dir="tmp_dirs/retention/retention_redundancy_1")
+        fname = self.id().split('.')[3]
+        print '{0} started'.format(fname)
+        node = self.make_simple_node(base_dir="tmp_dirs/retention/{0}".format(fname),
+            set_archiving=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica'}
+            )
+
         node.start()
+
         self.init_pb(node)
         with open(path.join(self.backup_dir(node), "pg_probackup.conf"), "a") as conf:
-            conf.write("REDUNDANCY=1\n")
+            conf.write("retention-redundancy = 1\n")
 
         # Make backups to be purged
         self.backup_pb(node)
         self.backup_pb(node, backup_type="page")
         # Make backups to be keeped
         self.backup_pb(node)
         self.backup_pb(node, backup_type="page")
 
         self.assertEqual(len(self.show_pb(node)), 4)
 
         # Purge backups
-        log = self.retention_purge_pb(node)
+        log = self.delete_expired(node)
         self.assertEqual(len(self.show_pb(node)), 2)
 
         # Check that WAL segments were deleted
         min_wal = None
         max_wal = None
         for line in log.splitlines():
             if line.startswith(b"INFO: removed min WAL segment"):
                 min_wal = line[31:-1]
             elif line.startswith(b"INFO: removed max WAL segment"):
                 max_wal = line[31:-1]
         for wal_name in listdir(path.join(self.backup_dir(node), "wal")):
             if not wal_name.endswith(".backup"):
                 wal_name_b = wal_name.encode('ascii')
                 self.assertEqual(wal_name_b[8:] > min_wal[8:], True)
                 self.assertEqual(wal_name_b[8:] > max_wal[8:], True)
 
         node.stop()
 
+    # @unittest.skip("123")
     def test_retention_window_2(self):
         """purge backups using window-based retention policy"""
-        node = self.make_bnode(base_dir="tmp_dirs/retention/retention_window_2")
+        fname = self.id().split('.')[3]
+        print '{0} started'.format(fname)
+        node = self.make_simple_node(base_dir="tmp_dirs/retention/{0}".format(fname),
+            set_archiving=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica'}
+            )
+
         node.start()
 
         self.init_pb(node)
         with open(path.join(self.backup_dir(node), "pg_probackup.conf"), "a") as conf:
-            conf.write("REDUNDANCY=1\n")
-            conf.write("WINDOW=1\n")
+            conf.write("retention-redundancy = 1\n")
+            conf.write("retention-window = 1\n")
 
         # Make backups to be purged
         self.backup_pb(node)
         self.backup_pb(node, backup_type="page")
         # Make backup to be keeped
         self.backup_pb(node)
 
         backups = path.join(self.backup_dir(node), "backups")
         days_delta = 5
         for backup in listdir(backups):
-            with open(path.join(backups, backup, "backup.conf"), "a") as conf:
-                conf.write("RECOVERY_TIME='{:%Y-%m-%d %H:%M:%S}'\n".format(
+            with open(path.join(backups, backup, "backup.control"), "a") as conf:
+                conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format(
                     datetime.now() - timedelta(days=days_delta)))
                 days_delta -= 1
 
         # Make backup to be keeped
         self.backup_pb(node, backup_type="page")
 
         self.assertEqual(len(self.show_pb(node)), 4)
 
         # Purge backups
-        self.retention_purge_pb(node)
+        self.delete_expired(node)
         self.assertEqual(len(self.show_pb(node)), 2)
 
         node.stop()
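The window test ages its catalog artificially: appending a backdated `recovery_time` to each backup's `backup.control` (which supersedes the old `backup.conf` here) makes `delete --expired` treat those backups as falling outside `retention-window = 1`. A minimal sketch of that backdating step, assuming the same `backups/<id>/backup.control` layout the test uses (the catalog path is hypothetical):

    import os
    from datetime import datetime, timedelta

    backups = os.path.join("/tmp/backup_dir", "backups")  # hypothetical catalog path
    days_delta = 5
    for backup in sorted(os.listdir(backups)):
        control = os.path.join(backups, backup, "backup.control")
        with open(control, "a") as conf:
            # later backups get smaller deltas, so ages decrease monotonically
            conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format(
                datetime.now() - timedelta(days=days_delta)))
        days_delta -= 1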
@@ -1,7 +1,7 @@
 import unittest
-from os import path, listdir
+import os
 import six
-from .pb_lib import ProbackupTest
+from .ptrack_helpers import ProbackupTest, ProbackupException
 from datetime import datetime, timedelta
 from testgres import stop_all
 import subprocess
@@ -9,86 +9,227 @@ import subprocess
 
 class ValidateTest(ProbackupTest, unittest.TestCase):
 
     def __init__(self, *args, **kwargs):
         super(ValidateTest, self).__init__(*args, **kwargs)
 
-    @classmethod
-    def tearDownClass(cls):
-        try:
-            stop_all()
-        except:
-            pass
+    # @classmethod
+    # def tearDownClass(cls):
+    #     try:
+    #         stop_all()
+    #     except:
+    #         pass
 
+    # @unittest.skip("123")
     def test_validate_wal_1(self):
         """recovery to latest from full backup"""
-        node = self.make_bnode(base_dir="tmp_dirs/validate/wal_1")
+        fname = self.id().split('.')[3]
+        print '\n {0} started'.format(fname)
+        node = self.make_simple_node(base_dir="tmp_dirs/validate/{0}".format(fname),
+            set_archiving=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica'}
+            )
+
         node.start()
         self.assertEqual(self.init_pb(node), six.b(""))
         node.pgbench_init(scale=2)
         with node.connect("postgres") as con:
             con.execute("CREATE TABLE tbl0005 (a text)")
             con.commit()
 
-        with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
+        with open(os.path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
             backup_log.write(self.backup_pb(node, options=["--verbose"]))
 
         pgbench = node.pgbench(
             stdout=subprocess.PIPE,
             stderr=subprocess.STDOUT,
             options=["-c", "4", "-T", "10"]
         )
 
         pgbench.wait()
         pgbench.stdout.close()
 
-        # Save time to validate
-        target_time = datetime.now()
-        target_xid = None
-        with node.connect("postgres") as con:
-            res = con.execute("INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)")
-            con.commit()
-            target_xid = res[0][0]
-
-        node.execute("postgres", "SELECT pg_switch_xlog()")
-        node.stop({"-m": "smart"})
-
-        id_backup = self.show_pb(node)[0].id
+        id_backup = self.show_pb(node)[0]['ID']
+        target_time = self.show_pb(node)[0]['Recovery time']
+        after_backup_time = datetime.now()
 
         # Validate to real time
         self.assertIn(six.b("INFO: backup validation completed successfully on"),
-            self.validate_pb(node, options=["--time='{:%Y-%m-%d %H:%M:%S}'".format(
-                target_time)]))
+            self.validate_pb(node, options=["--time='{0}'".format(target_time)]))
 
         # Validate to unreal time
-        self.assertIn(six.b("ERROR: no full backup found, cannot validate."),
-            self.validate_pb(node, options=["--time='{:%Y-%m-%d %H:%M:%S}'".format(
-                target_time - timedelta(days=2))]))
+        try:
+            self.validate_pb(node, options=["--time='{:%Y-%m-%d %H:%M:%S}'".format(
+                after_backup_time - timedelta(days=2))])
+            # we should die here because exception is what we expect to happen
+            exit(1)
+        except ProbackupException, e:
+            self.assertEqual(
+                e.message,
+                'ERROR: Full backup satisfying target options is not found.\n'
+                )
 
         # Validate to unreal time #2
-        self.assertIn(six.b("ERROR: not enough WAL records to time"),
-            self.validate_pb(node, options=["--time='{:%Y-%m-%d %H:%M:%S}'".format(
-                target_time + timedelta(days=2))]))
+        try:
+            self.validate_pb(node, options=["--time='{:%Y-%m-%d %H:%M:%S}'".format(
+                after_backup_time + timedelta(days=2))])
+            # we should die here because exception is what we expect to happen
+            exit(1)
+        except ProbackupException, e:
+            self.assertEqual(
+                True,
+                'ERROR: not enough WAL records to time' in e.message
+                )
 
         # Validate to real xid
+        target_xid = None
+        with node.connect("postgres") as con:
+            res = con.execute("INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)")
+            con.commit()
+            target_xid = res[0][0]
+        node.execute("postgres", "SELECT pg_switch_xlog()")
+
         self.assertIn(six.b("INFO: backup validation completed successfully on"),
             self.validate_pb(node, options=["--xid=%s" % target_xid]))
 
         # Validate to unreal xid
-        self.assertIn(six.b("ERROR: not enough WAL records to xid"),
-            self.validate_pb(node, options=["--xid=%d" % (int(target_xid) + 1000)]))
+        try:
+            self.validate_pb(node, options=["--xid=%d" % (int(target_xid) + 1000)])
+            # we should die here because exception is what we expect to happen
+            exit(1)
+        except ProbackupException, e:
+            self.assertEqual(
+                True,
+                'ERROR: not enough WAL records to xid' in e.message
+                )
 
         # Validate with backup ID
         self.assertIn(six.b("INFO: backup validation completed successfully on"),
             self.validate_pb(node, id_backup))
 
         # Validate broken WAL
-        wals_dir = path.join(self.backup_dir(node), "wal")
-        wals = [f for f in listdir(wals_dir) if path.isfile(path.join(wals_dir, f))]
+        wals_dir = os.path.join(self.backup_dir(node), "wal")
+        wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')]
         wals.sort()
-        with open(path.join(wals_dir, wals[-3]), "rb+") as f:
-            f.seek(256)
-            f.write(six.b("blablabla"))
-
-        res = self.validate_pb(node, id_backup, options=['--xid=%s' % target_xid])
-        self.assertIn(six.b("not enough WAL records to xid"), res)
+        for wal in wals:
+            f = open(os.path.join(wals_dir, wal), "rb+")
+            f.seek(256)
+            f.write(six.b("blablabla"))
+            f.close
+
+        try:
+            self.validate_pb(node, id_backup, options=['--xid=%s' % target_xid])
+            # we should die here because exception is what we expect to happen
+            exit(1)
+        except ProbackupException, e:
+            self.assertEqual(
+                True,
+                'Possible WAL CORRUPTION' in e.message
+                )
+
+        try:
+            self.validate_pb(node)
+            # we should die here because exception is what we expect to happen
+            exit(1)
+        except ProbackupException, e:
+            self.assertEqual(
+                True,
+                'Possible WAL CORRUPTION' in e.message
+                )
+
+        node.stop()
+
+    # @unittest.skip("123")
+    def test_validate_wal_lost_segment_1(self):
+        """Loose segment which belong to some backup"""
+        fname = self.id().split('.')[3]
+        print '{0} started'.format(fname)
+        node = self.make_simple_node(base_dir="tmp_dirs/validate/{0}".format(fname),
+            set_archiving=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica'}
+            )
+
+        node.start()
+        self.assertEqual(self.init_pb(node), six.b(""))
+        node.pgbench_init(scale=2)
+        pgbench = node.pgbench(
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            options=["-c", "4", "-T", "10"]
+        )
+        pgbench.wait()
+        pgbench.stdout.close()
+        self.backup_pb(node, backup_type='full')
+
+        wals_dir = os.path.join(self.backup_dir(node), "wal")
+        wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')]
+        os.remove(os.path.join(self.backup_dir(node), "wal", wals[1]))
+        try:
+            self.validate_pb(node)
+            # we should die here because exception is what we expect to happen
+            exit(1)
+        except ProbackupException, e:
+            self.assertEqual(
+                True,
+                'is absent' in e.message
+                )
+        node.stop()
+
+    def test_validate_wal_lost_segment_2(self):
+        """Loose segment located between backups """
+        fname = self.id().split('.')[3]
+        print '{0} started'.format(fname)
+        node = self.make_simple_node(base_dir="tmp_dirs/validate/{0}".format(fname),
+            set_archiving=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica'}
+            )
+
+        node.start()
+        self.assertEqual(self.init_pb(node), six.b(""))
+        node.pgbench_init(scale=2)
+        pgbench = node.pgbench(
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            options=["-c", "4", "-T", "10"]
+        )
+        pgbench.wait()
+        pgbench.stdout.close()
+        self.backup_pb(node, backup_type='full')
+
+        # need to do that to find segment between(!) backups
+        node.psql("postgres", "CREATE TABLE t1(a int)")
+        node.psql("postgres", "SELECT pg_switch_xlog()")
+        node.psql("postgres", "CREATE TABLE t2(a int)")
+        node.psql("postgres", "SELECT pg_switch_xlog()")
+
+        wals_dir = os.path.join(self.backup_dir(node), "wal")
+        wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')]
+        wals = map(int, wals)
+
+        # delete last wal segment
+        print os.path.join(self.backup_dir(node), "wal", '0000000' + str(max(wals)))
+        os.remove(os.path.join(self.backup_dir(node), "wal", '0000000' + str(max(wals))))
+
+        # Need more accurate error message about loosing wal segment between backups
+        try:
+            self.backup_pb(node, backup_type='page')
+            # we should die here because exception is what we expect to happen
+            exit(1)
+        except ProbackupException, e:
+            self.assertEqual(
+                True,
+                'could not read WAL record' in e.message
+                )
+        self.delete_pb(node, id=self.show_pb(node)[1]['ID'])
+
+        ##### Hole Smokes, Batman! We just lost a wal segment and know nothing about it
+        ##### We need archive-push ASAP
+        self.backup_pb(node, backup_type='full')
+        self.assertEqual(False,
+            'validation completed successfully' in self.validate_pb(node))
+        ########
+
+        node.stop()
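Every negative case above now follows one idiom: run the command, call `exit(1)` if it unexpectedly succeeds, and otherwise assert on the error text carried by `ProbackupException` from the test helpers. A generic sketch of the pattern as a fragment from inside a test method (`self.validate_pb`, `node`, and `bogus_xid` are taken from the surrounding tests; Python 2 except syntax, matching the suite):

    try:
        self.validate_pb(node, options=["--xid=%d" % bogus_xid])
        # reaching this line means validation unexpectedly succeeded
        exit(1)
    except ProbackupException, e:
        # assert on the error text instead of the process return code
        self.assertTrue('not enough WAL records to xid' in e.message)

Under Python 3 the except clause would read `except ProbackupException as e`.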