1
0
mirror of https://github.com/postgrespro/pg_probackup.git synced 2025-01-05 13:20:31 +02:00

new tests added

This commit is contained in:
Grigory Smolkin 2017-09-28 10:32:06 +03:00
parent 765524854c
commit 237882cfaa
18 changed files with 728 additions and 293 deletions

View File

@ -19,6 +19,7 @@ def load_tests(loader, tests, pattern):
suite.addTests(loader.loadTestsFromModule(restore_test))
suite.addTests(loader.loadTestsFromModule(validate_test))
suite.addTests(loader.loadTestsFromModule(retention_test))
suite.addTests(loader.loadTestsFromModule(ptrack))
suite.addTests(loader.loadTestsFromModule(ptrack_clean))
suite.addTests(loader.loadTestsFromModule(ptrack_cluster))
suite.addTests(loader.loadTestsFromModule(ptrack_move_to_tablespace))
@ -34,14 +35,12 @@ def load_tests(loader, tests, pattern):
suite.addTests(loader.loadTestsFromModule(false_positive))
suite.addTests(loader.loadTestsFromModule(compression))
suite.addTests(loader.loadTestsFromModule(page))
suite.addTests(loader.loadTestsFromModule(ptrack))
suite.addTests(loader.loadTestsFromModule(archive))
return suite
# ToDo:
# archive:
# discrepancy of instance`s PGDATA and node`s PGDATA should lead to archive-push refusal to work
# discrepancy of instance`s SYSTEMID and node`s SYSTEMID should lead to archive-push refusal to work
# replica:
# backup should exit with correct error message if some master* option is missing

View File

@ -229,7 +229,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
@unittest.expectedFailure
#@unittest.expectedFailure
def test_replica_archive(self):
"""make node withput archiving, take stream backup and turn it into replica, set replica with archiving, make archive backup from replica"""
fname = self.id().split('.')[3]
@ -256,7 +256,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
# Settings for Replica
self.restore_node(backup_dir, 'master', replica)
self.set_replica(master, replica)
self.set_replica(master, replica, synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start({"-t": "600"})
@ -280,7 +280,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
node.cleanup()
self.restore_node(backup_dir, 'replica', data_dir=node.data_dir)
node.append_conf('postgresql.auto.conf', 'port = {0}'.format(node.port))
node.start({"-t": "600"})
node.start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
@ -299,7 +299,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
node.cleanup()
self.restore_node(backup_dir, 'replica', data_dir=node.data_dir, backup_id=backup_id)
node.append_conf('postgresql.auto.conf', 'port = {0}'.format(node.port))
node.start({"-t": "600"})
node.start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)

View File

@ -5,7 +5,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
pg_probackup version
pg_probackup init -B backup-path
pg_probackup init -B backup-path [-l]
pg_probackup set-config -B backup-dir --instance=instance_name
[--log-level=log-level]
@ -26,7 +26,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
pg_probackup show-config -B backup-dir --instance=instance_name
pg_probackup backup -B backup-path -b backup-mode --instance=instance_name
[-C] [--stream [-S slot-name]] [--backup-pg-log]
[-C] [-l] [--stream [-S slot-name]] [--backup-pg-log]
[-j num-threads] [--archive-timeout=archive-timeout]
[--compress-algorithm=compress-algorithm]
[--compress-level=compress-level]
@ -37,12 +37,12 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
[--replica-timeout=timeout]
pg_probackup restore -B backup-dir --instance=instance_name
[-D pgdata-dir] [-i backup-id] [--progress]
[-D pgdata-dir] [-l] [-i backup-id] [--progress]
[--time=time|--xid=xid [--inclusive=boolean]]
[--timeline=timeline] [-T OLDDIR=NEWDIR]
pg_probackup validate -B backup-dir [--instance=instance_name]
[-i backup-id] [--progress]
[-i backup-id] [-l] [--progress]
[--time=time|--xid=xid [--inclusive=boolean]]
[--timeline=timeline]
@ -50,7 +50,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
[--instance=instance_name [-i backup-id]]
pg_probackup delete -B backup-dir --instance=instance_name
[--wal] [-i backup-id | --expired]
[--wal] [-i backup-id | --expired] [-l]
pg_probackup add-instance -B backup-dir -D pgdata-dir
--instance=instance_name

View File

@ -10,114 +10,6 @@ module_name = 'false_positive'
class FalsePositive(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_pgpro561(self):
"""
make node with archiving, make stream backup, restore it to node1,
check that archiving is not successful on node1
"""
fname = self.id().split('.')[3]
node1 = self.make_simple_node(base_dir="{0}/{1}/node1".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node1', node1)
self.set_archiving(backup_dir, 'node1', node1)
node1.start()
backup_id = self.backup_node(backup_dir, 'node1', node1, options=["--stream"])
node2 = self.make_simple_node(base_dir="{0}/{1}/node2".format(module_name, fname))
node2.cleanup()
node1.psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
self.backup_node(backup_dir, 'node1', node1, backup_type='page', options=["--stream"])
self.restore_node(backup_dir, 'node1', data_dir=node2.data_dir)
node2.append_conf('postgresql.auto.conf', 'port = {0}'.format(node2.port))
node2.start({"-t": "600"})
timeline_node1 = node1.get_control_data()["Latest checkpoint's TimeLineID"]
timeline_node2 = node2.get_control_data()["Latest checkpoint's TimeLineID"]
self.assertEqual(timeline_node1, timeline_node2, "Timelines on Master and Node1 should be equal. This is unexpected")
archive_command_node1 = node1.safe_psql("postgres", "show archive_command")
archive_command_node2 = node2.safe_psql("postgres", "show archive_command")
self.assertEqual(archive_command_node1, archive_command_node2, "Archive command on Master and Node should be equal. This is unexpected")
result = node2.safe_psql("postgres", "select last_failed_wal from pg_stat_get_archiver() where last_failed_wal is not NULL")
# self.assertEqual(res, six.b(""), 'Restored Node1 failed to archive segment {0} due to having the same archive command as Master'.format(res.rstrip()))
if result == "":
self.assertEqual(1, 0, 'Error is expected due to Master and Node1 having the common archive and archive_command')
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def pgpro688(self):
"""make node with archiving, make backup, get Recovery Time, validate to Recovery Time. Waiting PGPRO-688. RESOLVED"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
backup_id = self.backup_node(backup_dir, 'node', node)
recovery_time = self.show_pb(backup_dir, 'node', backup_id)['recovery-time']
# Uncommenting this section will make this test True Positive
#node.safe_psql("postgres", "select pg_create_restore_point('123')")
#node.safe_psql("postgres", "select txid_current()")
#node.safe_psql("postgres", "select pg_switch_xlog()")
####
#try:
self.validate_pb(backup_dir, 'node', options=["--time='{0}'".format(recovery_time)])
# we should die here because exception is what we expect to happen
# self.assertEqual(1, 0, "Expecting Error because it should not be possible safely validate 'Recovery Time' without wal record with timestamp.\n Output: {0} \n CMD: {1}".format(
# repr(self.output), self.cmd))
# except ProbackupException as e:
# self.assertTrue('WARNING: recovery can be done up to time {0}'.format(recovery_time) in e.message,
# '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def pgpro702_688(self):
"""make node without archiving, make stream backup, get Recovery Time, validate to Recovery Time"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
backup_id = self.backup_node(backup_dir, 'node', node, options=["--stream"])
recovery_time = self.show_pb(backup_dir, 'node', backup_id)['recovery-time']
self.assertIn(six.b("INFO: backup validation completed successfully on"),
self.validate_pb(backup_dir, 'node', node, options=["--time='{0}'".format(recovery_time)]))
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
@unittest.expectedFailure
def test_validate_wal_lost_segment(self):

View File

@ -175,6 +175,7 @@ class ProbackupTest(object):
# Allow replication in pg_hba.conf
if set_replication:
node.set_replication_conf()
node.append_conf("postgresql.auto.conf", "max_wal_senders = 10")
return node
def create_tblspace_in_node(self, node, tblspc_name, cfs=False):
@ -295,27 +296,17 @@ class ProbackupTest(object):
idx_dict['type'], idx_dict))
def check_ptrack_recovery(self, idx_dict):
success = True
size = idx_dict['size']
for PageNum in range(size):
if idx_dict['ptrack'][PageNum] != 1:
if self.verbose:
print('Recovery for Page Number {0} of Type {1} was conducted, but ptrack value is {2}. THIS IS BAD'.format(
PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum]))
print(idx_dict)
success = False
self.assertEqual(success, True)
self.assertTrue(False, 'Recovery for Page Number {0} of Type {1} was conducted, but ptrack value is {2}. THIS IS BAD\n IDX_DICT: {3}'.format(
PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum], idx_dict))
def check_ptrack_clean(self, idx_dict, size):
success = True
for PageNum in range(size):
if idx_dict['ptrack'][PageNum] != 0:
if self.verbose:
print('Ptrack for Page Number {0} of Type {1} should be clean, but ptrack value is {2}. THIS IS BAD'.format(
PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum]))
print(idx_dict)
success = False
self.assertEqual(success, True, '')
self.assertTrue(False, 'Ptrack for Page Number {0} of Type {1} should be clean, but ptrack value is {2}.\n THIS IS BAD\n IDX_DICT: {3}'.format(
PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum], idx_dict))
def run_pb(self, command):
try:
@ -365,7 +356,7 @@ class ProbackupTest(object):
def clean_pb(self, backup_dir):
shutil.rmtree(backup_dir, ignore_errors=True)
def backup_node(self, backup_dir, instance, node=False, data_dir=False, backup_type="full", options=[]):
def backup_node(self, backup_dir, instance, node, data_dir=False, backup_type="full", options=[]):
if not node and not data_dir:
print('You must provide ether node or data_dir for backup')
exit(1)
@ -379,7 +370,7 @@ class ProbackupTest(object):
cmd_list = [
"backup",
"-B", backup_dir,
"-D", pgdata,
# "-D", pgdata,
"-p", "%i" % node.port,
"-d", "postgres",
"--instance={0}".format(instance)
@ -560,8 +551,8 @@ class ProbackupTest(object):
"primary_conninfo = 'user={0} port={1} application_name={2} sslmode=prefer sslcompression=1'".format(
self.user, master.port, replica_name))
if synchronous:
master.append_conf('postgresql.auto.conf', 'synchronous_standby_names="{0}"'.format(replica_name))
master.append_conf('postgresql.auto.conf', 'synchronous_commit="remote_apply"')
master.append_conf('postgresql.auto.conf', "synchronous_standby_names='{0}'".format(replica_name))
master.append_conf('postgresql.auto.conf', "synchronous_commit='remote_apply'")
master.reload()
def wrong_wal_clean(self, node, wal_size):
@ -604,3 +595,36 @@ class ProbackupTest(object):
os.rmdir(os.path.join(self.tmp_path, module_name))
except:
pass
def pgdata_content(self, directory):
""" return dict with directory content"""
dirs_to_ignore = ['pg_xlog', 'pg_wal', 'pg_log', 'pg_stat_tmp', 'pg_subtrans', 'pg_notify']
files_to_ignore = ['postmaster.pid', 'postmaster.opts']
suffixes_to_ignore = ('_ptrack', '_vm', '_fsm')
directory_dict = {}
directory_dict['pgdata'] = directory
directory_dict['files'] = {}
for root, dirs, files in os.walk(directory, followlinks=True):
dirs[:] = [d for d in dirs if d not in dirs_to_ignore]
for file in files:
if file in files_to_ignore or file.endswith(suffixes_to_ignore):
continue
file = os.path.join(root,file)
file_relpath = os.path.relpath(file, directory)
directory_dict['files'][file_relpath] = hashlib.md5(open(file, 'rb').read()).hexdigest()
return directory_dict
def compare_pgdata(self, original_pgdata, restored_pgdata):
""" return dict with directory content"""
fail = False
error_message = ''
for file in original_pgdata['files']:
if file in restored_pgdata['files']:
if original_pgdata['files'][file] != restored_pgdata['files'][file]:
error_message += '\nChecksumm mismatch.\n File_old: {0}\n File_new: {1}'.format(
os.path.join(original_pgdata['pgdata'], file), os.path.join(restored_pgdata['pgdata'], file))
fail = True
else:
error_message += '\nFile dissappearance. File: {0}/{1}'.format(restored_pgdata['pgdata'], file)
fail = True
self.assertFalse(fail, error_message)

View File

@ -87,7 +87,7 @@ class ArchiveCheck(ProbackupTest, unittest.TestCase):
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
'INFO: wait for LSN' in e.message
'INFO: wait for WAL segment' in e.message
and 'ERROR: switched WAL segment' in e.message
and 'could not be archived' in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))

View File

@ -3,6 +3,8 @@ import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
from datetime import datetime, timedelta
import subprocess
from testgres import ClusterException
import shutil, sys
module_name = 'ptrack'
@ -304,9 +306,9 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_drop_db(self):
"""Make node, create database, create table in database, take ptrack backup, drop database, take ptrack backup"""
# @unittest.skip("skip")
def test_create_db(self):
"""Make node, take full backup, create database db1, take ptrack backup, restore database and check it presense"""
self.maxDiff = None
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@ -327,19 +329,162 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
node.safe_psql("postgres", "SELECT * FROM t_heap")
self.backup_node(backup_dir, 'node', node, options=["--stream"])
# PTRACK BACKUP
# CREATE DATABASE DB1
node.safe_psql(
"postgres", "create database db1")
node.safe_psql("db1", "create table t_heap as select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
# node.safe_psql("db1", "create table t_heap as select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
# result = node.safe_psql("db1", "select * from t_heap")
# PTRACK BACKUP
self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream"])
pgdata_content = self.pgdata_content(node.data_dir)
node.safe_psql(
"postgres", "checkpoint;")
# DROP DATABASE DB1
#node.safe_psql(
# "postgres", "drop database db1")
# SECOND PTRACK BACKUP
node.safe_psql(
"postgres", "drop database db1")
self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream"])
#self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream"])
node_restored = self.make_simple_node(base_dir="{0}/{1}/node_restored".format(module_name, fname))
node_restored.cleanup()
self.restore_node(backup_dir, 'node', node_restored, options=["-j", "4"])
pgdata_content_new = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata_content, pgdata_content_new)
result_new = node_restored.safe_psql("db1", "select * from t_heap")
self.assertEqual(result, result_new)
try:
node_restored.safe_psql('db1', 'select 1')
# we should die here because exception is what we expect to happen
self.assertEqual(1, 0, "Expecting Error because we are connecting to deleted database.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ClusterException as e:
self.assertTrue('FATAL: database "db1" does not exist' in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_drop_tablespace(self):
"""Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup"""
self.maxDiff = None
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s', 'ptrack_enable': 'on'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
self.create_tblspace_in_node(node, 'somedata')
# CREATE TABLE
node.safe_psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
result = node.safe_psql("postgres", "select * from t_heap")
# FULL BACKUP
self.backup_node(backup_dir, 'node', node, options=["--stream"])
# Move table to tablespace 'somedata'
node.safe_psql(
"postgres", "alter table t_heap set tablespace somedata")
# PTRACK BACKUP
self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream"])
# Move table back to default tablespace
node.safe_psql(
"postgres", "alter table t_heap set tablespace pg_default")
# SECOND PTRACK BACKUP
self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream"])
# DROP TABLESPACE 'somedata'
node.safe_psql(
"postgres", "drop tablespace somedata")
# THIRD PTRACK BACKUP
self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream"])
tblspace = self.get_tblspace_path(node, 'somedata')
node.cleanup()
shutil.rmtree(tblspace, ignore_errors=True)
self.restore_node(backup_dir, 'node', node, options=["-j", "4"])
node.start()
tblspc_exist = node.safe_psql("postgres", "select exists(select 1 from pg_tablespace where spcname = 'somedata')")
if tblspc_exist.rstrip() == 't':
self.assertEqual(1, 0, "Expecting Error because tablespace 'somedata' should not be present")
result_new = node.safe_psql("postgres", "select * from t_heap")
self.assertEqual(result, result_new)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_alter_tablespace(self):
"""Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup"""
self.maxDiff = None
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s', 'ptrack_enable': 'on'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
self.create_tblspace_in_node(node, 'somedata')
# CREATE TABLE
node.safe_psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
result = node.safe_psql("postgres", "select * from t_heap")
# FULL BACKUP
self.backup_node(backup_dir, 'node', node, options=["--stream"])
# Move table to separate tablespace
node.safe_psql(
"postgres", "alter table t_heap set tablespace somedata")
# FIRTS PTRACK BACKUP
self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream"])
# Restore ptrack backup and check table consistency
restored_node = self.make_simple_node(base_dir="{0}/{1}/restored_node".format(module_name, fname))
restored_node.cleanup()
tblspc_path = self.get_tblspace_path(node, 'somedata')
tblspc_path_new = self.get_tblspace_path(restored_node, 'somedata_restored')
self.restore_node(backup_dir, 'node', restored_node, options=[
"-j", "4", "-T", "{0}={1}".format(tblspc_path, tblspc_path_new)])
restored_node.append_conf("postgresql.auto.conf", "port = {0}".format(restored_node.port))
restored_node.start()
result_new = restored_node.safe_psql("postgres", "select * from t_heap")
self.assertEqual(result, result_new)
restored_node.cleanup()
shutil.rmtree(tblspc_path_new, ignore_errors=True)
# Move table to default tablespace
node.safe_psql(
"postgres", "alter table t_heap set tablespace pg_default")
# SECOND PTRACK BACKUP
self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream"])
# Restore second ptrack backup and check table consistency
self.restore_node(backup_dir, 'node', restored_node, options=[
"-j", "4", "-T", "{0}={1}".format(tblspc_path, tblspc_path_new)])
restored_node.append_conf("postgresql.auto.conf", "port = {0}".format(restored_node.port))
restored_node.start()
result_new = restored_node.safe_psql("postgres", "select * from t_heap")
self.assertEqual(result, result_new)
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@ -11,6 +11,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_clean(self):
"""Take backups of every available types and check that PTRACK is clean"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
@ -33,8 +34,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
# Make full backup to clean every ptrack
self.backup_node(backup_dir, 'node', node, options=['-j100', '--stream'])
# Take FULL backup to clean every ptrack
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
for i in idx_ptrack:
# get fork size and calculate it in pages
@ -46,12 +47,12 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
# Update everything, vacuum it and make PTRACK BACKUP
# Update everything and vacuum it
node.safe_psql('postgres', "update t_heap set t_seq = nextval('t_seq'), text = md5(text), tsvector = md5(repeat(tsvector::text, 10))::tsvector;")
node.safe_psql('postgres', 'vacuum t_heap')
# Take PTRACK backup to clean every ptrack
backup_id = self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['-j100'])
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get new size of heap and indexes and calculate it in pages
@ -64,11 +65,11 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# check that ptrack bits are cleaned
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
# Update everything, vacuum it and make PAGE BACKUP
# Update everything and vacuum it
node.safe_psql('postgres', "update t_heap set t_seq = nextval('t_seq'), text = md5(text), tsvector = md5(repeat(tsvector::text, 10))::tsvector;")
node.safe_psql('postgres', 'vacuum t_heap')
# Make page backup to clean every ptrack
# Take PAGE backup to clean every ptrack
self.backup_node(backup_dir, 'node', node, backup_type='page', options=['-j100'])
node.safe_psql('postgres', 'checkpoint')
@ -89,6 +90,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_clean_replica(self):
"""Take backups of every available types from master and check that PTRACK on replica is clean"""
fname = self.id().split('.')[3]
master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
@ -99,14 +101,12 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'master', master)
master.start()
self.create_tblspace_in_node(master, 'somedata')
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica, options=['-T', '{0}={1}'.format(
self.get_tblspace_path(master, 'somedata'), self.get_tblspace_path(replica, 'somedata_new'))])
self.restore_node(backup_dir, 'master', replica)
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica)
@ -116,13 +116,13 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table and indexes
master.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, nextval('t_seq') as t_seq, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
"create sequence t_seq; create table t_heap as select i as id, nextval('t_seq') as t_seq, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
master.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
# Make full backup to clean every ptrack
# Take FULL backup to clean every ptrack
self.backup_node(backup_dir, 'replica', replica, options=['-j100', '--stream',
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
@ -136,10 +136,11 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
# Update everything, vacuum it and make PTRACK BACKUP
# Update everything and vacuum it
master.safe_psql('postgres', "update t_heap set t_seq = nextval('t_seq'), text = md5(text), tsvector = md5(repeat(tsvector::text, 10))::tsvector;")
master.safe_psql('postgres', 'vacuum t_heap')
# Take PTRACK backup to clean every ptrack
backup_id = self.backup_node(backup_dir, 'replica', replica, backup_type='ptrack', options=['-j100', '--stream',
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
@ -155,25 +156,24 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# check that ptrack bits are cleaned
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
# WAITING FOR PGPRO-820
## Update everything, vacuum it and make PAGE BACKUP
# master.safe_psql('postgres', "update t_heap set t_seq = nextval('t_seq'), text = md5(text), tsvector = md5(repeat(tsvector::text, 10))::tsvector;")
# master.safe_psql('postgres', 'vacuum t_heap')
# master.safe_psql('postgres', 'checkpoint')
# Update everything and vacuum it
master.safe_psql('postgres', "update t_heap set t_seq = nextval('t_seq'), text = md5(text), tsvector = md5(repeat(tsvector::text, 10))::tsvector;")
master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')
## Make page backup to clean every ptrack
# self.backup_node(backup_dir, 'replica', replica, backup_type='page', options=['-j100',
# '--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
# for i in idx_ptrack:
# # get new size of heap and indexes and calculate it in pages
# idx_ptrack[i]['size'] = self.get_fork_size(replica, i)
# # update path to heap and index files in case they`ve changed
# idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# # # get ptrack for every idx
# idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
# replica, idx_ptrack[i]['path'], idx_ptrack[i]['size'], idx_ptrack[i]['size'])
# # check that ptrack bits are cleaned
# self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
# Take PAGE backup to clean every ptrack
self.backup_node(backup_dir, 'replica', replica, backup_type='page', options=['-j100',
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
for i in idx_ptrack:
# get new size of heap and indexes and calculate it in pages
idx_ptrack[i]['size'] = self.get_fork_size(replica, i)
# update path to heap and index files in case they`ve changed
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# # get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
# check that ptrack bits are cleaned
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@ -10,7 +10,7 @@ module_name = 'ptrack_cluster'
class SimpleTest(ProbackupTest, unittest.TestCase):
@unittest.skip("skip")
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_cluster_on_btree(self):
fname = self.id().split('.')[3]
@ -70,7 +70,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
@unittest.skip("skip")
# @unittest.skip("skip")
def test_ptrack_cluster_on_gist(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
@ -82,15 +82,13 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'node', node)
node.start()
self.create_tblspace_in_node(node, 'somedata')
# Create table and indexes
node.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, nextval('t_seq') as t_seq, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
"create sequence t_seq; create table t_heap as select i as id, nextval('t_seq') as t_seq, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
node.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
node.safe_psql('postgres', 'vacuum t_heap')
@ -123,7 +121,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity
# Compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
# Clean after yourself
@ -141,31 +139,27 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'master', master)
master.start()
self.create_tblspace_in_node(master, 'somedata')
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica, options=['-T', '{0}={1}'.format(
self.get_tblspace_path(master, 'somedata'), self.get_tblspace_path(replica, 'somedata_new'))])
self.restore_node(backup_dir, 'master', replica)
replica.append_conf('postgresql.auto.conf', 'port = {0}'.format(replica.port))
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, synchronous=True)
self.set_archiving(backup_dir, 'replica', replica)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start()
# Create table and indexes
master.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, nextval('t_seq') as t_seq, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
"create sequence t_seq; create table t_heap as select i as id, nextval('t_seq') as t_seq, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
master.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
#exit(1)
master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')
@ -215,28 +209,25 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'master', master)
master.start()
self.create_tblspace_in_node(master, 'somedata')
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica, options=['-T', '{0}={1}'.format(
self.get_tblspace_path(master, 'somedata'), self.get_tblspace_path(replica, 'somedata_new'))])
self.restore_node(backup_dir, 'master', replica)
replica.append_conf('postgresql.auto.conf', 'port = {0}'.format(replica.port))
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, 'replica', synchronous=True)
self.set_archiving(backup_dir, 'replica', replica)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start()
# Create table and indexes
master.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, nextval('t_seq') as t_seq, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
"create sequence t_seq; create table t_heap as select i as id, nextval('t_seq') as t_seq, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
master.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
master.safe_psql('postgres', 'vacuum t_heap')
@ -257,7 +248,6 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
master.safe_psql('postgres', 'cluster t_heap using t_gist')
master.safe_psql('postgres', 'checkpoint')
#sleep(10)
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
@ -271,7 +261,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity
# Compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
# Clean after yourself

130
tests/ptrack_truncate.py Normal file
View File

@ -0,0 +1,130 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
module_name = 'ptrack_truncate'
class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_truncate(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
self.create_tblspace_in_node(node, 'somedata')
# Create table and indexes
node.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
node.safe_psql('postgres', 'truncate t_heap')
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get fork size and calculate it in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums for every page of this fork
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
# Make full backup to clean every ptrack
self.backup_node(backup_dir, 'node', node, options=['-j100', '--stream'])
for i in idx_ptrack:
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_ptrack_truncate_replica(self):
fname = self.id().split('.')[3]
master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.start()
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, 'replica', synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start()
# Create table and indexes
master.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
replica.safe_psql('postgres', 'truncate t_heap')
replica.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get fork size and calculate it in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate md5sums for every page of this fork
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
# Make full backup to clean every ptrack
self.backup_node(backup_dir, 'replica', replica, options=['-j100', '--stream'])
for i in idx_ptrack:
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])
# Delete some rows, vacuum it and make checkpoint
master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get new size of heap and indexes and calculate it in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
# update path to heap and index files in case they`ve changed
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@ -74,7 +74,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
@unittest.skip("skip")
# @unittest.skip("skip")
def test_ptrack_vacuum_replica(self):
fname = self.id().split('.')[3]
master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
@ -86,64 +86,64 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'master', master)
master.start()
self.create_tblspace_in_node(master, 'somedata')
self.backup_node(backup_dir, 'master', master, options=['--stream'])
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname))
node.cleanup()
replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', node, options=['-T', '{0}={1}'.format(
self.get_tblspace_path(master, 'somedata'), self.get_tblspace_path(node, 'somedata_new'))])
self.restore_node(backup_dir, 'master', replica)
node.append_conf('postgresql.auto.conf', 'port = {0}'.format(node.port))
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, 'replica', synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start()
# Create table and indexes
node.safe_psql(
master.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
"create sequence t_seq; create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
master.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
node.safe_psql('postgres', 'vacuum t_heap')
node.safe_psql('postgres', 'checkpoint')
master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get fork size and calculate it in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate md5sums for every page of this fork
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
# Make full backup to clean every ptrack
self.backup_node(backup_dir, 'node', node, options=['-j100', '--stream'])
# Make FULL backup to clean every ptrack
self.backup_node(backup_dir, 'replica', replica, options=['-j10',
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
for i in idx_ptrack:
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])
# Delete some rows, vacuum it and make checkpoint
node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
node.safe_psql('postgres', 'vacuum t_heap')
node.safe_psql('postgres', 'checkpoint')
master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')
# CHECK PTRACK SANITY
for i in idx_ptrack:
# get new size of heap and indexes and calculate it in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
# update path to heap and index files in case they`ve changed
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])

View File

@ -66,7 +66,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
@unittest.skip("skip")
# @unittest.skip("skip")
def test_ptrack_vacuum_bits_frozen_replica(self):
fname = self.id().split('.')[3]
master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
@ -78,56 +78,56 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'master', master)
master.start()
self.create_tblspace_in_node(master, 'somedata')
self.backup_node(backup_dir, 'master', master, options=['--stream'])
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname))
node.cleanup()
replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', node, options=['-T', '{0}={1}'.format(
self.get_tblspace_path(master, 'somedata'), self.get_tblspace_path(node, 'somedata_new'))])
self.restore_node(backup_dir, 'master', replica)
node.append_conf('postgresql.auto.conf', 'port = {0}'.format(node.port))
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start()
# Create table and indexes
node.safe_psql(
master.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
"create sequence t_seq; create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
master.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
node.safe_psql('postgres', 'checkpoint')
master.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.backup_node(backup_dir, 'node', node, options=['-j100', '--stream'])
# Take PTRACK backup to clean every ptrack
self.backup_node(backup_dir, 'replica', replica, options=['-j100',
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
node.safe_psql('postgres', 'vacuum freeze t_heap')
node.safe_psql('postgres', 'checkpoint')
master.safe_psql('postgres', 'vacuum freeze t_heap')
master.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
# update path to heap and index files in case they`ve changed
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])

View File

@ -43,7 +43,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.backup_node(backup_dir, 'node', node, options=['-j100', '--stream'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
node.safe_psql('postgres', 'vacuum t_heap')
node.safe_psql('postgres', 'checkpoint')

View File

@ -67,3 +67,74 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum_full_replica(self):
fname = self.id().split('.')[3]
master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.start()
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, 'replica', synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start()
# Create table and indexes
master.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,127) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
# Take FULL backup to clean every ptrack
self.backup_node(backup_dir, 'replica', replica, options=['-j10',
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
master.safe_psql('postgres', 'vacuum full t_heap')
master.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
# update path to heap and index files in case they`ve changed
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity, the most important part
self.check_ptrack_sanity(idx_ptrack[i])
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@ -67,3 +67,76 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum_truncate_replica(self):
fname = self.id().split('.')[3]
master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.start()
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, 'replica', synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start()
# Create table and indexes
master.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
# Take PTRACK backup to clean every ptrack
self.backup_node(backup_dir, 'replica', replica, options=['-j100',
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
master.safe_psql('postgres', 'delete from t_heap where id > 128;')
master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
# update path to heap and index files in case they`ve changed
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@ -13,7 +13,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_replica_stream_ptrack_backup(self):
"""make full stream backup from replica"""
"""make node, take full backup, restore it and make replica from it, take full stream backup from replica"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
@ -25,9 +25,6 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
# CREATE TABLE
master.psql(
"postgres",
@ -36,6 +33,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
# take full backup and restore it
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
self.set_replica(master, replica)
@ -60,7 +59,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
node.cleanup()
self.restore_node(backup_dir, 'replica', data_dir=node.data_dir)
node.append_conf('postgresql.auto.conf', 'port = {0}'.format(node.port))
node.start({"-t": "600"})
node.start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
@ -79,7 +78,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
node.cleanup()
self.restore_node(backup_dir, 'replica', data_dir=node.data_dir, backup_id=backup_id)
node.append_conf('postgresql.auto.conf', 'port = {0}'.format(node.port))
node.start({"-t": "600"})
node.start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
@ -89,10 +88,11 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_replica_archive_page_backup(self):
"""make full archive backup from replica, set replica, make backup from replica"""
"""make archive master, take full and page archive backups from master, set replica, make archive backup from replica"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s'}
)
@ -142,7 +142,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
node.cleanup()
self.restore_node(backup_dir, 'replica', data_dir=node.data_dir)
node.append_conf('postgresql.auto.conf', 'port = {0}'.format(node.port))
node.start({"-t": "600"})
node.start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
@ -161,7 +161,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
node.cleanup()
self.restore_node(backup_dir, 'replica', data_dir=node.data_dir, backup_id=backup_id)
node.append_conf('postgresql.auto.conf', 'port = {0}'.format(node.port))
node.start({"-t": "600"})
node.start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)

View File

@ -3,6 +3,7 @@ import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
import subprocess
from datetime import datetime
import sys
module_name = 'restore'
@ -32,7 +33,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
before = node.execute("postgres", "SELECT * FROM pgbench_branches")
backup_id = self.backup_node(backup_dir, 'node', node)
node.stop({"-m": "immediate"})
node.stop()
node.cleanup()
# 1 - Test recovery from latest
@ -44,7 +45,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
recovery_conf = os.path.join(node.data_dir, "recovery.conf")
self.assertEqual(os.path.isfile(recovery_conf), True)
node.start({"-t": "600"})
node.start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
@ -78,14 +79,14 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
before = node.execute("postgres", "SELECT * FROM pgbench_branches")
node.stop({"-m": "immediate"})
node.stop()
node.cleanup()
self.assertIn("INFO: Restore of backup {0} completed.".format(backup_id),
self.restore_node(backup_dir, 'node', node, options=["-j", "4"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd))
node.start({"-t": "600"})
node.start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
@ -93,7 +94,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
#@unittest.skip("skip")
# @unittest.skip("skip")
def test_restore_to_specific_timeline(self):
"""recovery to target timeline"""
fname = self.id().split('.')[3]
@ -114,14 +115,14 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
backup_id = self.backup_node(backup_dir, 'node', node)
target_tli = int(node.get_control_data()["Latest checkpoint's TimeLineID"])
node.stop({"-m": "immediate"})
node.stop()
node.cleanup()
self.assertIn("INFO: Restore of backup {0} completed.".format(backup_id),
self.restore_node(backup_dir, 'node', node, options=["-j", "4"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd))
node.start({"-t": "600"})
node.start()
pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pgbench.wait()
@ -129,7 +130,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.backup_node(backup_dir, 'node', node)
node.stop({"-m": "immediate"})
node.stop()
node.cleanup()
# Correct Backup must be choosen for restore
@ -140,7 +141,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
recovery_target_timeline = self.get_recovery_conf(node)["recovery_target_timeline"]
self.assertEqual(int(recovery_target_timeline), target_tli)
node.start({"-t": "600"})
node.start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
@ -179,7 +180,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.restore_node(backup_dir, 'node', node, options=["-j", "4", '--time="{0}"'.format(target_time)]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd))
node.start({"-t": "600"})
node.start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
@ -227,14 +228,14 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# been archived up to the xmin point saved earlier without that.
#node.execute("postgres", "SELECT pg_switch_xlog()")
node.stop({"-m": "fast"})
node.stop()
node.cleanup()
self.assertIn("INFO: Restore of backup {0} completed.".format(backup_id),
self.restore_node(backup_dir, 'node', node, options=["-j", "4", '--xid={0}'.format(target_xid)]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd))
node.start({"-t": "600"})
node.start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
@ -268,14 +269,14 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
before = node.execute("postgres", "SELECT * FROM pgbench_branches")
node.stop({"-m": "immediate"})
node.stop()
node.cleanup()
self.assertIn("INFO: Restore of backup {0} completed.".format(backup_id),
self.restore_node(backup_dir, 'node', node, options=["-j", "4"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd))
node.start({"-t": "600"})
node.start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
@ -315,14 +316,14 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
before = node.execute("postgres", "SELECT * FROM pgbench_branches")
node.stop({"-m": "immediate"})
node.stop()
node.cleanup()
self.assertIn("INFO: Restore of backup {0} completed.".format(backup_id),
self.restore_node(backup_dir, 'node', node, options=["-j", "4"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd))
node.start({"-t": "600"})
node.start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
@ -364,7 +365,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.restore_node(backup_dir, 'node', node, options=["-j", "4"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd))
node.start({"-t": "600"})
node.start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
@ -406,14 +407,14 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
delta = node.execute("postgres", "SELECT sum(delta) FROM pgbench_history")
self.assertEqual(bbalance, delta)
node.stop({"-m": "immediate"})
node.stop()
node.cleanup()
self.assertIn("INFO: Restore of backup {0} completed.".format(backup_id),
self.restore_node(backup_dir, 'node', node, options=["-j", "4"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd))
node.start({"-t": "600"})
node.start()
bbalance = node.execute("postgres", "SELECT sum(bbalance) FROM pgbench_branches")
delta = node.execute("postgres", "SELECT sum(delta) FROM pgbench_history")
@ -458,14 +459,14 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.assertEqual(bbalance, delta)
node.stop({"-m": "immediate"})
node.stop()
node.cleanup()
#self.wrong_wal_clean(node, wal_segment_size)
self.assertIn("INFO: Restore of backup {0} completed.".format(backup_id),
self.restore_node(backup_dir, 'node', node, options=["-j", "4"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd))
node.start({"-t": "600"})
node.start()
bbalance = node.execute("postgres", "SELECT sum(bbalance) FROM pgbench_branches")
delta = node.execute("postgres", "SELECT sum(delta) FROM pgbench_history")
@ -514,7 +515,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# been archived up to the xmin point saved earlier without that.
# node.execute("postgres", "SELECT pg_switch_xlog()")
node.stop({"-m": "fast"})
node.stop()
node.cleanup()
self.assertIn("INFO: Restore of backup {0} completed.".format(backup_id),
@ -522,7 +523,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
options=["-j", "4", '--xid={0}'.format(target_xid), "--inclusive=false"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd))
node.start({"-t": "600"})
node.start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
@ -684,7 +685,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
#@unittest.skip("skip")
# @unittest.skip("skip")
def test_archive_node_backup_stream_restore_to_recovery_time(self):
"""make node with archiving, make stream backup, make PITR to Recovery Time"""
fname = self.id().split('.')[3]
@ -711,7 +712,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.restore_node(backup_dir, 'node', node, options=["-j", "4", '--time="{0}"'.format(recovery_time)]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd))
node.start({"-t": "600"})
node.start()
result = node.psql("postgres", 'select * from t_heap')
self.assertTrue('does not exist' in result[2].decode("utf-8"))
@ -720,7 +721,8 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.del_test_dir(module_name, fname)
#@unittest.skip("skip")
# @unittest.skip("skip")
@unittest.expectedFailure
def test_archive_node_backup_stream_restore_to_recovery_time(self):
"""make node with archiving, make stream backup, make PITR to Recovery Time"""
fname = self.id().split('.')[3]
@ -737,7 +739,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
backup_id = self.backup_node(backup_dir, 'node', node, options=["--stream"])
node.safe_psql("postgres", "create table t_heap(a int)")
node.safe_psql("postgres", "select pg_switch_xlog()")
node.stop()
node.cleanup()
@ -747,7 +748,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.restore_node(backup_dir, 'node', node, options=["-j", "4", '--time="{0}"'.format(recovery_time)]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd))
node.start({"-t": "600"})
node.start()
result = node.psql("postgres", 'select * from t_heap')
self.assertTrue('does not exist' in result[2].decode("utf-8"))
@ -756,6 +757,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
@unittest.expectedFailure
def test_archive_node_backup_stream_pitr(self):
"""make node with archiving, make stream backup, create table t_heap, make pitr to Recovery Time, check that t_heap do not exists"""
fname = self.id().split('.')[3]
@ -781,7 +783,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
options=["-j", "4", '--time="{0}"'.format(recovery_time)]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd))
node.start({"-t": "600"})
node.start()
result = node.psql("postgres", 'select * from t_heap')
self.assertEqual(True, 'does not exist' in result[2].decode("utf-8"))
@ -790,6 +792,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
@unittest.expectedFailure
def test_archive_node_backup_archive_pitr_2(self):
"""make node with archiving, make archive backup, create table t_heap, make pitr to Recovery Time, check that t_heap do not exists"""
fname = self.id().split('.')[3]
@ -805,7 +808,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
backup_id = self.backup_node(backup_dir, 'node', node)
node.safe_psql("postgres", "create table t_heap(a int)")
node.pg_ctl('stop', {'-m': 'immediate', '-D': '{0}'.format(node.data_dir)})
node.stop()
node.cleanup()
recovery_time = self.show_pb(backup_dir, 'node', backup_id)['recovery-time']
@ -815,7 +818,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
options=["-j", "4", '--time="{0}"'.format(recovery_time)]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd))
node.start({"-t": "600"})
node.start()
result = node.psql("postgres", 'select * from t_heap')
self.assertTrue('does not exist' in result[2].decode("utf-8"))

View File

@ -304,3 +304,111 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_pgpro702_688(self):
"""make node without archiving, make stream backup, get Recovery Time, validate to Recovery Time"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
backup_id = self.backup_node(backup_dir, 'node', node, options=["--stream"])
recovery_time = self.show_pb(backup_dir, 'node', backup_id)['recovery-time']
self.assertIn("INFO: backup validation completed successfully on"),
self.validate_pb(backup_dir, 'node', node, options=["--time='{0}'".format(recovery_time)])
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_pgpro688(self):
"""make node with archiving, make backup, get Recovery Time, validate to Recovery Time. Waiting PGPRO-688. RESOLVED"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
backup_id = self.backup_node(backup_dir, 'node', node)
recovery_time = self.show_pb(backup_dir, 'node', backup_id)['recovery-time']
# Uncommenting this section will make this test True Positive
#node.safe_psql("postgres", "select pg_create_restore_point('123')")
#node.safe_psql("postgres", "select txid_current()")
#node.safe_psql("postgres", "select pg_switch_xlog()")
####
#try:
self.validate_pb(backup_dir, 'node', options=["--time='{0}'".format(recovery_time)])
# we should die here because exception is what we expect to happen
# self.assertEqual(1, 0, "Expecting Error because it should not be possible safely validate 'Recovery Time' without wal record with timestamp.\n Output: {0} \n CMD: {1}".format(
# repr(self.output), self.cmd))
# except ProbackupException as e:
# self.assertTrue('WARNING: recovery can be done up to time {0}'.format(recovery_time) in e.message,
# '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_pgpro561(self):
"""
make node with archiving, make stream backup, restore it to node1,
check that archiving is not successful on node1
"""
fname = self.id().split('.')[3]
node1 = self.make_simple_node(base_dir="{0}/{1}/node1".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node1', node1)
self.set_archiving(backup_dir, 'node1', node1)
node1.start()
backup_id = self.backup_node(backup_dir, 'node1', node1, options=["--stream"])
node2 = self.make_simple_node(base_dir="{0}/{1}/node2".format(module_name, fname))
node2.cleanup()
node1.psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
self.backup_node(backup_dir, 'node1', node1, backup_type='page', options=["--stream"])
self.restore_node(backup_dir, 'node1', data_dir=node2.data_dir)
node2.append_conf('postgresql.auto.conf', 'port = {0}'.format(node2.port))
node2.start()
timeline_node1 = node1.get_control_data()["Latest checkpoint's TimeLineID"]
timeline_node2 = node2.get_control_data()["Latest checkpoint's TimeLineID"]
self.assertEqual(timeline_node1, timeline_node2, "Timelines on Master and Node1 should be equal. This is unexpected")
archive_command_node1 = node1.safe_psql("postgres", "show archive_command")
archive_command_node2 = node2.safe_psql("postgres", "show archive_command")
self.assertEqual(archive_command_node1, archive_command_node2, "Archive command on Master and Node should be equal. This is unexpected")
result = node2.safe_psql("postgres", "select last_failed_wal from pg_stat_get_archiver() where last_failed_wal is not NULL")
# self.assertEqual(res, six.b(""), 'Restored Node1 failed to archive segment {0} due to having the same archive command as Master'.format(res.rstrip()))
if result == "":
self.assertEqual(1, 0, 'Error is expected due to Master and Node1 having the common archive and archive_command')
# Clean after yourself
self.del_test_dir(module_name, fname)