1
0
mirror of https://github.com/postgrespro/pg_probackup.git synced 2025-09-16 09:26:30 +02:00

tests: added ptrack.PtrackTest.test_corrupt_ptrack_map and ptrack.PtrackTest.test_ptrack_pg_resetxlog

This commit is contained in:
Grigory Smolkin
2019-12-25 17:41:19 +03:00
parent 75fefb0858
commit 1c791f3433

View File

@@ -100,7 +100,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
# self.set_archiving(backup_dir, 'node', node)
node.slow_start()
node.safe_psql(
@@ -283,7 +283,6 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
if node.major_version >= 12:
@@ -335,7 +334,6 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
if node.major_version >= 12:
@@ -404,7 +402,6 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
if node.major_version >= 12:
@@ -412,7 +409,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
"postgres",
"CREATE EXTENSION ptrack")
self.backup_node(backup_dir, 'node', node)
self.backup_node(backup_dir, 'node', node, options=['--stream'])
con = node.connect("postgres")
con.execute(
@@ -464,7 +461,6 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
self.create_tblspace_in_node(node, 'somedata')
@@ -474,7 +470,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
"postgres",
"CREATE EXTENSION ptrack")
self.backup_node(backup_dir, 'node', node)
self.backup_node(backup_dir, 'node', node, options=['--stream'])
node.safe_psql(
"postgres",
@@ -499,10 +495,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
gdb.continue_execution_until_break(20)
self.backup_node(
backup_dir, 'node', node, backup_type='ptrack')
backup_dir, 'node', node, backup_type='ptrack', options=['--stream'])
self.backup_node(
backup_dir, 'node', node, backup_type='ptrack')
backup_dir, 'node', node, backup_type='ptrack', options=['--stream'])
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)
@@ -555,7 +551,6 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
self.create_tblspace_in_node(node, 'somedata')
@@ -577,7 +572,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
"postgres",
"vacuum t_heap")
self.backup_node(backup_dir, 'node', node)
self.backup_node(backup_dir, 'node', node, options=['--stream'])
node.safe_psql(
"postgres",
@@ -588,10 +583,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
"vacuum t_heap")
self.backup_node(
backup_dir, 'node', node, backup_type='ptrack')
backup_dir, 'node', node, backup_type='ptrack', options=['--stream'])
self.backup_node(
backup_dir, 'node', node, backup_type='ptrack')
backup_dir, 'node', node, backup_type='ptrack', options=['--stream'])
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)
@@ -639,7 +634,6 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
if node.major_version >= 12:
@@ -1009,7 +1003,6 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
node.safe_psql("postgres", "SELECT * FROM t_heap")
self.backup_node(backup_dir, 'node', node)
# PAGE BACKUP
node.safe_psql(
@@ -1145,7 +1138,6 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
if node.major_version >= 12:
@@ -1174,8 +1166,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# PTRACK BACKUP
backup_id = self.backup_node(
backup_dir, 'node', node,
backup_type='ptrack',
options=["--stream"])
backup_type='ptrack', options=["--stream"])
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)
@@ -1267,7 +1258,6 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
if node.major_version >= 12:
@@ -1286,14 +1276,13 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
replica.cleanup()
self.backup_node(
backup_dir, 'node', node, options=['-j10'])
backup_dir, 'node', node, options=['-j10', '--stream'])
self.restore_node(backup_dir, 'node', replica)
# Add replica
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(node, replica, 'replica', synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.slow_start(replica=True)
self.backup_node(
@@ -1750,7 +1739,6 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
if node.major_version >= 12:
@@ -1763,7 +1751,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# CREATE TABLE
node.pgbench_init(scale=100, options=['--tablespace=somedata'])
# FULL BACKUP
self.backup_node(backup_dir, 'node', node)
self.backup_node(backup_dir, 'node', node, options=['--stream'])
# PTRACK STUFF
if node.major_version < 12:
@@ -1801,7 +1789,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
result = node.safe_psql("postgres", "select * from pgbench_accounts")
# FIRST PTRACK BACKUP
self.backup_node(
backup_dir, 'node', node, backup_type='ptrack')
backup_dir, 'node', node, backup_type='ptrack', options=['--stream'])
# GET PHYSICAL CONTENT FROM NODE
pgdata = self.pgdata_content(node.data_dir)
@@ -1921,7 +1909,6 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
self.create_tblspace_in_node(node, 'somedata')
@@ -1971,7 +1958,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# Take PTRACK backup to clean every ptrack
backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='ptrack', options=['-j10'])
backup_dir, 'node', node, backup_type='ptrack', options=['-j10', '--stream'])
node.safe_psql('postgres', 'checkpoint')
@@ -1997,7 +1984,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# Take PAGE backup to clean every ptrack
self.backup_node(
backup_dir, 'node', node,
backup_type='page', options=['-j10'])
backup_type='page', options=['-j10', '--stream'])
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
@@ -2049,7 +2036,6 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.slow_start(replica=True)
# Create table and indexes
@@ -2312,7 +2298,6 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.slow_start(replica=True)
# Create table and indexes
@@ -2393,7 +2378,6 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, 'replica', synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.slow_start(replica=True)
# Create table and indexes
@@ -2485,7 +2469,6 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
if node.major_version >= 12:
@@ -2531,7 +2514,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# Take PTRACK backup
backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
options=['-j10'])
options=['-j10', '--stream'])
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)
@@ -2586,7 +2569,6 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.slow_start(replica=True)
# Create table
@@ -2766,7 +2748,6 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, 'replica', synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.slow_start(replica=True)
# Create table and indexes
@@ -2964,7 +2945,6 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, 'replica', synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.slow_start(replica=True)
# Create table and indexes
@@ -3151,7 +3131,6 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.slow_start(replica=True)
# Create table and indexes
@@ -3411,7 +3390,6 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, 'replica', synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.slow_start(replica=True)
# Create table and indexes
@@ -3593,7 +3571,6 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, 'replica', synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.slow_start(replica=True)
# Create table and indexes
@@ -3754,7 +3731,6 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
node.safe_psql(
@@ -3871,4 +3847,203 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
# TODO: ptrack.map corruption
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_pg_resetxlog(self):
    """Verify that a ptrack backup is rejected after pg_resetwal/pg_resetxlog.

    Rewriting the WAL history with pg_resetwal invalidates the changes
    recorded in the ptrack map, so the incremental ptrack backup taken
    afterwards must fail instead of silently producing a bogus backup.
    """
    fname = self.id().split('.')[3]
    node = self.make_simple_node(
        base_dir=os.path.join(module_name, fname, 'node'),
        set_replication=True,
        ptrack_enable=True,
        initdb_params=['--data-checksums'],
        pg_options={
            'autovacuum': 'off',
            'shared_buffers': '512MB',
            'max_wal_size': '3GB'})
    backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)
    node.slow_start()

    # The ptrack extension only exists as a separate extension on PG >= 12.
    if node.major_version >= 12:
        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

    # Create table
    node.safe_psql(
        "postgres",
        "create extension bloom; create sequence t_seq; "
        "create table t_heap "
        "as select nextval('t_seq')::int as id, md5(i::text) as text, "
        "md5(repeat(i::text,10))::tsvector as tsvector "
        # "from generate_series(0,25600) i")
        "from generate_series(0,2560) i")

    # FULL backup so the later ptrack backup has a parent to increment from.
    self.backup_node(
        backup_dir, 'node', node, options=['--stream'])

    # Create indexes
    for i in idx_ptrack:
        if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
            node.safe_psql(
                "postgres",
                "CREATE INDEX {0} ON {1} USING {2}({3})".format(
                    i, idx_ptrack[i]['relation'],
                    idx_ptrack[i]['type'], idx_ptrack[i]['column']))

    # Dirty pages after the FULL backup so ptrack has changes to track.
    node.safe_psql(
        'postgres',
        "update t_heap set id = nextval('t_seq'), text = md5(text), "
        "tsvector = md5(repeat(tsvector::text, 10))::tsvector")

    # node.safe_psql(
    #     'postgres',
    #     "create extension pg_buffercache")
    #
    # print(node.safe_psql(
    #     'postgres',
    #     "SELECT count(*) FROM pg_buffercache WHERE isdirty"))

    # kill the bastard
    if self.verbose:
        print('Killing postmaster. Losing Ptrack changes')
    node.stop(['-m', 'immediate', '-D', node.data_dir])

    # now smack it with sledgehammer
    # pg_resetxlog was renamed to pg_resetwal in PostgreSQL 10.
    if node.major_version >= 10:
        pg_resetxlog_path = self.get_bin_path('pg_resetwal')
        wal_dir = 'pg_wal'
    else:
        pg_resetxlog_path = self.get_bin_path('pg_resetxlog')
        wal_dir = 'pg_xlog'

    # NOTE(review): '-o 42' is passed as a single argv token (flag and
    # value fused); getopt still accepts it, but '-o', '42' would be
    # the conventional spelling.
    self.run_binary(
        [
            pg_resetxlog_path,
            '-D',
            node.data_dir,
            '-o 42',
            '-f'
        ],
        asynchronous=False)

    if not node.status():
        node.slow_start()
    else:
        print("Die! Die! Why won't you die?... Why won't you die?")
        exit(1)

    # take ptrack backup
    # self.backup_node(
    #     backup_dir, 'node', node,
    #     backup_type='ptrack', options=['--stream'])
    try:
        self.backup_node(
            backup_dir, 'node', node,
            backup_type='ptrack', options=['--stream'])
        # we should die here because exception is what we expect to happen
        self.assertEqual(
            1, 0,
            "Expecting Error because instance was brutalized by pg_resetxlog"
            "\n Output: {0} \n CMD: {1}".format(
                repr(self.output), self.cmd)
        )
    except ProbackupException as e:
        # TODO(review): 'Insert error message' is an obvious placeholder —
        # replace it with the actual error text pg_probackup emits in this
        # scenario, otherwise this assertion can never pass.
        self.assertIn(
            'Insert error message',
            e.message,
            '\n Unexpected Error Message: {0}\n'
            ' CMD: {1}'.format(repr(e.message), self.cmd))

    # pgdata = self.pgdata_content(node.data_dir)
    #
    # node_restored = self.make_simple_node(
    #     base_dir=os.path.join(module_name, fname, 'node_restored'))
    # node_restored.cleanup()
    #
    # self.restore_node(
    #     backup_dir, 'node', node_restored)
    #
    # pgdata_restored = self.pgdata_content(node_restored.data_dir)
    # self.compare_pgdata(pgdata, pgdata_restored)

    # Clean after yourself
    self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_corrupt_ptrack_map(self):
    """Corrupt the on-disk ptrack map after a hard crash and verify recovery.

    Overwrites bytes inside both ``global/ptrack.map`` and
    ``global/ptrack.map.mmap`` while the instance is down, then checks
    that the instance still starts, that a subsequent ptrack backup
    succeeds, and that restoring it yields a data directory identical
    to the source cluster.
    """
    fname = self.id().split('.')[3]
    node = self.make_simple_node(
        base_dir=os.path.join(module_name, fname, 'node'),
        set_replication=True,
        ptrack_enable=True,
        initdb_params=['--data-checksums'])
    backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)
    node.slow_start()

    # The ptrack extension only exists as a separate extension on PG >= 12.
    if node.major_version >= 12:
        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

    # Create table
    node.safe_psql(
        "postgres",
        "create extension bloom; create sequence t_seq; "
        "create table t_heap "
        "as select nextval('t_seq')::int as id, md5(i::text) as text, "
        "md5(repeat(i::text,10))::tsvector as tsvector "
        "from generate_series(0,2560) i")

    # FULL backup so the later ptrack backup has a parent to increment from.
    self.backup_node(
        backup_dir, 'node', node, options=['--stream'])

    # Dirty pages after the FULL backup so ptrack has changes to track.
    node.safe_psql(
        'postgres',
        "update t_heap set id = nextval('t_seq'), text = md5(text), "
        "tsvector = md5(repeat(tsvector::text, 10))::tsvector")

    # kill the bastard
    if self.verbose:
        print('Killing postmaster. Losing Ptrack changes')
    node.stop(['-m', 'immediate', '-D', node.data_dir])

    # Let`s do index corruption. ptrack.map, ptrack.map.mmap
    # Both map files get the same treatment, so loop instead of the
    # original copy-pasted stanzas.  The original also ended each stanza
    # with `f.close` (missing parentheses — a no-op attribute access);
    # the `with` block already closes the file, so no explicit
    # flush/close is needed at all.
    for map_file in ('ptrack.map', 'ptrack.map.mmap'):
        map_path = os.path.join(node.data_dir, 'global', map_file)
        # buffering=0: write the garbage bytes straight to disk.
        with open(map_path, "rb+", 0) as f:
            f.seek(42)
            f.write(b"blablahblahs")

    # os.remove(os.path.join(node.logs_dir, node.pg_log_name))

    node.slow_start()

    self.backup_node(
        backup_dir, 'node', node,
        backup_type='ptrack', options=['--stream'])

    pgdata = self.pgdata_content(node.data_dir)

    node.cleanup()
    self.restore_node(backup_dir, 'node', node)

    pgdata_restored = self.pgdata_content(node.data_dir)
    self.compare_pgdata(pgdata, pgdata_restored)

    # Clean after yourself
    self.del_test_dir(module_name, fname)