import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
import time


module_name = 'ptrack_clean'


class SimpleTest(ProbackupTest, unittest.TestCase):

    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_ptrack_clean(self):
        """Take backups of every available type and check that PTRACK is clean"""
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'ptrack_enable': 'on',
                'wal_level': 'replica',
                'max_wal_senders': '2'})
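        # NB: 'ptrack_enable' is recognized only by PTRACK-patched PostgreSQL
        # builds (e.g. PostgresPro); a stock server would refuse to start
        # with an unknown GUC in its configuration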

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.start()

        self.create_tblspace_in_node(node, 'somedata')

        # Create table and indexes
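        # (t_heap deliberately mixes integer, text and tsvector columns so
        # that each index type defined in idx_ptrack (btree, gin, gist,
        # bloom, and so on; see helpers/ptrack_helpers) has a column
        # it can be built on)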
        node.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap tablespace somedata "
            "as select i as id, nextval('t_seq') as t_seq, "
            "md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")
        for i in idx_ptrack:
            if idx_ptrack[i]['type'] not in ('heap', 'seq'):
                node.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3}) "
                    "tablespace somedata".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'],
                        idx_ptrack[i]['column']))

        # Take FULL backup to clean every ptrack
        self.backup_node(
            backup_dir, 'node', node,
            options=['-j10', '--stream'])
        node.safe_psql('postgres', 'checkpoint')

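        # after a FULL backup every PTRACK bit for these forks is expected
        # to be zero; the checkpoint above flushes changes to disk so the
        # fork sizes and paths read below are current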
        for i in idx_ptrack:
            # get fork size and calculate it in pages
            idx_ptrack[i]['size'] = self.get_fork_size(node, i)
            # get path to heap and index files
            idx_ptrack[i]['path'] = self.get_fork_path(node, i)
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
            self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])

        # Update everything and vacuum it
        node.safe_psql(
            'postgres',
            "update t_heap set t_seq = nextval('t_seq'), "
            "text = md5(text), "
            "tsvector = md5(repeat(tsvector::text, 10))::tsvector;")
        node.safe_psql('postgres', 'vacuum t_heap')

        # Take PTRACK backup to clean every ptrack
        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack',
            options=['-j10', '--log-level-file=verbose'])
        node.safe_psql('postgres', 'checkpoint')

        for i in idx_ptrack:
            # get new size of heap and indexes and calculate it in pages
            idx_ptrack[i]['size'] = self.get_fork_size(node, i)
            # update path to heap and index files in case they've changed
            idx_ptrack[i]['path'] = self.get_fork_path(node, i)
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
            # check that ptrack bits are cleaned
            self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])

        # Update everything and vacuum it
        node.safe_psql(
            'postgres',
            "update t_heap set t_seq = nextval('t_seq'), "
            "text = md5(text), "
            "tsvector = md5(repeat(tsvector::text, 10))::tsvector;")
        node.safe_psql('postgres', 'vacuum t_heap')

        # Take PAGE backup to clean every ptrack
        self.backup_node(
            backup_dir, 'node', node,
            backup_type='page', options=['-j10'])
        node.safe_psql('postgres', 'checkpoint')

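        # a PAGE backup is likewise expected to reset the PTRACK maps;
        # the loop below asserts exactly that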
        for i in idx_ptrack:
            # get new size of heap and indexes and calculate it in pages
            idx_ptrack[i]['size'] = self.get_fork_size(node, i)
            # update path to heap and index files in case they've changed
            idx_ptrack[i]['path'] = self.get_fork_path(node, i)
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
            # check that ptrack bits are cleaned
            self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_ptrack_clean_replica(self):
        """Take backups of every available type from master
        and check that PTRACK on replica is clean"""
        fname = self.id().split('.')[3]
        master = self.make_simple_node(
            base_dir="{0}/{1}/master".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'ptrack_enable': 'on',
                'wal_level': 'replica',
                'max_wal_senders': '2'})
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'master', master)
        master.start()

        self.backup_node(backup_dir, 'master', master, options=['--stream'])

        replica = self.make_simple_node(
            base_dir="{0}/{1}/replica".format(module_name, fname))
        replica.cleanup()
        self.restore_node(backup_dir, 'master', replica)

        self.add_instance(backup_dir, 'replica', replica)
        self.set_replica(master, replica, synchronous=True)
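        # with synchronous replication the master waits for the replica to
        # apply changes, so each backup below sees a replica that is fully
        # caught up with the master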
        self.set_archiving(backup_dir, 'replica', replica, replica=True)
        replica.start()

        # Create table and indexes
        master.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap as select i as id, "
            "nextval('t_seq') as t_seq, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")
        for i in idx_ptrack:
            if idx_ptrack[i]['type'] not in ('heap', 'seq'):
                master.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3})".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'],
                        idx_ptrack[i]['column']))

        # Take FULL backup to clean every ptrack
        self.backup_node(
            backup_dir, 'replica', replica,
            options=[
                '-j10', '--stream',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port)])
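        # the --master-* options give pg_probackup a connection to the master
        # while backing up from the standby, presumably so it can coordinate
        # the start and stop of the backup with the primary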
        master.safe_psql('postgres', 'checkpoint')

        for i in idx_ptrack:
            # get fork size and calculate it in pages
            idx_ptrack[i]['size'] = self.get_fork_size(replica, i)
            # get path to heap and index files
            idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                replica, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
            self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])

        # Update everything and vacuum it
        master.safe_psql(
            'postgres',
            "update t_heap set t_seq = nextval('t_seq'), "
            "text = md5(text), "
            "tsvector = md5(repeat(tsvector::text, 10))::tsvector;")
        master.safe_psql('postgres', 'vacuum t_heap')

        # Take PTRACK backup to clean every ptrack
        backup_id = self.backup_node(
            backup_dir, 'replica', replica,
            backup_type='ptrack',
            options=[
                '-j10', '--stream',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port)])
        master.safe_psql('postgres', 'checkpoint')

        for i in idx_ptrack:
            # get new size of heap and indexes and calculate it in pages
            idx_ptrack[i]['size'] = self.get_fork_size(replica, i)
            # update path to heap and index files in case they've changed
            idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                replica, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
            # check that ptrack bits are cleaned
            self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])

        # Update everything and vacuum it
        master.safe_psql(
            'postgres',
            "update t_heap set t_seq = nextval('t_seq'), text = md5(text), "
            "tsvector = md5(repeat(tsvector::text, 10))::tsvector;")
        master.safe_psql('postgres', 'vacuum t_heap')
        master.safe_psql('postgres', 'checkpoint')

        # Take PAGE backup to clean every ptrack
        self.backup_node(
            backup_dir, 'replica', replica,
            backup_type='page',
            options=[
                '-j10', '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port)])
        master.safe_psql('postgres', 'checkpoint')

        for i in idx_ptrack:
            # get new size of heap and indexes and calculate it in pages
            idx_ptrack[i]['size'] = self.get_fork_size(replica, i)
            # update path to heap and index files in case they've changed
            idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                replica, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
            # check that ptrack bits are cleaned
            self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])

        # Clean after yourself
        self.del_test_dir(module_name, fname)