
Merge branch 'master' of git.postgrespro.ru:pgpro-dev/pg_probackup

Anastasia committed 2017-05-04 11:52:17 +03:00
commit c5de04416f
17 changed files with 194 additions and 112 deletions


@ -17,8 +17,8 @@ def load_tests(loader, tests, pattern):
suite.addTests(loader.loadTestsFromModule(backup_test))
suite.addTests(loader.loadTestsFromModule(delete_test))
suite.addTests(loader.loadTestsFromModule(restore_test))
suite.addTests(loader.loadTestsFromModule(validate_test))
suite.addTests(loader.loadTestsFromModule(retention_test))
# suite.addTests(loader.loadTestsFromModule(validate_test))
# suite.addTests(loader.loadTestsFromModule(retention_test))
suite.addTests(loader.loadTestsFromModule(ptrack_clean))
suite.addTests(loader.loadTestsFromModule(ptrack_cluster))
suite.addTests(loader.loadTestsFromModule(ptrack_move_to_tablespace))


@ -50,7 +50,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
with open(path.join(node.logs_dir, "backup_page.log"), "wb") as backup_log:
backup_log.write(self.backup_pb(node, backup_type="page", options=["--verbose"]))
print self.show_pb(node)
# print self.show_pb(node)
show_backup = self.show_pb(node)[1]
self.assertEqual(show_backup['Status'], six.b("OK"))
self.assertEqual(show_backup['Mode'], six.b("PAGE"))

tests/class_check.py (new file, 24 lines)

@ -0,0 +1,24 @@
class Base(object):
    def __init__(self):
        self.a = 10

    def func(self, arg1, arg2):
        print 'Child {0}, a = {1}'.format(arg1, arg2)


class ChildA(Base):
    def __init__(self):
        Base.__init__(self)
        b = 5
        c = b + self.a
        print 'Child A, a = {0}'.format(c)


class ChildB(Base):
    def __init__(self):
        super(ChildB, self).__init__()
        b = 6
        c = b + self.a
        self.func('B', c)


#ChildA()
ChildB()

tests/class_check1.py (new file, 15 lines)

@ -0,0 +1,15 @@
class Foo(object):
    def __init__(self, *value1, **value2):
        # do something with the values
        print 'I think something is being called here'
        # print value1, value2


class MyFoo(Foo):
    def __init__(self, *args, **kwargs):
        # do something else, don't care about the args
        print args, kwargs
        super(MyFoo, self).__init__(*args, **kwargs)


foo = MyFoo('Python', 2.7, stack='overflow', ololo='lalala')

tests/class_check2.py (new file, 23 lines)

@ -0,0 +1,23 @@
class Base(object):
    def __init__(self):
        self.a = 10
        self.b = 1

    # def func(self, arg1, arg2):
    #     print 'Child {0}, a = {1}'.format(arg1, arg2)


class ChildA(Base):
    def __init__(self):
        Base.__init__(self)
        self.b = self.b + 1


class ChildB(ChildA):
    def __init__(self):
        ChildA.__init__(self)
        print 'b = {0}'.format(self.b)
        # c = b + self.a


#ChildA()
ChildB()
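
These three class_check scripts are scratch experiments with Python 2 class initialization: class_check.py contrasts calling the parent's __init__ directly with super(), class_check1.py checks how *args/**kwargs flow through super().__init__, and class_check2.py chains direct __init__ calls through Base -> ChildA -> ChildB. For reference, class_check2.py as written prints

b = 2

since Base sets self.b = 1 and ChildA.__init__ increments it before ChildB reports the value.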


@ -65,7 +65,7 @@ class OptionTest(ProbackupTest, unittest.TestCase):
# we should die here because exception is what we expect to happen
exit(1)
except ProbackupException, e:
print e.message
# print e.message
self.assertEqual(
e.message,
'ERROR: required parameter not specified: BACKUP_MODE (-b, --backup-mode)\n'


@ -1,8 +1,6 @@
import unittest
from sys import exit
from testgres import get_new_node, stop_all
#import os
from os import path, open, lseek, read, close, O_RDONLY
from .ptrack_helpers import ProbackupTest, idx_ptrack
@ -48,7 +46,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['size'])
self.check_ptrack_clean(idx_ptrack[i])
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
# Update everything, vacuum it and make PTRACK BACKUP
node.psql('postgres', 'update t_heap set text = md5(text), tsvector = md5(repeat(tsvector::text, 10))::tsvector;')
@ -66,34 +64,29 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['size'])
# check that ptrack bits are cleaned
self.check_ptrack_clean(idx_ptrack[i])
#
# # Update everything, vacuum it and make PAGE BACKUP
# node.psql('postgres', 'update t_heap set text = md5(text), tsvector = md5(repeat(tsvector::text, 10))::tsvector;')
# node.psql('postgres', 'vacuum t_heap')
#
# # Make page backup to clean every ptrack
# self.backup_pb(node, backup_type='page', options=['-j100'])
# node.psql('postgres', 'checkpoint')
#
# for i in idx_ptrack:
# # get new size of heap and indexes and calculate it in pages
# idx_ptrack[i]['size'] = self.get_fork_size(node, i)
# # update path to heap and index files in case they`ve changed
# idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# # # get ptrack for every idx
# idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
# idx_ptrack[i]['path'], idx_ptrack[i]['size'])
# # check that ptrack bits are cleaned
# self.check_ptrack_clean(idx_ptrack[i])
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
# print self.clean_pb(node)
# for i in self.show_pb(node):
# print i
self.show_pb(node, as_text=True)
# Update everything, vacuum it and make PAGE BACKUP
node.psql('postgres', 'update t_heap set text = md5(text), tsvector = md5(repeat(tsvector::text, 10))::tsvector;')
node.psql('postgres', 'vacuum t_heap')
# Make page backup to clean every ptrack
self.backup_pb(node, backup_type='page', options=['-j100'])
node.psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get new size of heap and indexes and calculate it in pages
idx_ptrack[i]['size'] = self.get_fork_size(node, i)
# update path to heap and index files in case they`ve changed
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# # get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['size'])
# check that ptrack bits are cleaned
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
print self.show_pb(node, as_text=True)
self.clean_pb(node)
# print a
# print a.mode
node.stop()
if __name__ == '__main__':
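
Note that the test now passes the fork size to check_ptrack_clean explicitly. As a condensed sketch of what that helper verifies after this commit (the real implementation in ptrack_helpers.py further down keeps a success flag, prints a diagnostic for each dirty bit, and finishes with assertEqual; names follow the diff):

def check_ptrack_clean(self, idx_dict, size):
    # after a full or page backup, every ptrack bit for the first `size`
    # pages of the fork is expected to be zero
    for PageNum in range(size):
        assert idx_dict['ptrack'][PageNum] == 0, \
            'ptrack for page {0} of type {1} should be clean'.format(
                PageNum, idx_dict['type'])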


@ -1,21 +1,8 @@
import unittest
from sys import exit
from testgres import get_new_node, stop_all
#import os
from os import path, open, lseek, read, close, O_RDONLY
from .ptrack_helpers import ProbackupTest, idx_ptrack
# res = node.execute('postgres', 'show fsync')
# print res[0][0]
# res = node.execute('postgres', 'show wal_level')
# print res[0][0]
# a = ProbackupTest
# res = node.execute('postgres', 'select 1')`
# self.assertEqual(len(res), 1)
# self.assertEqual(res[0][0], 1)
# node.stop()
# a = self.backup_dir(node)
class SimpleTest(ProbackupTest, unittest.TestCase):
def __init__(self, *args, **kwargs):
@ -27,8 +14,9 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("123")
def test_ptrack_cluster_btree(self):
print 'test_ptrack_cluster_btree started'
node = self.make_simple_node(base_dir="tmp_dirs/ptrack/test_ptrack_cluster_btree",
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/ptrack/{0}".format(fname),
set_replication=True,
initdb_params=['--data-checksums', '-A trust'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
@ -56,7 +44,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['old_size'], idx_ptrack[i]['path'])
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.init_pb(node)
self.backup_pb(node, backup_type='full', options=['-j100', '--stream'])
@ -72,9 +60,10 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['new_size'], idx_ptrack[i]['path'])
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(idx_ptrack[i]['path'])
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
@ -82,9 +71,11 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
self.clean_pb(node)
node.stop()
@unittest.skip("123")
def test_ptrack_cluster_spgist(self):
print 'test_ptrack_cluster_spgist started'
node = self.make_simple_node(base_dir="tmp_dirs/ptrack/test_ptrack_cluster_spgist",
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/ptrack/{0}".format(fname),
set_replication=True,
initdb_params=['--data-checksums', '-A trust'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
@ -112,7 +103,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['old_size'], idx_ptrack[i]['path'])
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.init_pb(node)
self.backup_pb(node, backup_type='full', options=['-j100', '--stream'])
@ -128,9 +119,10 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['new_size'], idx_ptrack[i]['path'])
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(idx_ptrack[i]['path'])
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
@ -138,9 +130,11 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
self.clean_pb(node)
node.stop()
@unittest.skip("123")
def test_ptrack_cluster_brin(self):
print 'test_ptrack_cluster_brin started'
node = self.make_simple_node(base_dir="tmp_dirs/ptrack/test_ptrack_cluster_brin",
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/ptrack/{0}".format(fname),
set_replication=True,
initdb_params=['--data-checksums', '-A trust'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
@ -168,7 +162,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['old_size'], idx_ptrack[i]['path'])
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.init_pb(node)
self.backup_pb(node, backup_type='full', options=['-j100', '--stream'])
@ -184,9 +178,10 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['new_size'], idx_ptrack[i]['path'])
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(idx_ptrack[i]['path'])
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
@ -194,9 +189,11 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
self.clean_pb(node)
node.stop()
@unittest.skip("123")
def test_ptrack_cluster_gist(self):
print 'test_ptrack_cluster_gist started'
node = self.make_simple_node(base_dir="tmp_dirs/ptrack/test_ptrack_cluster_gist",
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/ptrack/{0}".format(fname),
set_replication=True,
initdb_params=['--data-checksums', '-A trust'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
@ -224,7 +221,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['old_size'], idx_ptrack[i]['path'])
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.init_pb(node)
self.backup_pb(node, backup_type='full', options=['-j100', '--stream'])
@ -240,9 +237,10 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['new_size'], idx_ptrack[i]['path'])
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(idx_ptrack[i]['path'])
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
@ -250,9 +248,11 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
self.clean_pb(node)
node.stop()
@unittest.skip("123")
def test_ptrack_cluster_gin(self):
print 'test_ptrack_cluster_gin started'
node = self.make_simple_node(base_dir="tmp_dirs/ptrack/test_ptrack_cluster_gin",
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/ptrack/{0}".format(fname),
set_replication=True,
initdb_params=['--data-checksums', '-A trust'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
@ -280,7 +280,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['old_size'], idx_ptrack[i]['path'])
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.init_pb(node)
self.backup_pb(node, backup_type='full', options=['-j100', '--stream'])
@ -296,9 +296,10 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['new_size'], idx_ptrack[i]['path'])
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(idx_ptrack[i]['path'])
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
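
Across these tests the helper calls are switched to a (path, size) argument order, and get_ptrack_bits_per_for_fork now takes the fork size explicitly as well. A minimal sketch of the per-page md5 idea behind get_md5_per_page_for_fork, assuming the standard 8 kB PostgreSQL block size (illustrative names, not the project's exact code):

import hashlib
import os

def md5_per_page(path, size_in_pages, blcksz=8192):
    # read the relation fork one page at a time and hash each block
    # separately, so changed pages can be matched against ptrack bits
    fd = os.open(path, os.O_RDONLY)
    pages = {}
    for page_num in range(size_in_pages):
        pages[page_num] = hashlib.md5(os.read(fd, blcksz)).hexdigest()
    os.close(fd)
    return pages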


@ -194,7 +194,7 @@ class ProbackupTest(object):
node.set_replication_conf()
# Setup archiving for node
if set_archiving:
node.set_archiving_conf(self.arcwal_dir(node))
self.set_archiving_conf(node, self.arcwal_dir(node))
return node
@ -223,7 +223,7 @@ class ProbackupTest(object):
return os.path.join(node.base_dir, 'data',
node.execute("postgres", "select pg_relation_filepath('{0}')".format(fork_name))[0][0])
def get_md5_per_page_for_fork(self, size, file):
def get_md5_per_page_for_fork(self, file, size):
file = os.open(file, os.O_RDONLY)
offset = 0
md5_per_page = {}
@ -258,7 +258,6 @@ class ProbackupTest(object):
size = idx_dict['new_size']
else:
size = idx_dict['old_size']
for PageNum in range(size):
if PageNum not in idx_dict['old_pages']:
# Page was not present before, meaning that relation got bigger
@ -272,9 +271,9 @@ class ProbackupTest(object):
if PageNum not in idx_dict['new_pages']:
# Page is not present now, meaning that relation got smaller
# Ptrack should be equal to 0, We are not freaking out about false positive stuff
if idx_dict['ptrack'][PageNum] != 0:
print 'Page Number {0} of type {1} was deleted, but ptrack value is {2}'.format(
PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum])
#if idx_dict['ptrack'][PageNum] != 0:
# print 'Page Number {0} of type {1} was deleted, but ptrack value is {2}'.format(
# PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum])
continue
# Ok, all pages in new_pages that do not have corresponding page in old_pages
# are been dealt with. We can now safely proceed to comparing old and new pages
@ -307,9 +306,8 @@ class ProbackupTest(object):
success = False
self.assertEqual(success, True)
def check_ptrack_clean(self, idx_dict):
def check_ptrack_clean(self, idx_dict, size):
success = True
size = idx_dict['size']
for PageNum in range(size):
if idx_dict['ptrack'][PageNum] != 0:
print 'Ptrack for Page Number {0} of Type {1} should be clean, but ptrack value is {2}. THIS IS BAD'.format(
@ -320,7 +318,7 @@ class ProbackupTest(object):
def run_pb(self, command):
try:
# print [self.probackup_path] + command
# print [self.probackup_path] + command
output = subprocess.check_output(
[self.probackup_path] + command,
stderr=subprocess.STDOUT,
@ -437,10 +435,10 @@ class ProbackupTest(object):
else:
# cut out empty lines and lines started with #
# and other garbage then reconstruct it as dictionary
print show_splitted
# print show_splitted
sanitized_show = [item for item in show_splitted if item]
sanitized_show = [item for item in sanitized_show if not item.startswith('#')]
print sanitized_show
# print sanitized_show
for line in sanitized_show:
name, var = line.partition(" = ")[::2]
var = var.strip('"')
@ -495,6 +493,26 @@ class ProbackupTest(object):
out_dict[key.strip()] = value.strip(" '").replace("'\n", "")
return out_dict
def set_archiving_conf(self, node, archive_dir):
node.append_conf(
"postgresql.auto.conf",
"wal_level = archive"
)
node.append_conf(
"postgresql.auto.conf",
"archive_mode = on"
)
if os.name == 'posix':
node.append_conf(
"postgresql.auto.conf",
"archive_command = 'test ! -f {0}/%f && cp %p {0}/%f'".format(archive_dir)
)
elif os.name == 'nt':
node.append_conf(
"postgresql.auto.conf",
"archive_command = 'copy %p {0}\\%f'".format(archive_dir)
)
def wrong_wal_clean(self, node, wal_size):
wals_dir = os.path.join(self.backup_dir(node), "wal")
wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f))]
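
The new set_archiving_conf helper replaces the node-level set_archiving_conf call in make_simple_node (see the change near the top of this file). On a POSIX system it appends lines like the following to postgresql.auto.conf; the /path/to/backup/wal directory here is illustrative, in practice it comes from self.arcwal_dir(node):

wal_level = archive
archive_mode = on
archive_command = 'test ! -f /path/to/backup/wal/%f && cp %p /path/to/backup/wal/%f'

The test ! -f guard makes the copy fail rather than overwrite a WAL segment that was already archived.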


@ -48,7 +48,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(idx_ptrack[i]['path'])
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['size'])
# check that ptrack has correct bits after recovery
self.check_ptrack_recovery(idx_ptrack[i])


@ -50,7 +50,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
for i in idx_ptrack:
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(idx_ptrack[i]['path'])
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['size'])
# check that ptrack has correct bits after recovery
self.check_ptrack_recovery(idx_ptrack[i])


@ -57,14 +57,15 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums for every page of this fork
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['old_size'], idx_ptrack[i]['path'])
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
# Make full backup to clean every ptrack
self.init_pb(node)
self.backup_pb(node, backup_type='full', options=['-j100', '--stream'])
for i in idx_ptrack:
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(idx_ptrack[i]['path'])
self.check_ptrack_clean(idx_ptrack[i])
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])
# Delete some rows, vacuum it and make checkpoint
node.psql('postgres', 'delete from t_heap where id%2 = 1')
@ -78,9 +79,10 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['new_size'], idx_ptrack[i]['path'])
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(idx_ptrack[i]['path'])
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])


@ -42,7 +42,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['old_size'], idx_ptrack[i]['path'])
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.init_pb(node)
self.backup_pb(node, backup_type='full', options=['-j100', '--stream'])
@ -57,9 +57,10 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['new_size'], idx_ptrack[i]['path'])
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(idx_ptrack[i]['path'])
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])


@ -42,7 +42,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['old_size'], idx_ptrack[i]['path'])
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.init_pb(node)
self.backup_pb(node, backup_type='full', options=['-j100', '--stream'])
@ -57,9 +57,10 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['new_size'], idx_ptrack[i]['path'])
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(idx_ptrack[i]['path'])
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])


@ -55,7 +55,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['old_size'], idx_ptrack[i]['path'])
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.init_pb(node)
self.backup_pb(node, backup_type='full', options=['-j100', '--stream'])
@ -71,11 +71,12 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['new_size'], idx_ptrack[i]['path'])
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(idx_ptrack[i]['path'])
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# compare pages and check ptrack sanity, most important
# compare pages and check ptrack sanity, the most important part
self.check_ptrack_sanity(idx_ptrack[i])
self.clean_pb(node)


@ -43,7 +43,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['old_size'], idx_ptrack[i]['path'])
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.init_pb(node)
self.backup_pb(node, backup_type='full', options=['-j100', '--stream'])
@ -59,9 +59,10 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['new_size'], idx_ptrack[i]['path'])
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(idx_ptrack[i]['path'])
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])


@ -406,7 +406,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
node = self.make_simple_node(base_dir="tmp_dirs/restore/{0}".format(fname),
set_archiving=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'}
pg_options={'wal_level': 'replica', 'ptrack_enable': 'on', 'max_wal_senders': '2'}
)
node.start()
self.assertEqual(self.init_pb(node), six.b(""))
@ -418,10 +418,10 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.skipTest("ptrack not supported")
return
node.append_conf("pg_hba.conf", "local replication all trust")
node.append_conf("pg_hba.conf", "host replication all 127.0.0.1/32 trust")
node.append_conf("postgresql.conf", "ptrack_enable = on")
node.append_conf("postgresql.conf", "max_wal_senders = 1")
#node.append_conf("pg_hba.conf", "local replication all trust")
#node.append_conf("pg_hba.conf", "host replication all 127.0.0.1/32 trust")
#node.append_conf("postgresql.conf", "ptrack_enable = on")
#node.append_conf("postgresql.conf", "max_wal_senders = 1")
node.restart()
with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
@ -471,7 +471,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
node = self.make_simple_node(base_dir="tmp_dirs/restore/{0}".format(fname),
set_archiving=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'}
pg_options={'wal_level': 'replica', 'ptrack_enable': 'on', 'max_wal_senders': '2'}
)
node.start()
self.assertEqual(self.init_pb(node), six.b(""))
@ -483,10 +483,10 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.skipTest("ptrack not supported")
return
node.append_conf("pg_hba.conf", "local replication all trust")
node.append_conf("pg_hba.conf", "host replication all 127.0.0.1/32 trust")
node.append_conf("postgresql.conf", "ptrack_enable = on")
node.append_conf("postgresql.conf", "max_wal_senders = 1")
#node.append_conf("pg_hba.conf", "local replication all trust")
#node.append_conf("pg_hba.conf", "host replication all 127.0.0.1/32 trust")
#node.append_conf("postgresql.conf", "ptrack_enable = on")
#node.append_conf("postgresql.conf", "max_wal_senders = 1")
node.restart()
pgbench = node.pgbench(