mirror of https://github.com/postgrespro/pg_probackup.git
synced 2025-01-27 12:01:29 +02:00

ptrack page header fix

This commit is contained in:
parent af29599e79
commit ec8d9b8ce4
@@ -17,7 +17,6 @@ class BackupTest(ProbackupTest, unittest.TestCase):
def test_backup_modes_archive(self):
"""standart backup modes with ARCHIVE WAL method"""
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
set_archiving=True,
initdb_params=['--data-checksums'],
@@ -74,7 +73,6 @@ class BackupTest(ProbackupTest, unittest.TestCase):
def test_smooth_checkpoint(self):
"""full backup with smooth checkpoint"""
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
set_archiving=True,
initdb_params=['--data-checksums'],
@@ -94,7 +92,6 @@ class BackupTest(ProbackupTest, unittest.TestCase):
def test_page_backup_without_full(self):
"""page-level backup without validated full backup"""
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
set_archiving=True,
initdb_params=['--data-checksums'],
@@ -114,10 +111,12 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("123")
def test_ptrack_threads(self):
"""ptrack multi thread backup mode"""
node = self.make_bnode(
base_dir="tmp_dirs/backup/ptrack_threads_4",
options={"ptrack_enable": "on", 'max_wal_senders': '2'}
)
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
set_archiving=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'ptrack_enable': 'on', 'max_wal_senders': '2'}
)
node.start()
self.assertEqual(self.init_pb(node), six.b(""))

@@ -137,7 +136,6 @@ class BackupTest(ProbackupTest, unittest.TestCase):
def test_ptrack_threads_stream(self):
"""ptrack multi thread backup mode and stream"""
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
set_replication=True,
initdb_params=['--data-checksums'],

@@ -1,7 +1,7 @@

pg_probackup - utility to manage backup/recovery of PostgreSQL database.

pg_probackup help
pg_probackup help [COMMAND]

pg_probackup version

@@ -21,20 +21,20 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
[-d dbname] [-h host] [-p port] [-U username]

pg_probackup restore -B backup-dir
[-D pgdata-dir] [-i backup-id]
[-D pgdata-dir] [-i backup-id] [--progress] [-q] [-v]
[--time=time|--xid=xid [--inclusive=boolean]]
[--timeline=timeline] [-T OLDDIR=NEWDIR]

pg_probackup validate -B backup-dir
[-D pgdata-dir] [-i backup-id]
[-D pgdata-dir] [-i backup-id] [--progress] [-q] [-v]
[--time=time|--xid=xid [--inclusive=boolean]]
[--timeline=timeline] [-T OLDDIR=NEWDIR]
[--timeline=timeline]

pg_probackup show -B backup-dir
[-i backup-id]

pg_probackup delete -B backup-dir
[--wal] [-i backup-id | --expired] [--force]
[--wal] [-i backup-id | --expired]

Read the website for details. <https://github.com/postgrespro/pg_probackup>
Report bugs to <https://github.com/postgrespro/pg_probackup/issues>.

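For context, the restore/validate synopsis above gains --progress, -q and -v. A hedged sketch of driving the restore command with the new flag the way the test helpers invoke the binary; the binary path and directories are placeholders, not part of the commit:

import subprocess

def restore_with_progress(probackup_path, backup_dir, data_dir, backup_id=None):
    # build the command from the synopsis above; --progress is the new flag
    cmd = [probackup_path, "restore", "-B", backup_dir, "-D", data_dir, "--progress"]
    if backup_id:
        cmd += ["-i", backup_id]
    # raises CalledProcessError on a non-zero exit code
    return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
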
@@ -1 +1 @@
pg_probackup 1.1.11
pg_probackup 1.1.11

@@ -9,10 +9,10 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
super(SimpleTest, self).__init__(*args, **kwargs)

def teardown(self):
# clean_all()
stop_all()

# @unittest.skip("123")
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_clean(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir='tmp_dirs/ptrack/{0}'.format(fname),
@@ -45,7 +45,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['size'])
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])

# Update everything, vacuum it and make PTRACK BACKUP
@@ -62,7 +62,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# # get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['size'])
# check that ptrack bits are cleaned
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])

@@ -81,7 +81,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# # get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['size'])
# check that ptrack bits are cleaned
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])

@@ -12,10 +12,9 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# clean_all()
stop_all()

# @unittest.skip("123")
# @unittest.skip("123")
def test_ptrack_cluster_btree(self):
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/ptrack/{0}".format(fname),
set_replication=True,
initdb_params=['--data-checksums', '-A trust'],
@@ -63,7 +62,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])

# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
@@ -71,10 +70,9 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
self.clean_pb(node)
node.stop()

@unittest.skip("123")
# @unittest.skip("123")
def test_ptrack_cluster_spgist(self):
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/ptrack/{0}".format(fname),
set_replication=True,
initdb_params=['--data-checksums', '-A trust'],
@@ -122,7 +120,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])

# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
@@ -130,10 +128,9 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
self.clean_pb(node)
node.stop()

@unittest.skip("123")
# @unittest.skip("123")
def test_ptrack_cluster_brin(self):
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/ptrack/{0}".format(fname),
set_replication=True,
initdb_params=['--data-checksums', '-A trust'],
@@ -181,7 +178,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])

# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
@@ -189,10 +186,9 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
self.clean_pb(node)
node.stop()

@unittest.skip("123")
# @unittest.skip("123")
def test_ptrack_cluster_gist(self):
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/ptrack/{0}".format(fname),
set_replication=True,
initdb_params=['--data-checksums', '-A trust'],
@@ -240,7 +236,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])

# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
@@ -248,10 +244,9 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
self.clean_pb(node)
node.stop()

@unittest.skip("123")
# @unittest.skip("123")
def test_ptrack_cluster_gin(self):
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/ptrack/{0}".format(fname),
set_replication=True,
initdb_params=['--data-checksums', '-A trust'],
@@ -299,7 +294,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])

# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])

@@ -152,29 +152,6 @@ class ProbackupTest(object):
def backup_dir(self, node):
return os.path.abspath("%s/backup" % node.base_dir)

def make_bnode(self, base_dir=None, allows_streaming=False, options={}):
real_base_dir = os.path.join(self.dir_path, base_dir)
shutil.rmtree(real_base_dir, ignore_errors=True)

node = get_new_node('test', base_dir=real_base_dir)
node.init(allows_streaming=allows_streaming)

if not allows_streaming:
node.append_conf("postgresql.auto.conf", "wal_level = hot_standby")
node.append_conf("postgresql.auto.conf", "archive_mode = on")
node.append_conf(
"postgresql.auto.conf",
"""archive_command = 'cp "%%p" "%s/%%f"'""" % os.path.abspath(self.arcwal_dir(node))
)

for key, value in six.iteritems(options):
node.append_conf("postgresql.conf", "%s = %s" % (key, value))

return node

# def print_started(self, fname):
# print

def make_simple_node(self, base_dir=None, set_replication=False,
set_archiving=False, initdb_params=[], pg_options={}):
real_base_dir = os.path.join(self.dir_path, base_dir)
@@ -184,6 +161,7 @@ class ProbackupTest(object):
node.init(initdb_params=initdb_params)

# Sane default parameters, not a shit with fsync = off from testgres
node.append_conf("postgresql.auto.conf", "{0} = {1}".format('shared_buffers', '10MB'))
node.append_conf("postgresql.auto.conf", "{0} = {1}".format('fsync', 'on'))
node.append_conf("postgresql.auto.conf", "{0} = {1}".format('wal_level', 'minimal'))

@@ -199,7 +177,6 @@ class ProbackupTest(object):
self.set_archiving_conf(node, self.arcwal_dir(node))
return node


def create_tblspace_in_node(self, node, tblspc_name, cfs=False):
res = node.execute(
"postgres", "select exists (select 1 from pg_tablespace where spcname = '{0}')".format(
@@ -236,12 +213,16 @@ class ProbackupTest(object):
os.close(file)
return md5_per_page

def get_ptrack_bits_per_page_for_fork(self, file, size):
def get_ptrack_bits_per_page_for_fork(self, node, file, size):
if self.get_pgpro_edition(node) == 'enterprise':
header_size = 48
else:
header_size = 24
ptrack_bits_for_fork = []
byte_size = os.path.getsize(file + '_ptrack')
byte_size_minus_header = byte_size - 24
byte_size_minus_header = byte_size - header_size
file = os.open(file + '_ptrack', os.O_RDONLY)
os.lseek(file, 24, 0)
os.lseek(file, header_size, 0)
lot_of_bytes = os.read(file, byte_size_minus_header)
for byte in lot_of_bytes:
byte_inverted = bin(ord(byte))[2:].rjust(8, '0')[::-1]
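
The hunk above is the page-header fix that gives the commit its name: get_ptrack_bits_per_page_for_fork now asks the node for its edition and skips a 48-byte ptrack page header on Postgres Pro Enterprise instead of the 24 bytes used otherwise, before decoding the bitmap. A standalone sketch of the same decoding idea, with the header sizes taken from the diff and everything else (function name, simplified return value) being illustrative only:

import os

def read_ptrack_bits(ptrack_path, header_size=24):
    # header_size is 24 for vanilla PostgreSQL and 48 for the enterprise
    # edition, per the diff above; the rest of the layout is assumed here.
    total = os.path.getsize(ptrack_path)
    with open(ptrack_path, 'rb') as f:
        f.seek(header_size)
        payload = f.read(total - header_size)
    bits = []
    for byte in bytearray(payload):
        # least-significant bit first, mirroring the [::-1] reversal above
        bits.extend(int(b) for b in bin(byte)[2:].rjust(8, '0')[::-1])
    return bits
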
@@ -316,20 +297,32 @@ class ProbackupTest(object):
success = False
self.assertEqual(success, True)

def run_pb(self, command):
def run_pb(self, command, async=False):
try:
# print [self.probackup_path] + command
output = subprocess.check_output(
[self.probackup_path] + command,
stderr=subprocess.STDOUT,
env=self.test_env
)
#print ' '.join(map(str,[self.probackup_path] + command))
if async is True:
return subprocess.Popen(
[self.probackup_path] + command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self.test_env
)
else:
output = subprocess.check_output(
[self.probackup_path] + command,
stderr=subprocess.STDOUT,
env=self.test_env
)
if command[0] == 'backup':
if '-q' in command or '--quiet' in command:
return None
elif '-v' in command or '--verbose' in command:
return output
else:
# return backup ID
return output.split()[2]
for line in output.splitlines():
if 'INFO: Backup' and 'completed' in line:
return line.split()[2]
else:
return output
except subprocess.CalledProcessError as e:
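
The run_pb change above adds an async flag: when it is set the helper returns a subprocess.Popen handle instead of blocking in check_output, and the backup branch now scans the captured output for the "INFO: Backup ... completed" line to pull out the backup ID. Two small observations on the Python here: the condition 'INFO: Backup' and 'completed' in line reduces to 'completed' in line, because the non-empty string literal is always truthy, and async is a reserved word from Python 3.7 on, so the helper as written stays Python 2 only. A hedged sketch of the same synchronous/asynchronous split with illustrative names:

import subprocess

def run_cmd(argv, env=None, wait=True):
    # wait=False mirrors the async=True branch above: hand back the process
    # and let the caller decide when to communicate()/wait().
    if not wait:
        return subprocess.Popen(argv, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, env=env)
    # wait=True mirrors the default branch: block and capture merged output.
    return subprocess.check_output(argv, stderr=subprocess.STDOUT, env=env)
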
@@ -346,50 +339,38 @@ class ProbackupTest(object):
def clean_pb(self, node):
shutil.rmtree(self.backup_dir(node), ignore_errors=True)

def backup_pb(self, node, backup_type="full", options=[]):
def backup_pb(self, node=None, data_dir=None, backup_dir=None, backup_type="full", options=[], async=False):
if data_dir is None:
data_dir = node.data_dir
if backup_dir is None:
backup_dir = self.backup_dir(node)

cmd_list = [
"backup",
"-D", node.data_dir,
"-B", self.backup_dir(node),
"-B", backup_dir,
"-D", data_dir,
"-p", "%i" % node.port,
"-d", "postgres"
]
if backup_type:
cmd_list += ["-b", backup_type]

return self.run_pb(cmd_list + options)
return self.run_pb(cmd_list + options, async)

def backup_pb_proc(self, node, backup_type="full",
stdout=None, stderr=None, options=[]):
cmd_list = [
self.probackup_path,
"backup",
"-D", node.data_dir,
"-B", self.backup_dir(node),
"-p", "%i" % (node.port),
"-d", "postgres"
]
if backup_type:
cmd_list += ["-b", backup_type]
def restore_pb(self, node=None, backup_dir=None, data_dir=None, id=None, options=[]):
if data_dir is None:
data_dir = node.data_dir
if backup_dir is None:
backup_dir = self.backup_dir(node)

proc = subprocess.Popen(
cmd_list + options,
stdout=stdout,
stderr=stderr
)

return proc

def restore_pb(self, node, id=None, options=[]):
cmd_list = [
"restore",
"-D", node.data_dir,
"-B", self.backup_dir(node)
"-B", backup_dir,
"-D", data_dir
]
if id:
cmd_list += ["-i", id]

# print(cmd_list)
return self.run_pb(cmd_list + options)

def show_pb(self, node, id=None, options=[], as_text=False):
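
backup_pb and restore_pb above now take optional data_dir and backup_dir arguments that fall back to the node's own directories, and backup_pb forwards the async flag to run_pb, which makes the separate backup_pb_proc helper unnecessary. A hedged sketch of that defaulting pattern in isolation; the node object here only stands in for the testgres node used above, and its backup_dir attribute is an assumption for the sketch:

def build_backup_cmd(node, data_dir=None, backup_dir=None,
                     backup_type="full", options=None):
    # explicit directories win; otherwise fall back to the node's own paths,
    # as the reworked backup_pb does above
    data_dir = data_dir if data_dir is not None else node.data_dir
    backup_dir = backup_dir if backup_dir is not None else node.backup_dir
    cmd = ["backup", "-B", backup_dir, "-D", data_dir,
           "-p", "%i" % node.port, "-d", "postgres"]
    if backup_type:
        cmd += ["-b", backup_type]
    return cmd + (options or [])
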
@@ -417,13 +398,17 @@ class ProbackupTest(object):
body = body[::-1]
# split string in list with string for every header element
header_split = re.split(" +", header)
# CRUNCH, remove last item, because it empty, like that ''
header_split.pop()
# Remove empty items
for i in header_split:
if i == '':
header_split.remove(i)
for backup_record in body:
# split string in list with string for every backup record element
backup_record_split = re.split(" +", backup_record)
# CRUNCH, remove last item, because it empty, like that ''
backup_record_split.pop()
# Remove empty items
for i in backup_record_split:
if i == '':
backup_record_split.remove(i)
if len(header_split) != len(backup_record_split):
print warning.format(
header=header, body=body,
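
The show_pb hunk above swaps the single trailing pop() for a loop that strips empty strings out of the re.split result. One caution worth noting, hedged as a general Python point rather than a claim about this code: calling list.remove() while iterating over the same list can skip neighbouring elements, so the usual safe form is a comprehension:

import re

def split_columns(line):
    # split on runs of spaces and drop empty tokens without mutating the
    # list while iterating over it
    return [token for token in re.split(" +", line) if token != '']
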
@@ -500,25 +485,34 @@ class ProbackupTest(object):
out_dict[key.strip()] = value.strip(" '").replace("'\n", "")
return out_dict

def set_archiving_conf(self, node, archive_dir):
def set_archiving_conf(self, node, archive_dir=False, replica=False):
if not archive_dir:
archive_dir = self.arcwal_dir(node)

if replica:
archive_mode = 'always'
node.append_conf('postgresql.auto.conf', 'hot_standby = on')
else:
archive_mode = 'on'

node.append_conf(
"postgresql.auto.conf",
"wal_level = archive"
)
node.append_conf(
"postgresql.auto.conf",
"archive_mode = on"
"archive_mode = {0}".format(archive_mode)
)
if os.name == 'posix':
node.append_conf(
"postgresql.auto.conf",
"archive_command = 'test ! -f {0}/%f && cp %p {0}/%f'".format(archive_dir)
)
elif os.name == 'nt':
node.append_conf(
"postgresql.auto.conf",
"archive_command = 'copy %p {0}\\%f'".format(archive_dir)
)
#elif os.name == 'nt':
# node.append_conf(
# "postgresql.auto.conf",
# "archive_command = 'copy %p {0}\\%f'".format(archive_dir)
# )

def wrong_wal_clean(self, node, wal_size):
wals_dir = os.path.join(self.backup_dir(node), "wal")
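
set_archiving_conf above now also covers replicas: archive_dir defaults to the node's WAL archive, archive_mode becomes 'always' (with hot_standby = on) for a replica and stays 'on' otherwise, and the archive_command is emitted per platform. A hedged sketch of just the settings the helper appends on a POSIX host; the function name is illustrative and the archive directory is a placeholder:

def archiving_settings(archive_dir, replica=False):
    # values mirror the diff above; 'always' plus hot_standby is the replica case
    settings = [
        "wal_level = archive",
        "archive_mode = %s" % ("always" if replica else "on"),
        "archive_command = 'test ! -f {0}/%f && cp %p {0}/%f'".format(archive_dir),
    ]
    if replica:
        settings.append("hot_standby = on")
    return settings
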
@@ -536,4 +530,9 @@ class ProbackupTest(object):
var = node.execute("postgres", "select setting from pg_settings where name = 'wal_block_size'")
return int(var[0][0])

# def ptrack_node(self, ptrack_enable=False, wal_level='minimal', max_wal_senders='2', allow_replication=True)
def get_pgpro_edition(self, node):
if node.execute("postgres", "select exists(select 1 from pg_proc where proname = 'pgpro_edition')")[0][0]:
var = node.execute("postgres", "select pgpro_edition()")
return str(var[0][0])
else:
return False

@@ -17,7 +17,6 @@ class SimpleTest(ProbackupTest, unittest.TestCase):

def test_ptrack_recovery(self):
fname = self.id().split(".")[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/ptrack/{0}".format(fname),
set_replication=True,
initdb_params=['--data-checksums', '-A trust'],
@@ -49,7 +48,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['size'])
# check that ptrack has correct bits after recovery
self.check_ptrack_recovery(idx_ptrack[i])

@@ -17,7 +17,6 @@ class SimpleTest(ProbackupTest, unittest.TestCase):

def test_ptrack_recovery(self):
fname = self.id().split(".")[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/ptrack/{0}".format(fname),
set_replication=True,
initdb_params=['--data-checksums', '-A trust'],
@@ -51,7 +50,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
for i in idx_ptrack:
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['size'])
# check that ptrack has correct bits after recovery
self.check_ptrack_recovery(idx_ptrack[i])

@@ -12,10 +12,10 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# clean_all()
stop_all()

# @unittest.skip("123")
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum(self):
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir='tmp_dirs/ptrack/{0}'.format(fname),
set_replication=True,
initdb_params=['--data-checksums', '-A trust'],
@@ -51,7 +51,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
self.backup_pb(node, backup_type='full', options=['-j100', '--stream'])
for i in idx_ptrack:
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])

# Delete some rows, vacuum it and make checkpoint
@@ -69,7 +69,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])

# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])

@@ -14,7 +14,6 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
stop_all()

def test_ptrack_vacuum_bits_frozen(self):
print 'test_ptrack_vacuum_bits_frozen started'
node = self.make_simple_node(base_dir="tmp_dirs/ptrack/test_ptrack_vacuum_bits_frozen",
set_replication=True,
initdb_params=['--data-checksums', '-A trust'],
@@ -60,7 +59,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])

# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])

@@ -14,7 +14,6 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
stop_all()

def test_ptrack_vacuum_bits_visibility(self):
print 'test_ptrack_vacuum_bits_visibility started'
node = self.make_simple_node(base_dir="tmp_dirs/ptrack/test_ptrack_vacuum_bits_visibility",
set_replication=True,
initdb_params=['--data-checksums', '-A trust'],
@@ -60,7 +59,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])

# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])

@@ -27,7 +27,6 @@ class SimpleTest(ProbackupTest, unittest.TestCase):

def test_ptrack_vacuum_full(self):
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir='tmp_dirs/ptrack/{0}'.format(fname),
set_replication=True,
initdb_params=['--data-checksums', '-A trust'],
@@ -75,7 +74,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])

# compare pages and check ptrack sanity, the most important part
self.check_ptrack_sanity(idx_ptrack[i])

@@ -14,7 +14,6 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
stop_all()

def test_ptrack_vacuum_truncate(self):
print 'test_ptrack_vacuum_truncate started'
node = self.make_simple_node(base_dir="tmp_dirs/ptrack/test_ptrack_vacuum_truncate",
set_replication=True,
initdb_params=['--data-checksums', '-A trust'],
@@ -62,7 +61,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
node, idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])

# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])

@@ -22,7 +22,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
def test_restore_full_to_latest(self):
"""recovery to latest from full backup"""
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/restore/{0}".format(fname),
set_archiving=True,
initdb_params=['--data-checksums'],
@@ -61,7 +60,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
def test_restore_full_page_to_latest(self):
"""recovery to latest from full + page backups"""
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/restore/{0}".format(fname),
set_archiving=True,
initdb_params=['--data-checksums'],
@@ -101,7 +99,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
def test_restore_to_timeline(self):
"""recovery to target timeline"""
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/restore/{0}".format(fname),
set_archiving=True,
initdb_params=['--data-checksums'],
@@ -156,7 +153,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
def test_restore_to_time(self):
"""recovery to target timeline"""
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/restore/{0}".format(fname),
set_archiving=True,
initdb_params=['--data-checksums'],
@@ -195,7 +191,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
def test_restore_to_xid(self):
"""recovery to target xid"""
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/restore/{0}".format(fname),
set_archiving=True,
initdb_params=['--data-checksums'],
@@ -249,7 +244,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
def test_restore_full_ptrack(self):
"""recovery to latest from full + ptrack backups"""
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/restore/{0}".format(fname),
set_archiving=True,
initdb_params=['--data-checksums'],
@@ -297,7 +291,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
def test_restore_full_ptrack_ptrack(self):
"""recovery to latest from full + ptrack + ptrack backups"""
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/restore/{0}".format(fname),
set_archiving=True,
initdb_params=['--data-checksums'],
@@ -352,7 +345,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
def test_restore_full_ptrack_stream(self):
"""recovery in stream mode to latest from full + ptrack backups"""
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/restore/{0}".format(fname),
set_replication=True,
initdb_params=['--data-checksums'],
@@ -397,7 +389,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
def test_restore_full_ptrack_under_load(self):
"""recovery to latest from full + ptrack backups with loads when ptrack backup do"""
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/restore/{0}".format(fname),
set_archiving=True,
set_replication=True,
@@ -456,7 +447,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
def test_restore_full_under_load_ptrack(self):
"""recovery to latest from full + page backups with loads when full backup do"""
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/restore/{0}".format(fname),
set_archiving=True,
set_replication=True,
@@ -516,7 +506,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
def test_restore_to_xid_inclusive(self):
"""recovery with target inclusive false"""
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/restore/{0}".format(fname),
set_archiving=True,
initdb_params=['--data-checksums'],
@@ -575,7 +564,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
def test_restore_with_tablespace_mapping_1(self):
"""recovery using tablespace-mapping option"""
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/restore/{0}".format(fname),
set_archiving=True,
initdb_params=['--data-checksums'],
@@ -663,7 +651,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
def test_restore_with_tablespace_mapping_2(self):
"""recovery using tablespace-mapping option and page backup"""
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/restore/{0}".format(fname),
set_archiving=True,
initdb_params=['--data-checksums'],

@@ -19,7 +19,6 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
def test_retention_redundancy_1(self):
"""purge backups using redundancy-based retention policy"""
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/retention/{0}".format(fname),
set_archiving=True,
initdb_params=['--data-checksums'],
@@ -65,7 +64,6 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
def test_retention_window_2(self):
"""purge backups using window-based retention policy"""
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/retention/{0}".format(fname),
set_archiving=True,
initdb_params=['--data-checksums'],

@@ -12,18 +12,14 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
def __init__(self, *args, **kwargs):
super(ValidateTest, self).__init__(*args, **kwargs)

# @classmethod
# def tearDownClass(cls):
# try:
# stop_all()
# except:
# pass
@classmethod
def tearDownClass(cls):
stop_all()

# @unittest.skip("123")
def test_validate_wal_1(self):
"""recovery to latest from full backup"""
fname = self.id().split('.')[3]
print '\n {0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/validate/{0}".format(fname),
set_archiving=True,
initdb_params=['--data-checksums'],
@@ -62,19 +58,15 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
self.validate_pb(node, options=["--time='{:%Y-%m-%d %H:%M:%S}'".format(
after_backup_time - timedelta(days=2))])
# we should die here because exception is what we expect to happen
exit(1)
self.assertEqual(1, 0, "Error in validation is expected because of validation of unreal time")
except ProbackupException, e:
self.assertEqual(
e.message,
'ERROR: Full backup satisfying target options is not found.\n'
)
self.assertEqual(e.message, 'ERROR: Full backup satisfying target options is not found.\n')

# Validate to unreal time #2
try:
self.validate_pb(node, options=["--time='{:%Y-%m-%d %H:%M:%S}'".format(
after_backup_time + timedelta(days=2))])
# we should die here because exception is what we expect to happen
exit(1)
self.assertEqual(1, 0, "Error in validation is expected because of validation of unreal time")
except ProbackupException, e:
self.assertEqual(
True,
@@ -95,8 +87,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
# Validate to unreal xid
try:
self.validate_pb(node, options=["--xid=%d" % (int(target_xid) + 1000)])
# we should die here because exception is what we expect to happen
exit(1)
self.assertEqual(1, 0, "Error in validation is expected because of validation of unreal xid")
except ProbackupException, e:
self.assertEqual(
True,
@@ -120,22 +111,16 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
try:
self.validate_pb(node, id_backup, options=['--xid=%s' % target_xid])
# we should die here because exception is what we expect to happen
exit(1)
self.assertEqual(1, 0, "Expecting Error because of wal segment corruption")
except ProbackupException, e:
self.assertEqual(
True,
'Possible WAL CORRUPTION' in e.message
)
self.assertTrue(True, 'Possible WAL CORRUPTION' in e.message)

try:
self.validate_pb(node)
# we should die here because exception is what we expect to happen
exit(1)
self.assertEqual(1, 0, "Expecting Error because of wal segment corruption")
except ProbackupException, e:
self.assertEqual(
True,
'Possible WAL CORRUPTION' in e.message
)
self.assertTrue(True, 'Possible WAL CORRUPTION' in e.message)

node.stop()

@@ -143,7 +128,6 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
def test_validate_wal_lost_segment_1(self):
"""Loose segment which belong to some backup"""
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/validate/{0}".format(fname),
set_archiving=True,
initdb_params=['--data-checksums'],
@@ -167,19 +151,15 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
os.remove(os.path.join(self.backup_dir(node), "wal", wals[1]))
try:
self.validate_pb(node)
# we should die here because exception is what we expect to happen
exit(1)
self.assertEqual(1, 0, "Expecting Error because of wal segment disappearance")
except ProbackupException, e:
self.assertEqual(
True,
'is absent' in e.message
)
self.assertTrue('is absent' in e.message)
node.stop()

@unittest.expectedFailure
def test_validate_wal_lost_segment_2(self):
"""Loose segment located between backups """
fname = self.id().split('.')[3]
print '{0} started'.format(fname)
node = self.make_simple_node(base_dir="tmp_dirs/validate/{0}".format(fname),
set_archiving=True,
initdb_params=['--data-checksums'],
@@ -209,7 +189,6 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
wals = map(int, wals)

# delete last wal segment
print os.path.join(self.backup_dir(node), "wal", '0000000' + str(max(wals)))
os.remove(os.path.join(self.backup_dir(node), "wal", '0000000' + str(max(wals))))

# Need more accurate error message about loosing wal segment between backups

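The validate_test hunks above replace the bare exit(1) guards with self.assertEqual(1, 0, ...) so that a missing exception fails the test instead of killing the interpreter, and collapse the multi-line assertEqual calls; note, though, that assertTrue(True, expr) always passes because the second argument is only the failure message, so the usual form would be assertTrue(expr, msg). A self-contained, hedged sketch of the same "this call must raise" guard written with assertRaises; FakeError and do_validate stand in for ProbackupException and validate_pb:

import unittest

class MustFailSketch(unittest.TestCase):
    def test_call_must_raise(self):
        class FakeError(Exception):
            pass

        def do_validate():
            # stands in for validate_pb() hitting an unreal --time/--xid
            raise FakeError('ERROR: Full backup satisfying target options is not found.')

        # the block fails the test if no exception is raised at all
        with self.assertRaises(FakeError) as ctx:
            do_validate()
        self.assertIn('ERROR:', str(ctx.exception))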