2017-05-03 13:14:48 +02:00
|
|
|
# you need os for unittest to work
|
|
|
|
import os
|
2017-06-27 07:42:52 +02:00
|
|
|
from sys import exit, argv, version_info
|
2017-05-03 13:14:48 +02:00
|
|
|
import subprocess
|
|
|
|
import shutil
|
|
|
|
import six
|
2017-10-04 16:03:03 +02:00
|
|
|
import testgres
|
2017-05-03 13:14:48 +02:00
|
|
|
import hashlib
|
|
|
|
import re
|
2018-12-17 16:54:38 +02:00
|
|
|
import getpass
|
2018-01-10 15:03:33 +02:00
|
|
|
import select
|
|
|
|
import psycopg2
|
|
|
|
from time import sleep
|
2018-01-27 17:31:19 +02:00
|
|
|
import re
|
2018-06-02 19:35:37 +02:00
|
|
|
import json
|
2017-05-03 13:14:48 +02:00
|
|
|
|
|
|
|
# Catalogue of the relations exercised by the ptrack tests.
# Keys are relation names; each entry records the relation 'type'
# (heap or an index access method), and for indexes the indexed
# 'column' plus the 'relation' (heap table) the index is built on.
idx_ptrack = {
    't_heap': {
        'type': 'heap'
    },
    't_btree': {
        'type': 'btree',
        'column': 'text',
        'relation': 't_heap'
    },
    't_seq': {
        'type': 'seq',
        'column': 't_seq',
        'relation': 't_heap'
    },
    't_spgist': {
        'type': 'spgist',
        'column': 'text',
        'relation': 't_heap'
    },
    't_brin': {
        'type': 'brin',
        'column': 'text',
        'relation': 't_heap'
    },
    't_gist': {
        'type': 'gist',
        'column': 'tsvector',
        'relation': 't_heap'
    },
    't_gin': {
        'type': 'gin',
        'column': 'tsvector',
        'relation': 't_heap'
    },
    't_hash': {
        'type': 'hash',
        'column': 'id',
        'relation': 't_heap'
    },
    't_bloom': {
        'type': 'bloom',
        'column': 'id',
        'relation': 't_heap'
    }
}
|
|
|
|
|
|
|
|
warning = """
|
|
|
|
Wrong splint in show_pb
|
|
|
|
Original Header:
|
|
|
|
{header}
|
|
|
|
Original Body:
|
|
|
|
{body}
|
|
|
|
Splitted Header
|
|
|
|
{header_split}
|
|
|
|
Splitted Body
|
|
|
|
{body_split}
|
|
|
|
"""
|
|
|
|
|
|
|
|
|
|
|
|
def dir_files(base_dir):
    """Return a sorted list of every file and sub-directory under *base_dir*.

    Entries are paths relative to *base_dir*; *base_dir* itself is not
    included.  Used to compare two directory trees entry-by-entry.
    """
    collected = []
    for root, _subdirs, files in os.walk(base_dir):
        # Skip the root itself -- only descendants are listed.
        if root != base_dir:
            collected.append(os.path.relpath(root, base_dir))
        collected.extend(
            os.path.relpath(os.path.join(root, name), base_dir)
            for name in files
        )
    return sorted(collected)
|
|
|
|
|
2017-12-22 01:04:59 +02:00
|
|
|
|
2017-12-22 00:39:16 +02:00
|
|
|
def is_enterprise():
    """Return True when $PG_CONFIG points at a Postgres Pro build.

    Runs ``pg_config --help`` and looks for the 'postgrespro.ru' URL that
    Postgres Pro binaries print in their help output.
    """
    # pg_config --help
    if os.name == 'posix':
        cmd = [os.environ['PG_CONFIG'], '--help']
    elif os.name == 'nt':
        # Fix: the original built a nested list [[path], ['--help']] here,
        # which subprocess.Popen cannot execute.
        cmd = [os.environ['PG_CONFIG'], '--help']
    else:
        # Fix: 'cmd' was left unbound on any other platform, producing a
        # confusing NameError below instead of a clear failure.
        raise OSError('Unsupported platform: {0}'.format(os.name))

    p = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    if b'postgrespro.ru' in p.communicate()[0]:
        return True
    else:
        return False
|
|
|
|
|
|
|
|
|
2017-12-22 01:04:59 +02:00
|
|
|
class ProbackupException(Exception):
    """Raised when a pg_probackup invocation exits with a non-zero status.

    Carries the process output (*message*) and the command line that was
    executed (*cmd*) so test failures show exactly what was run.
    """

    def __init__(self, message, cmd):
        self.message = message
        self.cmd = cmd

    def __str__(self):
        return '\n ERROR: %s\n CMD: %s' % (repr(self.message), self.cmd)
|
|
|
|
|
|
|
|
|
2018-07-14 01:45:17 +02:00
|
|
|
def slow_start(self, replica=False):
    """Start a testgres node and block until it accepts queries.

    Bound onto node instances by make_simple_node().  Polls the server
    until ``pg_is_in_recovery()`` reports the expected state: a replica
    must be in recovery, a primary must not.
    """

    # wait for https://github.com/postgrespro/testgres/pull/50
    # (poll_query_until with suppressed connection errors would replace
    # this hand-rolled loop)
    if replica:
        query = 'SELECT pg_is_in_recovery()'
    else:
        query = 'SELECT not pg_is_in_recovery()'

    self.start()
    while True:
        try:
            if self.safe_psql('postgres', query) == 't\n':
                break
        except testgres.QueryException as e:
            # Fix: the original tested `e[0]`, which raises TypeError on
            # Python 3 (exceptions are not indexable); inspect the message
            # text instead.
            if 'database system is starting up' not in str(e):
                raise e
        # Avoid a hot busy-loop while the server is still starting.
        sleep(0.5)
|
2018-07-11 09:50:38 +02:00
|
|
|
|
|
|
|
|
2017-05-03 13:14:48 +02:00
|
|
|
class ProbackupTest(object):
    # Class attributes
    # Evaluated once at import time: True when the pg_config named by
    # $PG_CONFIG reports a Postgres Pro (enterprise) build.
    enterprise = is_enterprise()
|
|
|
|
|
2017-05-03 13:14:48 +02:00
|
|
|
    def __init__(self, *args, **kwargs):
        """Prepare the shared test fixture state.

        Builds a sanitized environment for child processes, configures
        testgres, creates the tmp_dirs working directory and locates the
        pg_probackup binaries (current and, optionally, an old version
        for upgrade tests).
        """
        super(ProbackupTest, self).__init__(*args, **kwargs)
        # Honour unittest's verbosity flags passed on the command line.
        if '-v' in argv or '--verbose' in argv:
            self.verbose = True
        else:
            self.verbose = False

        # Child processes get a copy of the environment with every
        # libpq-related variable removed, so tests are not influenced by
        # the developer's local PostgreSQL settings.
        self.test_env = os.environ.copy()
        envs_list = [
            'LANGUAGE',
            'LC_ALL',
            'PGCONNECT_TIMEOUT',
            'PGDATA',
            'PGDATABASE',
            'PGHOSTADDR',
            'PGREQUIRESSL',
            'PGSERVICE',
            'PGSSLMODE',
            'PGUSER',
            'PGPORT',
            'PGHOST'
        ]

        for e in envs_list:
            try:
                del self.test_env[e]
            except:
                pass

        # Force the C locale so server messages are parseable by the tests.
        self.test_env['LC_MESSAGES'] = 'C'
        self.test_env['LC_TIME'] = 'C'

        # PG_PROBACKUP_PARANOIA=ON enables extra pgdata content checks.
        self.paranoia = False
        if 'PG_PROBACKUP_PARANOIA' in self.test_env:
            if self.test_env['PG_PROBACKUP_PARANOIA'] == 'ON':
                self.paranoia = True

        # ARCHIVE_COMPRESSION=ON makes archive tests use compressed WAL.
        self.archive_compress = False
        if 'ARCHIVE_COMPRESSION' in self.test_env:
            if self.test_env['ARCHIVE_COMPRESSION'] == 'ON':
                self.archive_compress = True
        # Disable all testgres caching; older testgres versions lack this
        # API, hence the blanket try/except.
        try:
            testgres.configure_testgres(
                cache_initdb=False,
                cached_initdb_dir=False,
                cache_pg_config=False,
                node_cleanup_full=False)
        except:
            pass

        self.helpers_path = os.path.dirname(os.path.realpath(__file__))
        self.dir_path = os.path.abspath(
            os.path.join(self.helpers_path, os.pardir)
        )
        self.tmp_path = os.path.abspath(
            os.path.join(self.dir_path, 'tmp_dirs')
        )
        # Directory may already exist from a previous run.
        try:
            os.makedirs(os.path.join(self.dir_path, 'tmp_dirs'))
        except:
            pass

        self.user = self.get_username()
        # Locate the pg_probackup binary: $PGPROBACKUPBIN first, then the
        # server's BINDIR, then ../pg_probackup relative to the tests.
        self.probackup_path = None
        if 'PGPROBACKUPBIN' in self.test_env:
            if (
                os.path.isfile(self.test_env["PGPROBACKUPBIN"]) and
                os.access(self.test_env["PGPROBACKUPBIN"], os.X_OK)
            ):
                self.probackup_path = self.test_env["PGPROBACKUPBIN"]
            else:
                if self.verbose:
                    print('PGPROBACKUPBIN is not an executable file')

        if not self.probackup_path:
            probackup_path_tmp = os.path.join(
                testgres.get_pg_config()['BINDIR'], 'pg_probackup')

            if os.path.isfile(probackup_path_tmp):
                if not os.access(probackup_path_tmp, os.X_OK):
                    print('{0} is not an executable file'.format(
                        probackup_path_tmp))
                else:
                    self.probackup_path = probackup_path_tmp

        if not self.probackup_path:
            probackup_path_tmp = os.path.abspath(os.path.join(
                self.dir_path, '../pg_probackup'))

            if os.path.isfile(probackup_path_tmp):
                if not os.access(probackup_path_tmp, os.X_OK):
                    print('{0} is not an executable file'.format(
                        probackup_path_tmp))
                else:
                    self.probackup_path = probackup_path_tmp

        if not self.probackup_path:
            print('pg_probackup binary is not found')
            exit(1)

        # Prepend the binary's directory to PATH (':' on POSIX, ';' on
        # Windows) so child tools resolve the same pg_probackup.
        if os.name == 'posix':
            os.environ['PATH'] = os.path.dirname(
                self.probackup_path) + ':' + os.environ['PATH']

        elif os.name == 'nt':
            os.environ['PATH'] = os.path.dirname(
                self.probackup_path) + ';' + os.environ['PATH']

        # Optional old binary used by backward-compatibility tests.
        self.probackup_old_path = None

        if 'PGPROBACKUPBIN_OLD' in self.test_env:
            if (
                os.path.isfile(self.test_env['PGPROBACKUPBIN_OLD']) and
                os.access(self.test_env['PGPROBACKUPBIN_OLD'], os.X_OK)
            ):
                self.probackup_old_path = self.test_env['PGPROBACKUPBIN_OLD']
            else:
                if self.verbose:
                    print('PGPROBACKUPBIN_OLD is not an executable file')
|
|
|
|
|
2017-06-07 16:52:07 +02:00
|
|
|
def make_simple_node(
|
|
|
|
self,
|
|
|
|
base_dir=None,
|
|
|
|
set_replication=False,
|
|
|
|
initdb_params=[],
|
|
|
|
pg_options={}):
|
|
|
|
|
|
|
|
real_base_dir = os.path.join(self.tmp_path, base_dir)
|
2017-05-03 13:14:48 +02:00
|
|
|
shutil.rmtree(real_base_dir, ignore_errors=True)
|
2018-01-27 17:31:19 +02:00
|
|
|
os.makedirs(real_base_dir)
|
2017-05-03 13:14:48 +02:00
|
|
|
|
2017-10-04 16:03:03 +02:00
|
|
|
node = testgres.get_new_node('test', base_dir=real_base_dir)
|
2018-07-11 09:50:38 +02:00
|
|
|
# bound method slow_start() to 'node' class instance
|
|
|
|
node.slow_start = slow_start.__get__(node)
|
2017-12-05 16:25:43 +02:00
|
|
|
node.should_rm_dirs = True
|
2018-01-27 17:31:19 +02:00
|
|
|
node.init(
|
|
|
|
initdb_params=initdb_params, allow_streaming=set_replication)
|
2017-05-03 13:14:48 +02:00
|
|
|
|
2018-05-01 12:41:17 +02:00
|
|
|
# Sane default parameters
|
2018-12-17 16:44:42 +02:00
|
|
|
node.append_conf('postgresql.auto.conf', 'max_connections = 100')
|
|
|
|
node.append_conf('postgresql.auto.conf', 'shared_buffers = 10MB')
|
2019-02-08 08:34:54 +02:00
|
|
|
node.append_conf('postgresql.auto.conf', 'fsync = off')
|
2018-12-17 16:44:42 +02:00
|
|
|
node.append_conf('postgresql.auto.conf', 'wal_level = logical')
|
|
|
|
node.append_conf('postgresql.auto.conf', 'hot_standby = off')
|
2017-07-12 16:28:28 +02:00
|
|
|
|
2017-12-22 01:04:59 +02:00
|
|
|
node.append_conf(
|
2018-12-17 16:44:42 +02:00
|
|
|
'postgresql.auto.conf', "log_line_prefix = '%t [%p]: [%l-1] '")
|
|
|
|
node.append_conf('postgresql.auto.conf', 'log_statement = none')
|
|
|
|
node.append_conf('postgresql.auto.conf', 'log_duration = on')
|
2017-12-22 01:04:59 +02:00
|
|
|
node.append_conf(
|
2018-12-17 16:44:42 +02:00
|
|
|
'postgresql.auto.conf', 'log_min_duration_statement = 0')
|
|
|
|
node.append_conf('postgresql.auto.conf', 'log_connections = on')
|
|
|
|
node.append_conf('postgresql.auto.conf', 'log_disconnections = on')
|
2017-05-03 13:14:48 +02:00
|
|
|
|
|
|
|
# Apply given parameters
|
|
|
|
for key, value in six.iteritems(pg_options):
|
2018-12-17 16:44:42 +02:00
|
|
|
node.append_conf('postgresql.auto.conf', '%s = %s' % (key, value))
|
2017-05-03 13:14:48 +02:00
|
|
|
|
|
|
|
# Allow replication in pg_hba.conf
|
|
|
|
if set_replication:
|
2018-01-27 17:31:19 +02:00
|
|
|
node.append_conf(
|
2018-12-17 16:44:42 +02:00
|
|
|
'postgresql.auto.conf',
|
|
|
|
'max_wal_senders = 10')
|
2017-12-05 16:25:43 +02:00
|
|
|
|
2017-05-03 13:14:48 +02:00
|
|
|
return node
|
|
|
|
|
2018-01-25 20:37:27 +02:00
|
|
|
def create_tblspace_in_node(self, node, tblspc_name, tblspc_path=None, cfs=False):
|
2017-05-03 13:14:48 +02:00
|
|
|
res = node.execute(
|
2018-12-17 16:44:42 +02:00
|
|
|
'postgres',
|
|
|
|
'select exists'
|
2017-12-22 01:04:59 +02:00
|
|
|
" (select 1 from pg_tablespace where spcname = '{0}')".format(
|
|
|
|
tblspc_name)
|
|
|
|
)
|
2017-05-03 13:14:48 +02:00
|
|
|
# Check that tablespace with name 'tblspc_name' do not exists already
|
2017-12-22 01:04:59 +02:00
|
|
|
self.assertFalse(
|
|
|
|
res[0][0],
|
|
|
|
'Tablespace "{0}" already exists'.format(tblspc_name)
|
|
|
|
)
|
2017-05-03 13:14:48 +02:00
|
|
|
|
2018-01-25 20:37:27 +02:00
|
|
|
if not tblspc_path:
|
2018-01-27 17:31:19 +02:00
|
|
|
tblspc_path = os.path.join(
|
|
|
|
node.base_dir, '{0}'.format(tblspc_name))
|
2017-12-22 01:04:59 +02:00
|
|
|
cmd = "CREATE TABLESPACE {0} LOCATION '{1}'".format(
|
|
|
|
tblspc_name, tblspc_path)
|
2017-05-03 13:14:48 +02:00
|
|
|
if cfs:
|
2018-12-17 16:44:42 +02:00
|
|
|
cmd += ' with (compression=true)'
|
2018-04-25 21:15:05 +02:00
|
|
|
|
|
|
|
if not os.path.exists(tblspc_path):
|
|
|
|
os.makedirs(tblspc_path)
|
2018-12-17 16:44:42 +02:00
|
|
|
res = node.safe_psql('postgres', cmd)
|
2017-05-03 13:14:48 +02:00
|
|
|
# Check that tablespace was successfully created
|
2017-12-22 01:04:59 +02:00
|
|
|
# self.assertEqual(
|
|
|
|
# res[0], 0,
|
|
|
|
# 'Failed to create tablespace with cmd: {0}'.format(cmd))
|
2017-05-03 13:14:48 +02:00
|
|
|
|
2017-07-12 16:28:28 +02:00
|
|
|
def get_tblspace_path(self, node, tblspc_name):
|
|
|
|
return os.path.join(node.base_dir, tblspc_name)
|
2017-05-03 13:14:48 +02:00
|
|
|
|
|
|
|
def get_fork_size(self, node, fork_name):
|
2017-12-22 01:04:59 +02:00
|
|
|
return node.execute(
|
2018-12-17 16:44:42 +02:00
|
|
|
'postgres',
|
2017-05-03 13:14:48 +02:00
|
|
|
"select pg_relation_size('{0}')/8192".format(fork_name))[0][0]
|
|
|
|
|
|
|
|
def get_fork_path(self, node, fork_name):
|
2017-12-22 01:04:59 +02:00
|
|
|
return os.path.join(
|
|
|
|
node.base_dir, 'data', node.execute(
|
2018-12-17 16:44:42 +02:00
|
|
|
'postgres',
|
2017-12-22 01:04:59 +02:00
|
|
|
"select pg_relation_filepath('{0}')".format(
|
|
|
|
fork_name))[0][0]
|
|
|
|
)
|
2017-05-03 13:14:48 +02:00
|
|
|
|
2017-11-07 08:40:13 +02:00
|
|
|
def get_md5_per_page_for_fork(self, file, size_in_pages):
|
|
|
|
pages_per_segment = {}
|
2017-05-03 13:14:48 +02:00
|
|
|
md5_per_page = {}
|
2017-11-07 08:40:13 +02:00
|
|
|
nsegments = size_in_pages/131072
|
2017-12-22 01:04:59 +02:00
|
|
|
if size_in_pages % 131072 != 0:
|
2017-11-07 08:40:13 +02:00
|
|
|
nsegments = nsegments + 1
|
|
|
|
|
|
|
|
size = size_in_pages
|
|
|
|
for segment_number in range(nsegments):
|
2017-12-05 16:25:43 +02:00
|
|
|
if size - 131072 > 0:
|
2017-11-07 08:40:13 +02:00
|
|
|
pages_per_segment[segment_number] = 131072
|
|
|
|
else:
|
|
|
|
pages_per_segment[segment_number] = size
|
2017-12-05 16:25:43 +02:00
|
|
|
size = size - 131072
|
2017-11-07 08:40:13 +02:00
|
|
|
|
|
|
|
for segment_number in range(nsegments):
|
|
|
|
offset = 0
|
|
|
|
if segment_number == 0:
|
|
|
|
file_desc = os.open(file, os.O_RDONLY)
|
|
|
|
start_page = 0
|
|
|
|
end_page = pages_per_segment[segment_number]
|
|
|
|
else:
|
2017-12-22 01:04:59 +02:00
|
|
|
file_desc = os.open(
|
2018-12-17 16:44:42 +02:00
|
|
|
file+'.{0}'.format(segment_number), os.O_RDONLY
|
2017-12-22 01:04:59 +02:00
|
|
|
)
|
2017-11-07 08:40:13 +02:00
|
|
|
start_page = max(md5_per_page)+1
|
|
|
|
end_page = end_page + pages_per_segment[segment_number]
|
|
|
|
|
|
|
|
for page in range(start_page, end_page):
|
2017-12-22 01:04:59 +02:00
|
|
|
md5_per_page[page] = hashlib.md5(
|
|
|
|
os.read(file_desc, 8192)).hexdigest()
|
2017-11-07 08:40:13 +02:00
|
|
|
offset += 8192
|
|
|
|
os.lseek(file_desc, offset, 0)
|
|
|
|
os.close(file_desc)
|
|
|
|
|
2017-05-03 13:14:48 +02:00
|
|
|
return md5_per_page
|
|
|
|
|
2017-07-12 16:28:28 +02:00
|
|
|
def get_ptrack_bits_per_page_for_fork(self, node, file, size=[]):
|
|
|
|
|
2017-05-25 11:53:33 +02:00
|
|
|
if self.get_pgpro_edition(node) == 'enterprise':
|
|
|
|
header_size = 48
|
|
|
|
else:
|
|
|
|
header_size = 24
|
2017-05-05 15:21:49 +02:00
|
|
|
ptrack_bits_for_fork = []
|
2017-11-09 11:45:04 +02:00
|
|
|
|
|
|
|
page_body_size = 8192-header_size
|
2017-05-03 13:14:48 +02:00
|
|
|
byte_size = os.path.getsize(file + '_ptrack')
|
2017-11-09 11:45:04 +02:00
|
|
|
npages = byte_size/8192
|
2017-12-22 01:04:59 +02:00
|
|
|
if byte_size % 8192 != 0:
|
2017-11-09 11:45:04 +02:00
|
|
|
print('Ptrack page is not 8k aligned')
|
|
|
|
sys.exit(1)
|
|
|
|
|
2017-05-03 13:14:48 +02:00
|
|
|
file = os.open(file + '_ptrack', os.O_RDONLY)
|
2017-11-09 11:45:04 +02:00
|
|
|
|
|
|
|
for page in range(npages):
|
|
|
|
offset = 8192*page+header_size
|
|
|
|
os.lseek(file, offset, 0)
|
|
|
|
lots_of_bytes = os.read(file, page_body_size)
|
2017-12-22 01:04:59 +02:00
|
|
|
byte_list = [
|
|
|
|
lots_of_bytes[i:i+1] for i in range(len(lots_of_bytes))
|
|
|
|
]
|
2017-11-09 11:45:04 +02:00
|
|
|
for byte in byte_list:
|
2017-12-22 01:04:59 +02:00
|
|
|
# byte_inverted = bin(int(byte, base=16))[2:][::-1]
|
|
|
|
# bits = (byte >> x) & 1 for x in range(7, -1, -1)
|
2017-11-09 11:45:04 +02:00
|
|
|
byte_inverted = bin(ord(byte))[2:].rjust(8, '0')[::-1]
|
|
|
|
for bit in byte_inverted:
|
2017-12-22 01:04:59 +02:00
|
|
|
# if len(ptrack_bits_for_fork) < size:
|
|
|
|
ptrack_bits_for_fork.append(int(bit))
|
|
|
|
|
2017-05-03 13:14:48 +02:00
|
|
|
os.close(file)
|
2017-05-05 15:21:49 +02:00
|
|
|
return ptrack_bits_for_fork
|
2017-05-03 13:14:48 +02:00
|
|
|
|
|
|
|
    def check_ptrack_sanity(self, idx_dict):
        """Validate a ptrack bitmap against observed page changes.

        *idx_dict* carries 'old_pages'/'new_pages' (per-page md5 maps taken
        before/after a change), 'old_size'/'new_size' (page counts),
        'ptrack' (the bitmap) and 'type'/'path' for diagnostics.
        Returns True when every added/changed page has ptrack bit 1 and
        every unchanged page's bit mismatch is merely reported; prints
        details in verbose mode instead of failing immediately.
        """
        success = True
        if idx_dict['new_size'] > idx_dict['old_size']:
            size = idx_dict['new_size']
        else:
            size = idx_dict['old_size']
        for PageNum in range(size):
            if PageNum not in idx_dict['old_pages']:
                # Page was not present before, meaning that relation got bigger
                # Ptrack should be equal to 1
                if idx_dict['ptrack'][PageNum] != 1:
                    if self.verbose:
                        print(
                            'File: {0}\n Page Number {1} of type {2} was added,'
                            ' but ptrack value is {3}. THIS IS BAD'.format(
                                idx_dict['path'],
                                PageNum, idx_dict['type'],
                                idx_dict['ptrack'][PageNum])
                        )
                    # print(idx_dict)
                    success = False
                continue
            if PageNum not in idx_dict['new_pages']:
                # Page is not present now, meaning that relation got smaller
                # Ptrack should be equal to 1,
                # We are not freaking out about false positive stuff
                if idx_dict['ptrack'][PageNum] != 1:
                    if self.verbose:
                        print(
                            'File: {0}\n Page Number {1} of type {2} was deleted,'
                            ' but ptrack value is {3}. THIS IS BAD'.format(
                                idx_dict['path'],
                                PageNum, idx_dict['type'],
                                idx_dict['ptrack'][PageNum])
                        )
                continue

            # Ok, all pages in new_pages that do not have
            # corresponding page in old_pages are been dealt with.
            # We can now safely proceed to comparing old and new pages
            if idx_dict['new_pages'][
                    PageNum] != idx_dict['old_pages'][PageNum]:
                # Page has been changed,
                # meaning that ptrack should be equal to 1
                if idx_dict['ptrack'][PageNum] != 1:
                    if self.verbose:
                        print(
                            'File: {0}\n Page Number {1} of type {2} was changed,'
                            ' but ptrack value is {3}. THIS IS BAD'.format(
                                idx_dict['path'],
                                PageNum, idx_dict['type'],
                                idx_dict['ptrack'][PageNum])
                        )
                        print(
                            ' Old checksumm: {0}\n'
                            ' New checksumm: {1}'.format(
                                idx_dict['old_pages'][PageNum],
                                idx_dict['new_pages'][PageNum])
                        )

                    # Block 0 of an SP-GiST index is deliberately excused
                    # from the check below.
                    if PageNum == 0 and idx_dict['type'] == 'spgist':
                        if self.verbose:
                            print(
                                'SPGIST is a special snowflake, so don`t '
                                'fret about losing ptrack for blknum 0'
                            )
                        continue
                    success = False
            else:
                # Page has not been changed,
                # meaning that ptrack should be equal to 0
                if idx_dict['ptrack'][PageNum] != 0:
                    if self.verbose:
                        print(
                            'File: {0}\n Page Number {1} of type {2} was not changed,'
                            ' but ptrack value is {3}'.format(
                                idx_dict['path'],
                                PageNum, idx_dict['type'],
                                idx_dict['ptrack'][PageNum]
                            )
                        )
        return success
        # self.assertTrue(
        #     success, 'Ptrack has failed to register changes in data files'
        # )
|
2017-05-03 13:14:48 +02:00
|
|
|
|
|
|
|
def check_ptrack_recovery(self, idx_dict):
|
|
|
|
size = idx_dict['size']
|
|
|
|
for PageNum in range(size):
|
|
|
|
if idx_dict['ptrack'][PageNum] != 1:
|
2017-12-22 01:04:59 +02:00
|
|
|
self.assertTrue(
|
|
|
|
False,
|
|
|
|
'Recovery for Page Number {0} of Type {1}'
|
|
|
|
' was conducted, but ptrack value is {2}.'
|
|
|
|
' THIS IS BAD\n IDX_DICT: {3}'.format(
|
|
|
|
PageNum, idx_dict['type'],
|
|
|
|
idx_dict['ptrack'][PageNum],
|
|
|
|
idx_dict
|
|
|
|
)
|
|
|
|
)
|
2017-05-03 13:14:48 +02:00
|
|
|
|
2017-05-03 17:05:19 +02:00
|
|
|
def check_ptrack_clean(self, idx_dict, size):
|
2017-05-03 13:14:48 +02:00
|
|
|
for PageNum in range(size):
|
|
|
|
if idx_dict['ptrack'][PageNum] != 0:
|
2017-12-22 01:04:59 +02:00
|
|
|
self.assertTrue(
|
|
|
|
False,
|
|
|
|
'Ptrack for Page Number {0} of Type {1}'
|
|
|
|
' should be clean, but ptrack value is {2}.'
|
|
|
|
'\n THIS IS BAD\n IDX_DICT: {3}'.format(
|
|
|
|
PageNum,
|
|
|
|
idx_dict['type'],
|
|
|
|
idx_dict['ptrack'][PageNum],
|
|
|
|
idx_dict
|
|
|
|
)
|
|
|
|
)
|
2017-05-03 13:14:48 +02:00
|
|
|
|
2019-01-10 17:12:00 +02:00
|
|
|
def run_pb(self, command, asynchronous=False, gdb=False, old_binary=False):
|
2018-10-15 11:27:51 +02:00
|
|
|
if not self.probackup_old_path and old_binary:
|
2018-12-17 16:44:42 +02:00
|
|
|
print('PGPROBACKUPBIN_OLD is not set')
|
2018-10-15 11:27:51 +02:00
|
|
|
exit(1)
|
|
|
|
|
|
|
|
if old_binary:
|
|
|
|
binary_path = self.probackup_old_path
|
|
|
|
else:
|
|
|
|
binary_path = self.probackup_path
|
|
|
|
|
2017-05-03 13:14:48 +02:00
|
|
|
try:
|
2018-10-15 11:27:51 +02:00
|
|
|
self.cmd = [' '.join(map(str, [binary_path] + command))]
|
2017-06-27 07:42:52 +02:00
|
|
|
if self.verbose:
|
|
|
|
print(self.cmd)
|
2017-12-22 01:04:59 +02:00
|
|
|
if gdb:
|
2018-10-15 11:27:51 +02:00
|
|
|
return GDBobj([binary_path] + command, self.verbose)
|
2019-01-10 17:12:00 +02:00
|
|
|
if asynchronous:
|
2017-12-22 01:04:59 +02:00
|
|
|
return subprocess.Popen(
|
|
|
|
self.cmd,
|
|
|
|
stdout=subprocess.PIPE,
|
|
|
|
stderr=subprocess.PIPE,
|
|
|
|
env=self.test_env
|
|
|
|
)
|
2017-05-03 13:14:48 +02:00
|
|
|
else:
|
2017-12-22 01:04:59 +02:00
|
|
|
self.output = subprocess.check_output(
|
2018-10-15 11:27:51 +02:00
|
|
|
[binary_path] + command,
|
2017-12-22 01:04:59 +02:00
|
|
|
stderr=subprocess.STDOUT,
|
|
|
|
env=self.test_env
|
2018-12-17 16:44:42 +02:00
|
|
|
).decode('utf-8')
|
2017-12-22 01:04:59 +02:00
|
|
|
if command[0] == 'backup':
|
|
|
|
# return backup ID
|
|
|
|
for line in self.output.splitlines():
|
|
|
|
if 'INFO: Backup' and 'completed' in line:
|
|
|
|
return line.split()[2]
|
|
|
|
else:
|
|
|
|
return self.output
|
2017-05-03 13:14:48 +02:00
|
|
|
except subprocess.CalledProcessError as e:
|
2018-12-17 16:44:42 +02:00
|
|
|
raise ProbackupException(e.output.decode('utf-8'), self.cmd)
|
2017-05-03 13:14:48 +02:00
|
|
|
|
2019-01-10 17:12:00 +02:00
|
|
|
def run_binary(self, command, asynchronous=False):
|
2017-12-28 10:52:18 +02:00
|
|
|
if self.verbose:
|
|
|
|
print([' '.join(map(str, command))])
|
2017-12-19 12:17:58 +02:00
|
|
|
try:
|
2019-01-10 17:12:00 +02:00
|
|
|
if asynchronous:
|
2017-12-22 01:04:59 +02:00
|
|
|
return subprocess.Popen(
|
|
|
|
command,
|
|
|
|
stdin=subprocess.PIPE,
|
|
|
|
stdout=subprocess.PIPE,
|
|
|
|
stderr=subprocess.PIPE,
|
|
|
|
env=self.test_env
|
|
|
|
)
|
2017-12-19 12:17:58 +02:00
|
|
|
else:
|
|
|
|
self.output = subprocess.check_output(
|
|
|
|
command,
|
|
|
|
stderr=subprocess.STDOUT,
|
|
|
|
env=self.test_env
|
2018-12-17 16:44:42 +02:00
|
|
|
).decode('utf-8')
|
2017-12-19 12:17:58 +02:00
|
|
|
return self.output
|
|
|
|
except subprocess.CalledProcessError as e:
|
2018-12-17 16:44:42 +02:00
|
|
|
raise ProbackupException(e.output.decode('utf-8'), command)
|
2017-12-19 12:17:58 +02:00
|
|
|
|
2018-10-15 11:27:51 +02:00
|
|
|
def init_pb(self, backup_dir, old_binary=False):
|
2017-05-03 13:14:48 +02:00
|
|
|
|
2017-06-20 12:57:23 +02:00
|
|
|
shutil.rmtree(backup_dir, ignore_errors=True)
|
2018-10-15 11:27:51 +02:00
|
|
|
|
2017-05-03 13:14:48 +02:00
|
|
|
return self.run_pb([
|
2018-12-17 16:44:42 +02:00
|
|
|
'init',
|
|
|
|
'-B', backup_dir
|
2018-10-15 11:27:51 +02:00
|
|
|
],
|
|
|
|
old_binary=old_binary
|
|
|
|
)
|
2017-06-07 16:52:07 +02:00
|
|
|
|
2019-01-16 15:31:26 +02:00
|
|
|
def add_instance(self, backup_dir, instance, node, old_binary=False, options=[]):
|
2017-06-07 16:52:07 +02:00
|
|
|
|
2019-01-16 15:31:26 +02:00
|
|
|
cmd = [
|
2018-12-17 16:44:42 +02:00
|
|
|
'add-instance',
|
|
|
|
'--instance={0}'.format(instance),
|
|
|
|
'-B', backup_dir,
|
|
|
|
'-D', node.data_dir
|
2019-01-16 15:31:26 +02:00
|
|
|
]
|
|
|
|
|
|
|
|
return self.run_pb(cmd + options, old_binary=old_binary)
|
2017-06-07 16:52:07 +02:00
|
|
|
|
2019-01-16 16:47:08 +02:00
|
|
|
def set_config(self, backup_dir, instance, old_binary=False, options=[]):
|
|
|
|
|
|
|
|
cmd = [
|
|
|
|
'set-config',
|
|
|
|
'--instance={0}'.format(instance),
|
|
|
|
'-B', backup_dir,
|
|
|
|
]
|
|
|
|
|
|
|
|
return self.run_pb(cmd + options, old_binary=old_binary)
|
|
|
|
|
2018-10-15 11:27:51 +02:00
|
|
|
def del_instance(self, backup_dir, instance, old_binary=False):
|
2017-06-07 16:52:07 +02:00
|
|
|
|
|
|
|
return self.run_pb([
|
2018-12-17 16:44:42 +02:00
|
|
|
'del-instance',
|
|
|
|
'--instance={0}'.format(instance),
|
|
|
|
'-B', backup_dir
|
2018-10-15 11:27:51 +02:00
|
|
|
],
|
|
|
|
old_binary=old_binary
|
|
|
|
)
|
2017-05-03 13:14:48 +02:00
|
|
|
|
2017-06-20 12:57:23 +02:00
|
|
|
    def clean_pb(self, backup_dir):
        """Delete the whole backup catalog; a missing directory is ignored."""
        shutil.rmtree(backup_dir, ignore_errors=True)
|
2017-05-03 13:14:48 +02:00
|
|
|
|
2017-12-22 01:04:59 +02:00
|
|
|
def backup_node(
|
|
|
|
self, backup_dir, instance, node, data_dir=False,
|
2019-01-10 17:12:00 +02:00
|
|
|
backup_type='full', options=[], asynchronous=False, gdb=False,
|
2018-10-15 11:27:51 +02:00
|
|
|
old_binary=False
|
2017-12-22 01:04:59 +02:00
|
|
|
):
|
2017-06-27 07:42:52 +02:00
|
|
|
if not node and not data_dir:
|
|
|
|
print('You must provide ether node or data_dir for backup')
|
|
|
|
exit(1)
|
|
|
|
|
|
|
|
if node:
|
|
|
|
pgdata = node.data_dir
|
|
|
|
|
|
|
|
if data_dir:
|
|
|
|
pgdata = data_dir
|
2017-05-17 11:46:38 +02:00
|
|
|
|
2017-05-03 13:14:48 +02:00
|
|
|
cmd_list = [
|
2018-12-17 16:44:42 +02:00
|
|
|
'backup',
|
|
|
|
'-B', backup_dir,
|
2017-12-22 01:04:59 +02:00
|
|
|
# "-D", pgdata,
|
2018-12-17 16:44:42 +02:00
|
|
|
'-p', '%i' % node.port,
|
|
|
|
'-d', 'postgres',
|
|
|
|
'--instance={0}'.format(instance)
|
2017-05-03 13:14:48 +02:00
|
|
|
]
|
|
|
|
if backup_type:
|
2018-12-17 16:44:42 +02:00
|
|
|
cmd_list += ['-b', backup_type]
|
2017-05-03 13:14:48 +02:00
|
|
|
|
2019-01-10 17:12:00 +02:00
|
|
|
return self.run_pb(cmd_list + options, asynchronous, gdb, old_binary)
|
2017-05-03 13:14:48 +02:00
|
|
|
|
2018-08-13 09:08:38 +02:00
|
|
|
def merge_backup(
|
2019-01-10 17:12:00 +02:00
|
|
|
self, backup_dir, instance, backup_id, asynchronous=False,
|
2018-10-15 11:27:51 +02:00
|
|
|
gdb=False, old_binary=False, options=[]):
|
2018-08-02 10:57:39 +02:00
|
|
|
cmd_list = [
|
2018-12-17 16:44:42 +02:00
|
|
|
'merge',
|
|
|
|
'-B', backup_dir,
|
|
|
|
'--instance={0}'.format(instance),
|
|
|
|
'-i', backup_id
|
2018-08-02 10:57:39 +02:00
|
|
|
]
|
|
|
|
|
2019-01-10 17:12:00 +02:00
|
|
|
return self.run_pb(cmd_list + options, asynchronous, gdb, old_binary)
|
2018-08-02 10:57:39 +02:00
|
|
|
|
2017-12-22 01:04:59 +02:00
|
|
|
def restore_node(
|
|
|
|
self, backup_dir, instance, node=False,
|
2018-10-15 11:27:51 +02:00
|
|
|
data_dir=None, backup_id=None, old_binary=False, options=[]
|
2017-12-22 01:04:59 +02:00
|
|
|
):
|
2017-05-17 11:46:38 +02:00
|
|
|
if data_dir is None:
|
|
|
|
data_dir = node.data_dir
|
2017-05-03 13:14:48 +02:00
|
|
|
|
|
|
|
cmd_list = [
|
2018-12-17 16:44:42 +02:00
|
|
|
'restore',
|
|
|
|
'-B', backup_dir,
|
|
|
|
'-D', data_dir,
|
|
|
|
'--instance={0}'.format(instance)
|
2017-05-03 13:14:48 +02:00
|
|
|
]
|
2017-06-20 12:57:23 +02:00
|
|
|
if backup_id:
|
2018-12-17 16:44:42 +02:00
|
|
|
cmd_list += ['-i', backup_id]
|
2017-05-03 13:14:48 +02:00
|
|
|
|
2018-10-15 11:27:51 +02:00
|
|
|
return self.run_pb(cmd_list + options, old_binary=old_binary)
|
2017-05-03 13:14:48 +02:00
|
|
|
|
2017-12-22 01:04:59 +02:00
|
|
|
    def show_pb(
            self, backup_dir, instance=None, backup_id=None,
            options=[], as_text=False, as_json=True, old_binary=False
            ):
        """Run 'show' and parse its output.

        Returns:
            as_text=True           -> raw stdout string (caller prints it)
            as_json=True (default) -> list of backup dicts; a single backup
                                      dict when backup_id is given
            as_json=False          -> list of row dicts parsed from the
                                      plain-text table, or a {name: value}
                                      dict for a single backup / no instance
        """

        backup_list = []
        specific_record = {}
        cmd_list = [
            'show',
            '-B', backup_dir,
        ]
        if instance:
            cmd_list += ['--instance={0}'.format(instance)]

        if backup_id:
            cmd_list += ['-i', backup_id]

        # AHTUNG, WARNING will break json parsing
        if as_json:
            cmd_list += ['--format=json', '--log-level-console=error']

        if as_text:
            # You should print it when calling as_text=true
            return self.run_pb(cmd_list + options, old_binary=old_binary)

        # get show result as list of lines
        if as_json:
            data = json.loads(self.run_pb(cmd_list + options, old_binary=old_binary))
            # print(data)
            for instance_data in data:
                # find specific instance if requested
                if instance and instance_data['instance'] != instance:
                    continue

                for backup in reversed(instance_data['backups']):
                    # find specific backup if requested
                    if backup_id:
                        if backup['id'] == backup_id:
                            return backup
                    else:
                        backup_list.append(backup)
            return backup_list
        else:
            show_splitted = self.run_pb(
                cmd_list + options, old_binary=old_binary).splitlines()
            if instance is not None and backup_id is None:
                # cut header(ID, Mode, etc) from show as single string
                header = show_splitted[1:2][0]
                # cut backup records from show as single list
                # with string for every backup record
                body = show_splitted[3:]
                # inverse list so oldest record come first
                body = body[::-1]
                # split string in list with string for every header element
                header_split = re.split(' +', header)
                # Remove empty items
                for i in header_split:
                    if i == '':
                        header_split.remove(i)
                        continue
                header_split = [
                    header_element.rstrip() for header_element in header_split
                ]
                for backup_record in body:
                    backup_record = backup_record.rstrip()
                    # split list with str for every backup record element
                    backup_record_split = re.split(' +', backup_record)
                    # Remove empty items
                    for i in backup_record_split:
                        if i == '':
                            backup_record_split.remove(i)
                    if len(header_split) != len(backup_record_split):
                        # NOTE(review): 'warning' is presumably a module-level
                        # message template defined elsewhere in this file --
                        # confirm it exists before relying on this branch
                        print(warning.format(
                            header=header, body=body,
                            header_split=header_split,
                            body_split=backup_record_split)
                        )
                        exit(1)
                    new_dict = dict(zip(header_split, backup_record_split))
                    backup_list.append(new_dict)
                return backup_list
            else:
                # cut out empty lines and lines started with #
                # and other garbage then reconstruct it as dictionary
                # print show_splitted
                sanitized_show = [item for item in show_splitted if item]
                sanitized_show = [
                    item for item in sanitized_show if not item.startswith('#')
                ]
                # print sanitized_show
                for line in sanitized_show:
                    name, var = line.partition(' = ')[::2]
                    var = var.strip('"')
                    var = var.strip("'")
                    specific_record[name.strip()] = var
                return specific_record
|
2017-05-03 13:14:48 +02:00
|
|
|
|
2017-12-22 01:04:59 +02:00
|
|
|
def validate_pb(
|
|
|
|
self, backup_dir, instance=None,
|
2019-02-19 14:24:57 +02:00
|
|
|
backup_id=None, options=[], old_binary=False, gdb=False
|
2017-12-22 01:04:59 +02:00
|
|
|
):
|
2017-06-07 16:52:07 +02:00
|
|
|
|
2017-05-03 13:14:48 +02:00
|
|
|
cmd_list = [
|
2018-12-17 16:44:42 +02:00
|
|
|
'validate',
|
|
|
|
'-B', backup_dir
|
2017-05-03 13:14:48 +02:00
|
|
|
]
|
2017-06-07 16:52:07 +02:00
|
|
|
if instance:
|
2018-12-17 16:44:42 +02:00
|
|
|
cmd_list += ['--instance={0}'.format(instance)]
|
2017-06-20 12:57:23 +02:00
|
|
|
if backup_id:
|
2018-12-17 16:44:42 +02:00
|
|
|
cmd_list += ['-i', backup_id]
|
2017-05-03 13:14:48 +02:00
|
|
|
|
2019-02-19 14:24:57 +02:00
|
|
|
return self.run_pb(cmd_list + options, old_binary=old_binary, gdb=gdb)
|
2017-05-03 13:14:48 +02:00
|
|
|
|
2018-10-15 11:27:51 +02:00
|
|
|
def delete_pb(
|
|
|
|
self, backup_dir, instance,
|
|
|
|
backup_id=None, options=[], old_binary=False):
|
2017-05-03 13:14:48 +02:00
|
|
|
cmd_list = [
|
2018-12-17 16:44:42 +02:00
|
|
|
'delete',
|
|
|
|
'-B', backup_dir
|
2017-05-03 13:14:48 +02:00
|
|
|
]
|
2017-07-12 16:28:28 +02:00
|
|
|
|
2018-12-17 16:44:42 +02:00
|
|
|
cmd_list += ['--instance={0}'.format(instance)]
|
2017-06-20 12:57:23 +02:00
|
|
|
if backup_id:
|
2018-12-17 16:44:42 +02:00
|
|
|
cmd_list += ['-i', backup_id]
|
2017-05-03 13:14:48 +02:00
|
|
|
|
2018-10-15 11:27:51 +02:00
|
|
|
return self.run_pb(cmd_list + options, old_binary=old_binary)
|
2017-05-03 13:14:48 +02:00
|
|
|
|
2018-10-15 11:27:51 +02:00
|
|
|
def delete_expired(
|
|
|
|
self, backup_dir, instance, options=[], old_binary=False):
|
2017-05-03 13:14:48 +02:00
|
|
|
cmd_list = [
|
2018-12-17 16:44:42 +02:00
|
|
|
'delete', '--expired', '--wal',
|
|
|
|
'-B', backup_dir,
|
|
|
|
'--instance={0}'.format(instance)
|
2017-05-03 13:14:48 +02:00
|
|
|
]
|
2018-10-15 11:27:51 +02:00
|
|
|
return self.run_pb(cmd_list + options, old_binary=old_binary)
|
2017-05-03 13:14:48 +02:00
|
|
|
|
2018-10-15 11:27:51 +02:00
|
|
|
def show_config(self, backup_dir, instance, old_binary=False):
|
2017-05-05 15:21:49 +02:00
|
|
|
out_dict = {}
|
2017-05-03 13:14:48 +02:00
|
|
|
cmd_list = [
|
2018-12-17 16:44:42 +02:00
|
|
|
'show-config',
|
|
|
|
'-B', backup_dir,
|
|
|
|
'--instance={0}'.format(instance)
|
2017-05-03 13:14:48 +02:00
|
|
|
]
|
2018-10-15 16:09:16 +02:00
|
|
|
|
2018-10-15 11:27:51 +02:00
|
|
|
res = self.run_pb(cmd_list, old_binary=old_binary).splitlines()
|
2017-05-05 15:21:49 +02:00
|
|
|
for line in res:
|
|
|
|
if not line.startswith('#'):
|
2018-12-17 16:44:42 +02:00
|
|
|
name, var = line.partition(' = ')[::2]
|
2017-05-05 15:21:49 +02:00
|
|
|
out_dict[name] = var
|
|
|
|
return out_dict
|
|
|
|
|
2017-05-03 13:14:48 +02:00
|
|
|
def get_recovery_conf(self, node):
|
|
|
|
out_dict = {}
|
2017-12-22 01:04:59 +02:00
|
|
|
with open(
|
2018-12-17 16:44:42 +02:00
|
|
|
os.path.join(node.data_dir, 'recovery.conf'), 'r'
|
2017-12-22 01:04:59 +02:00
|
|
|
) as recovery_conf:
|
2017-05-03 13:14:48 +02:00
|
|
|
for line in recovery_conf:
|
|
|
|
try:
|
2018-12-17 16:44:42 +02:00
|
|
|
key, value = line.split('=')
|
2017-05-03 13:14:48 +02:00
|
|
|
except:
|
|
|
|
continue
|
|
|
|
out_dict[key.strip()] = value.strip(" '").replace("'\n", "")
|
|
|
|
return out_dict
|
|
|
|
|
2018-01-25 20:37:27 +02:00
|
|
|
    def set_archiving(
            self, backup_dir, instance, node, replica=False,
            overwrite=False, compress=False, old_binary=False):
        """Configure WAL archiving on *node* via pg_probackup archive-push.

        Appends archive_mode and archive_command to postgresql.auto.conf.
        replica=True sets archive_mode = always (and hot_standby = on);
        compress/overwrite add the corresponding archive-push flags.
        """

        if replica:
            archive_mode = 'always'
            node.append_conf('postgresql.auto.conf', 'hot_standby = on')
        else:
            archive_mode = 'on'

        node.append_conf(
            'postgresql.auto.conf',
            'archive_mode = {0}'.format(archive_mode)
        )
        # base command; note the trailing space -- flags are appended below
        if os.name == 'posix':
            archive_command = '"{0}" archive-push -B {1} --instance={2} '.format(
                self.probackup_path, backup_dir, instance)

        elif os.name == 'nt':
            # double the backslashes so they survive the config-file parser
            archive_command = '"{0}" archive-push -B {1} --instance={2} '.format(
                self.probackup_path.replace("\\","\\\\"),
                backup_dir.replace("\\","\\\\"),
                instance)

        if self.archive_compress or compress:
            archive_command = archive_command + '--compress '

        if overwrite:
            archive_command = archive_command + '--overwrite '

        # %p / %f are expanded by the server; Windows needs them quoted
        if os.name == 'posix':
            archive_command = archive_command + '--wal-file-path %p --wal-file-name %f'

        elif os.name == 'nt':
            archive_command = archive_command + '--wal-file-path "%p" --wal-file-name "%f"'

        node.append_conf(
            'postgresql.auto.conf',
            "archive_command = '{0}'".format(
                archive_command))
|
2017-05-03 17:05:19 +02:00
|
|
|
|
2017-12-22 01:04:59 +02:00
|
|
|
def set_replica(
|
|
|
|
self, master, replica,
|
|
|
|
replica_name='replica',
|
|
|
|
synchronous=False
|
|
|
|
):
|
|
|
|
replica.append_conf(
|
2018-12-17 16:44:42 +02:00
|
|
|
'postgresql.auto.conf', 'port = {0}'.format(replica.port))
|
2017-07-12 16:28:28 +02:00
|
|
|
replica.append_conf('postgresql.auto.conf', 'hot_standby = on')
|
2018-12-17 16:44:42 +02:00
|
|
|
replica.append_conf('recovery.conf', 'standby_mode = on')
|
2017-12-22 01:04:59 +02:00
|
|
|
replica.append_conf(
|
2018-12-17 16:44:42 +02:00
|
|
|
'recovery.conf',
|
2017-12-22 01:04:59 +02:00
|
|
|
"primary_conninfo = 'user={0} port={1} application_name={2}"
|
|
|
|
" sslmode=prefer sslcompression=1'".format(
|
|
|
|
self.user, master.port, replica_name)
|
|
|
|
)
|
2017-07-12 16:28:28 +02:00
|
|
|
if synchronous:
|
2017-12-22 01:04:59 +02:00
|
|
|
master.append_conf(
|
2018-12-17 16:44:42 +02:00
|
|
|
'postgresql.auto.conf',
|
2017-12-22 01:04:59 +02:00
|
|
|
"synchronous_standby_names='{0}'".format(replica_name)
|
|
|
|
)
|
|
|
|
master.append_conf(
|
|
|
|
'postgresql.auto.conf',
|
|
|
|
"synchronous_commit='remote_apply'"
|
|
|
|
)
|
2017-07-12 16:28:28 +02:00
|
|
|
master.reload()
|
|
|
|
|
2017-05-03 13:14:48 +02:00
|
|
|
    def wrong_wal_clean(self, node, wal_size):
        """Delete the newest archived WAL file if its size differs from
        *wal_size* (i.e. it is a partial/garbage segment)."""
        # NOTE(review): self.backup_dir is used as a callable here -- confirm
        # it is a method (elsewhere in this file it is often a plain path)
        wals_dir = os.path.join(self.backup_dir(node), 'wal')
        wals = [
            f for f in os.listdir(wals_dir) if os.path.isfile(
                os.path.join(wals_dir, f))
        ]
        # WAL file names sort lexicographically, so the last one is newest
        wals.sort()
        file_path = os.path.join(wals_dir, wals[-1])
        if os.path.getsize(file_path) != wal_size:
            os.remove(file_path)
|
|
|
|
|
|
|
|
def guc_wal_segment_size(self, node):
|
2017-12-22 01:04:59 +02:00
|
|
|
var = node.execute(
|
2018-12-17 16:44:42 +02:00
|
|
|
'postgres',
|
2017-12-22 01:04:59 +02:00
|
|
|
"select setting from pg_settings where name = 'wal_segment_size'"
|
|
|
|
)
|
2017-05-03 13:14:48 +02:00
|
|
|
return int(var[0][0]) * self.guc_wal_block_size(node)
|
|
|
|
|
|
|
|
def guc_wal_block_size(self, node):
|
2017-12-22 01:04:59 +02:00
|
|
|
var = node.execute(
|
2018-12-17 16:44:42 +02:00
|
|
|
'postgres',
|
2017-12-22 01:04:59 +02:00
|
|
|
"select setting from pg_settings where name = 'wal_block_size'"
|
|
|
|
)
|
2017-05-03 13:14:48 +02:00
|
|
|
return int(var[0][0])
|
|
|
|
|
2017-05-25 11:53:33 +02:00
|
|
|
def get_pgpro_edition(self, node):
|
2017-12-22 01:04:59 +02:00
|
|
|
if node.execute(
|
2018-12-17 16:44:42 +02:00
|
|
|
'postgres',
|
2017-12-22 01:04:59 +02:00
|
|
|
"select exists (select 1 from"
|
|
|
|
" pg_proc where proname = 'pgpro_edition')"
|
|
|
|
)[0][0]:
|
2018-12-17 16:44:42 +02:00
|
|
|
var = node.execute('postgres', 'select pgpro_edition()')
|
2017-05-25 11:53:33 +02:00
|
|
|
return str(var[0][0])
|
|
|
|
else:
|
|
|
|
return False
|
2017-06-20 12:57:23 +02:00
|
|
|
|
|
|
|
def get_username(self):
|
|
|
|
""" Returns current user name """
|
2018-12-17 16:54:38 +02:00
|
|
|
return getpass.getuser()
|
2017-06-27 07:42:52 +02:00
|
|
|
|
2018-01-18 03:35:27 +02:00
|
|
|
def version_to_num(self, version):
|
2018-01-27 17:31:19 +02:00
|
|
|
if not version:
|
|
|
|
return 0
|
2018-12-17 16:44:42 +02:00
|
|
|
parts = version.split('.')
|
2018-01-27 17:31:19 +02:00
|
|
|
while len(parts) < 3:
|
2018-12-17 16:44:42 +02:00
|
|
|
parts.append('0')
|
2018-01-27 17:31:19 +02:00
|
|
|
num = 0
|
|
|
|
for part in parts:
|
|
|
|
num = num * 100 + int(re.sub("[^\d]", "", part))
|
|
|
|
return num
|
2018-01-18 03:35:27 +02:00
|
|
|
|
2017-10-11 17:08:56 +02:00
|
|
|
    def switch_wal_segment(self, node):
        """
        Execute pg_switch_wal/xlog() in given node

        Args:
            node: an instance of PostgresNode or NodeConnection class
        """
        if isinstance(node, testgres.PostgresNode):
            # pg_switch_xlog() was renamed to pg_switch_wal() in v10
            if self.version_to_num(
                node.safe_psql('postgres', 'show server_version')
            ) >= self.version_to_num('10.0'):
                node.safe_psql('postgres', 'select pg_switch_wal()')
            else:
                node.safe_psql('postgres', 'select pg_switch_xlog()')
        else:
            # a connection object: query through execute() instead of psql
            if self.version_to_num(
                node.execute('show server_version')[0][0]
            ) >= self.version_to_num('10.0'):
                node.execute('select pg_switch_wal()')
            else:
                node.execute('select pg_switch_xlog()')
|
2017-10-11 17:08:56 +02:00
|
|
|
|
2018-10-18 08:46:02 +02:00
|
|
|
    def wait_until_replica_catch_with_master(self, master, replica):
        """Block until *replica* has replayed WAL up to the master's
        current LSN."""
        # the xlog-named LSN functions were renamed in v10
        if self.version_to_num(
            master.safe_psql(
                'postgres',
                'show server_version')) >= self.version_to_num('10.0'):
            master_function = 'pg_catalog.pg_current_wal_lsn()'
            replica_function = 'pg_catalog.pg_last_wal_replay_lsn()'
        else:
            master_function = 'pg_catalog.pg_current_xlog_location()'
            replica_function = 'pg_catalog.pg_last_xlog_replay_location()'

        lsn = master.safe_psql(
            'postgres',
            'SELECT {0}'.format(master_function)).rstrip()

        # Wait until replica catch up with master
        replica.poll_query_until(
            'postgres',
            "SELECT '{0}'::pg_lsn <= {1}".format(lsn, replica_function))
|
|
|
|
|
2017-12-19 12:17:58 +02:00
|
|
|
def get_version(self, node):
|
2018-01-27 17:31:19 +02:00
|
|
|
return self.version_to_num(
|
2018-12-17 16:44:42 +02:00
|
|
|
testgres.get_pg_config()['VERSION'].split(" ")[1])
|
2018-01-27 17:31:19 +02:00
|
|
|
|
|
|
|
def get_bin_path(self, binary):
|
|
|
|
return testgres.get_bin_path(binary)
|
2017-12-19 12:17:58 +02:00
|
|
|
|
2017-06-27 07:42:52 +02:00
|
|
|
def del_test_dir(self, module_name, fname):
|
2017-07-12 16:28:28 +02:00
|
|
|
""" Del testdir and optimistically try to del module dir"""
|
2017-06-27 07:42:52 +02:00
|
|
|
try:
|
2017-10-04 16:03:03 +02:00
|
|
|
testgres.clean_all()
|
2017-06-27 07:42:52 +02:00
|
|
|
except:
|
|
|
|
pass
|
|
|
|
|
2017-12-22 01:04:59 +02:00
|
|
|
shutil.rmtree(
|
|
|
|
os.path.join(
|
|
|
|
self.tmp_path,
|
|
|
|
module_name,
|
|
|
|
fname
|
|
|
|
),
|
|
|
|
ignore_errors=True
|
|
|
|
)
|
2017-06-27 07:42:52 +02:00
|
|
|
try:
|
2017-07-12 16:28:28 +02:00
|
|
|
os.rmdir(os.path.join(self.tmp_path, module_name))
|
2017-06-27 07:42:52 +02:00
|
|
|
except:
|
|
|
|
pass
|
2017-09-28 09:32:06 +02:00
|
|
|
|
2019-01-16 15:31:26 +02:00
|
|
|
    def pgdata_content(self, pgdata, ignore_ptrack=True, exclude_dirs=None):
        """Snapshot the content of a data directory for later comparison.

        Take it only after a checkpoint or a backup, while files are stable.
        Returns a dict with keys:
            'pgdata' - the root path,
            'files'  - relpath -> {'is_datafile', 'md5'[, 'md5_per_page']},
            'dirs'   - deepest relative directory paths (parents implied).
        Volatile directories/files (WAL, pid files, ptrack maps when
        ignore_ptrack, extra *exclude_dirs*) are skipped.
        """
        dirs_to_ignore = [
            'pg_xlog', 'pg_wal', 'pg_log',
            'pg_stat_tmp', 'pg_subtrans', 'pg_notify'
        ]
        files_to_ignore = [
            'postmaster.pid', 'postmaster.opts',
            'pg_internal.init', 'postgresql.auto.conf',
            'backup_label', 'tablespace_map', 'recovery.conf',
            'ptrack_control', 'ptrack_init', 'pg_control'
        ]

        if exclude_dirs:
            dirs_to_ignore = dirs_to_ignore + exclude_dirs
        # suffixes_to_ignore = (
        #     '_ptrack'
        # )
        directory_dict = {}
        directory_dict['pgdata'] = pgdata
        directory_dict['files'] = {}
        directory_dict['dirs'] = []
        for root, dirs, files in os.walk(pgdata, followlinks=True):
            # prune ignored directories in place so os.walk skips them
            dirs[:] = [d for d in dirs if d not in dirs_to_ignore]
            for file in files:
                if (
                    file in files_to_ignore or
                    (ignore_ptrack and file.endswith('_ptrack'))
                ):
                        continue

                file_fullpath = os.path.join(root, file)
                file_relpath = os.path.relpath(file_fullpath, pgdata)
                directory_dict['files'][file_relpath] = {'is_datafile': False}
                directory_dict['files'][file_relpath]['md5'] = hashlib.md5(
                    open(file_fullpath, 'rb').read()).hexdigest()

                # crappy algorithm
                # all-digit file names are treated as relation datafiles;
                # NOTE(review): '/' is true division, so size_in_pages is a
                # float under Python 3 -- presumably
                # get_md5_per_page_for_fork tolerates that; confirm
                if file.isdigit():
                    directory_dict['files'][file_relpath]['is_datafile'] = True
                    size_in_pages = os.path.getsize(file_fullpath)/8192
                    directory_dict['files'][file_relpath][
                        'md5_per_page'] = self.get_md5_per_page_for_fork(
                            file_fullpath, size_in_pages
                        )

        for root, dirs, files in os.walk(pgdata, topdown=False, followlinks=True):
            for directory in dirs:
                directory_path = os.path.join(root, directory)
                directory_relpath = os.path.relpath(directory_path, pgdata)

                # skip anything under an ignored directory
                found = False
                for d in dirs_to_ignore:
                    if d in directory_relpath:
                        found = True
                        break

                # check if directory already here as part of larger directory
                if not found:
                    for d in directory_dict['dirs']:
                        # print("OLD dir {0}".format(d))
                        if directory_relpath in d:
                            found = True
                            break

                if not found:
                    directory_dict['dirs'].append(directory_relpath)

        return directory_dict
|
|
|
|
|
|
|
|
    def compare_pgdata(self, original_pgdata, restored_pgdata):
        """Assert that two pgdata_content() snapshots are identical.

        Compares directory sets, file sets, per-file md5 and (for
        datafiles with a whole-file mismatch) per-page md5, accumulating
        all differences into one message before failing via assertFalse.
        """
        fail = False
        error_message = 'Restored PGDATA is not equal to original!\n'

        # Compare directories
        for directory in restored_pgdata['dirs']:
            if directory not in original_pgdata['dirs']:
                fail = True
                error_message += '\nDirectory was not present'
                error_message += ' in original PGDATA: {0}\n'.format(
                    os.path.join(restored_pgdata['pgdata'], directory))

        for directory in original_pgdata['dirs']:
            if directory not in restored_pgdata['dirs']:
                fail = True
                error_message += '\nDirectory dissappeared'
                error_message += ' in restored PGDATA: {0}\n'.format(
                    os.path.join(restored_pgdata['pgdata'], directory))

        for file in restored_pgdata['files']:
            # File is present in RESTORED PGDATA
            # but not present in ORIGINAL
            # only backup_label is allowed
            if file not in original_pgdata['files']:
                fail = True
                error_message += '\nFile is not present'
                error_message += ' in original PGDATA: {0}\n'.format(
                    os.path.join(restored_pgdata['pgdata'], file))

        for file in original_pgdata['files']:
            if file in restored_pgdata['files']:

                if (
                    original_pgdata['files'][file]['md5'] !=
                    restored_pgdata['files'][file]['md5']
                ):
                    fail = True
                    error_message += (
                        '\nFile Checksumm mismatch.\n'
                        'File_old: {0}\nChecksumm_old: {1}\n'
                        'File_new: {2}\nChecksumm_new: {3}\n').format(
                        os.path.join(original_pgdata['pgdata'], file),
                        original_pgdata['files'][file]['md5'],
                        os.path.join(restored_pgdata['pgdata'], file),
                        restored_pgdata['files'][file]['md5']
                    )

                    # drill down to the page level only for datafiles whose
                    # whole-file checksum already differs
                    if original_pgdata['files'][file]['is_datafile']:
                        for page in original_pgdata['files'][file]['md5_per_page']:
                            if page not in restored_pgdata['files'][file]['md5_per_page']:
                                error_message += (
                                    '\n Page {0} dissappeared.\n '
                                    'File: {1}\n').format(
                                        page,
                                        os.path.join(
                                            restored_pgdata['pgdata'],
                                            file
                                        )
                                    )
                                continue

                            if original_pgdata['files'][file][
                                'md5_per_page'][page] != restored_pgdata[
                                    'files'][file]['md5_per_page'][page]:
                                error_message += (
                                    '\n Page checksumm mismatch: {0}\n '
                                    ' PAGE Checksumm_old: {1}\n '
                                    ' PAGE Checksumm_new: {2}\n '
                                    ' File: {3}\n'
                                ).format(
                                    page,
                                    original_pgdata['files'][file][
                                        'md5_per_page'][page],
                                    restored_pgdata['files'][file][
                                        'md5_per_page'][page],
                                    os.path.join(
                                        restored_pgdata['pgdata'], file)
                                )
                        for page in restored_pgdata['files'][file]['md5_per_page']:
                            if page not in original_pgdata['files'][file]['md5_per_page']:
                                error_message += '\n Extra page {0}\n File: {1}\n'.format(
                                    page,
                                    os.path.join(
                                        restored_pgdata['pgdata'], file))

            else:
                error_message += (
                    '\nFile dissappearance.\n '
                    'File: {0}\n').format(
                    os.path.join(restored_pgdata['pgdata'], file)
                )
                fail = True
        self.assertFalse(fail, error_message)
|
2017-12-22 01:04:59 +02:00
|
|
|
|
2018-01-10 15:05:26 +02:00
|
|
|
def get_async_connect(self, database=None, host=None, port=5432):
|
2018-01-10 15:03:33 +02:00
|
|
|
if not database:
|
|
|
|
database = 'postgres'
|
|
|
|
if not host:
|
|
|
|
host = '127.0.0.1'
|
|
|
|
|
|
|
|
return psycopg2.connect(
|
2018-12-17 16:44:42 +02:00
|
|
|
database='postgres',
|
2018-01-10 15:03:33 +02:00
|
|
|
host='127.0.0.1',
|
|
|
|
port=port,
|
2019-01-10 17:17:03 +02:00
|
|
|
async_=True
|
2018-01-10 15:03:33 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
def wait(self, connection):
|
|
|
|
while True:
|
|
|
|
state = connection.poll()
|
|
|
|
if state == psycopg2.extensions.POLL_OK:
|
|
|
|
break
|
|
|
|
elif state == psycopg2.extensions.POLL_WRITE:
|
|
|
|
select.select([], [connection.fileno()], [])
|
|
|
|
elif state == psycopg2.extensions.POLL_READ:
|
|
|
|
select.select([connection.fileno()], [], [])
|
|
|
|
else:
|
2018-12-17 16:44:42 +02:00
|
|
|
raise psycopg2.OperationalError('poll() returned %s' % state)
|
2018-01-10 15:03:33 +02:00
|
|
|
|
|
|
|
    def gdb_attach(self, pid):
        # Attach a gdb/MI session to an already running process by pid.
        return GDBobj([str(pid)], self.verbose, attach=True)
|
|
|
|
|
2017-12-22 01:04:59 +02:00
|
|
|
|
|
|
|
class GdbException(Exception):
    """Raised when driving gdb fails (missing binary, bad breakpoint,
    unexpected MI output)."""

    def __init__(self, message=False):
        self.message = message

    def __str__(self):
        # repr() keeps quotes/escapes visible in the reported message
        return '\n ERROR: {0}\n'.format(repr(self.message))
|
|
|
|
|
|
|
|
|
|
|
|
class GDBobj(ProbackupTest):
|
2018-01-10 15:03:33 +02:00
|
|
|
    def __init__(self, cmd, verbose, attach=False):
        """Start a gdb session in MI2 mode.

        cmd: argv list -- the program + args to debug, or [pid] when
        attach=True.  verbose echoes all gdb traffic to stdout.
        Raises GdbException when gdb is missing or attach fails.
        """
        self.verbose = verbose

        # Check gdb presense
        try:
            gdb_version, _ = subprocess.Popen(
                ['gdb', '--version'],
                stdout=subprocess.PIPE
            ).communicate()
        except OSError:
            raise GdbException("Couldn't find gdb on the path")

        # machine-interface mode so output records are parseable
        self.base_cmd = [
            'gdb',
            '--interpreter',
            'mi2',
        ]

        if attach:
            self.cmd = self.base_cmd + ['--pid'] + cmd
        else:
            self.cmd = self.base_cmd + ['--args'] + cmd

        # Get version
        gdb_version_number = re.search(
            b"^GNU gdb [^\d]*(\d+)\.(\d)",
            gdb_version)
        self.major_version = int(gdb_version_number.group(1))
        self.minor_version = int(gdb_version_number.group(2))

        if self.verbose:
            print([' '.join(map(str, self.cmd))])

        # unbuffered text pipes so MI lines arrive as soon as gdb emits them
        self.proc = subprocess.Popen(
            self.cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            bufsize=0,
            universal_newlines=True
        )
        self.gdb_pid = self.proc.pid

        # discard data from pipe,
        # is there a way to do it a less derpy way?
        # read the startup banner up to the first '(gdb)' prompt
        while True:
            line = self.proc.stdout.readline()

            if 'No such process' in line:
                raise GdbException(line)

            if not line.startswith('(gdb)'):
                pass
            else:
                break
|
|
|
|
|
|
|
|
    def set_breakpoint(self, location):
        """Set a gdb breakpoint at *location*.

        Returns on success; raises GdbException when gdb reports the
        function/line is unknown or the breakpoint would be pending.
        """

        result = self._execute('break ' + location)
        for line in result:
            # console confirmation line
            if line.startswith('~"Breakpoint'):
                return

            # MI async record confirming the breakpoint
            elif line.startswith('=breakpoint-created'):
                return

            elif line.startswith('^error'): #or line.startswith('(gdb)'):
                break

            # echo of our own command -- ignore
            elif line.startswith('&"break'):
                pass

            elif line.startswith('&"Function'):
                raise GdbException(line)

            elif line.startswith('&"No line'):
                raise GdbException(line)

            elif line.startswith('~"Make breakpoint pending on future shared'):
                raise GdbException(line)

        # nothing recognizable in the output -> report it verbatim
        raise GdbException(
            'Failed to set breakpoint.\n Output:\n {0}'.format(result)
        )
|
|
|
|
|
|
|
|
def run_until_break(self):
|
|
|
|
result = self._execute('run', False)
|
2017-12-25 10:27:38 +02:00
|
|
|
for line in result:
|
2017-12-22 01:04:59 +02:00
|
|
|
if line.startswith('*stopped,reason="breakpoint-hit"'):
|
2018-01-10 15:03:33 +02:00
|
|
|
return
|
|
|
|
raise GdbException(
|
|
|
|
'Failed to run until breakpoint.\n'
|
|
|
|
)
|
2017-12-22 01:04:59 +02:00
|
|
|
|
2018-01-10 15:03:33 +02:00
|
|
|
def continue_execution_until_running(self):
|
2017-12-22 01:04:59 +02:00
|
|
|
result = self._execute('continue')
|
2018-01-10 15:03:33 +02:00
|
|
|
|
2017-12-25 10:27:38 +02:00
|
|
|
for line in result:
|
2019-02-26 20:26:30 +02:00
|
|
|
if line.startswith('*running') or line.startswith('^running'):
|
|
|
|
return
|
2018-01-10 15:03:33 +02:00
|
|
|
if line.startswith('*stopped,reason="breakpoint-hit"'):
|
|
|
|
continue
|
|
|
|
if line.startswith('*stopped,reason="exited-normally"'):
|
|
|
|
continue
|
2019-02-26 20:26:30 +02:00
|
|
|
|
|
|
|
raise GdbException(
|
|
|
|
'Failed to continue execution until running.\n'
|
|
|
|
)
|
2018-01-10 15:03:33 +02:00
|
|
|
|
|
|
|
def continue_execution_until_exit(self):
|
|
|
|
result = self._execute('continue', False)
|
|
|
|
|
|
|
|
for line in result:
|
|
|
|
if line.startswith('*running'):
|
|
|
|
continue
|
|
|
|
if line.startswith('*stopped,reason="breakpoint-hit"'):
|
|
|
|
continue
|
2018-01-17 19:51:43 +02:00
|
|
|
if (
|
2019-02-26 20:26:30 +02:00
|
|
|
line.startswith('*stopped,reason="exited') or
|
2018-01-17 19:51:43 +02:00
|
|
|
line == '*stopped\n'
|
|
|
|
):
|
2018-01-10 15:03:33 +02:00
|
|
|
return
|
2019-02-26 20:26:30 +02:00
|
|
|
|
2018-01-10 15:03:33 +02:00
|
|
|
raise GdbException(
|
|
|
|
'Failed to continue execution until exit.\n'
|
|
|
|
)
|
|
|
|
|
2019-02-18 17:16:53 +02:00
|
|
|
def continue_execution_until_error(self):
|
|
|
|
result = self._execute('continue', False)
|
|
|
|
|
|
|
|
for line in result:
|
|
|
|
if line.startswith('^error'):
|
|
|
|
return
|
|
|
|
if line.startswith('*stopped,reason="exited'):
|
|
|
|
return
|
|
|
|
|
|
|
|
raise GdbException(
|
|
|
|
'Failed to continue execution until error.\n')
|
|
|
|
|
2018-01-10 15:03:33 +02:00
|
|
|
def continue_execution_until_break(self, ignore_count=0):
|
|
|
|
if ignore_count > 0:
|
|
|
|
result = self._execute(
|
|
|
|
'continue ' + str(ignore_count),
|
|
|
|
False
|
|
|
|
)
|
|
|
|
else:
|
|
|
|
result = self._execute('continue', False)
|
|
|
|
|
|
|
|
for line in result:
|
2017-12-22 01:04:59 +02:00
|
|
|
if line.startswith('*stopped,reason="breakpoint-hit"'):
|
2019-03-02 01:29:58 +02:00
|
|
|
return
|
2017-12-22 01:04:59 +02:00
|
|
|
if line.startswith('*stopped,reason="exited-normally"'):
|
2019-03-02 01:29:58 +02:00
|
|
|
break
|
2019-02-26 20:26:30 +02:00
|
|
|
|
2019-03-02 01:29:58 +02:00
|
|
|
raise GdbException(
|
|
|
|
'Failed to continue execution until break.\n')
|
2018-01-10 15:03:33 +02:00
|
|
|
|
|
|
|
def stopped_in_breakpoint(self):
|
|
|
|
output = []
|
|
|
|
while True:
|
|
|
|
line = self.proc.stdout.readline()
|
|
|
|
output += [line]
|
|
|
|
if self.verbose:
|
|
|
|
print(line)
|
|
|
|
if line.startswith('*stopped,reason="breakpoint-hit"'):
|
|
|
|
return True
|
|
|
|
return False
|
2017-12-22 01:04:59 +02:00
|
|
|
|
|
|
|
# use for breakpoint, run, continue
|
2018-01-10 15:03:33 +02:00
|
|
|
    # use for breakpoint, run, continue
    def _execute(self, cmd, running=True):
        """Send one gdb command and collect its MI output lines.

        Reads until a terminating record: '^done', '*stopped', '^error',
        or -- when running=True -- a '*running'/'^running' record.
        Returns the list of raw output lines (terminator included).
        """
        output = []
        self.proc.stdin.flush()
        self.proc.stdin.write(cmd + '\n')
        self.proc.stdin.flush()

        while True:
            line = self.proc.stdout.readline()
            output += [line]
            if self.verbose:
                print(repr(line))
            if line.startswith('^done') or line.startswith('*stopped'):
                break
            if line.startswith('^error'):
                break
            if running and (line.startswith('*running') or line.startswith('^running')):
                # if running and line.startswith('*running'):
                break
        return output
|