pg_probackup (mirror of https://github.com/postgrespro/pg_probackup.git)
tests: gdb class, ptrack test for PGPRO-1230, PEP8
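Summary: this commit adds a GDBobj helper class that drives pg_probackup under gdb, a new ptrack test for PGPRO-1230 that pauses a running backup at a breakpoint while the database keeps changing, and PEP8 reflowing of the existing test helpers. As a minimal standalone sketch (assumptions: gdb is installed; the binary path and breakpoint symbol are placeholders), the technique the new class relies on boils down to running the target under gdb's machine interface and matching its structured output records by prefix:

# Sketch of the GDB/MI approach used by the GDBobj class added below.
# '/path/to/pg_probackup' and the breakpoint name are placeholders.
import subprocess

proc = subprocess.Popen(
    ['gdb', '--interpreter', 'mi2', '--args', '/path/to/pg_probackup', 'backup'],
    stdin=subprocess.PIPE, stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT, bufsize=0, universal_newlines=True)

proc.stdin.write('break make_pagemap_from_ptrack\n')
proc.stdin.flush()
proc.stdin.write('run\n')
proc.stdin.flush()
# MI prints records such as '^done', '~"Breakpoint 1 at ..."' and
# '*stopped,reason="breakpoint-hit",...'; the helper matches these prefixes.
for line in iter(proc.stdout.readline, ''):
    if line.startswith('*stopped'):
        break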
@@ -11,38 +11,38 @@ import pwd
 
 
 idx_ptrack = {
     't_heap': {
         'type': 'heap'
     },
     't_btree': {
         'type': 'btree',
         'column': 'text',
         'relation': 't_heap'
     },
     't_seq': {
         'type': 'seq',
         'column': 't_seq',
         'relation': 't_heap'
     },
     't_spgist': {
         'type': 'spgist',
         'column': 'text',
         'relation': 't_heap'
     },
     't_brin': {
         'type': 'brin',
         'column': 'text',
         'relation': 't_heap'
     },
     't_gist': {
         'type': 'gist',
         'column': 'tsvector',
         'relation': 't_heap'
     },
     't_gin': {
         'type': 'gin',
         'column': 'tsvector',
         'relation': 't_heap'
     },
 }
 
@@ -71,15 +71,6 @@ Splitted Body
 {body_split}
 """
 
-# You can lookup error message and cmdline in exception object attributes
-class ProbackupException(Exception):
-    def __init__(self, message, cmd):
-        self.message = message
-        self.cmd = cmd
-    #need that to make second raise
-    def __str__(self):
-        return '\n ERROR: {0}\n CMD: {1}'.format(repr(self.message), self.cmd)
-
 
 def dir_files(base_dir):
     out_list = []
@@ -87,19 +78,36 @@ def dir_files(base_dir):
         if dir_name != base_dir:
             out_list.append(os.path.relpath(dir_name, base_dir))
         for fname in file_list:
-            out_list.append(os.path.relpath(os.path.join(dir_name, fname), base_dir))
+            out_list.append(
+                os.path.relpath(os.path.join(
+                    dir_name, fname), base_dir)
+                )
     out_list.sort()
    return out_list


def is_enterprise():
    # pg_config --help
-    p = subprocess.Popen([os.environ['PG_CONFIG'], '--help'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    p = subprocess.Popen(
+        [os.environ['PG_CONFIG'], '--help'],
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE
+    )
    if 'postgrespro.ru' in p.communicate()[0]:
        return True
    else:
        return False


+class ProbackupException(Exception):
+    def __init__(self, message, cmd):
+        self.message = message
+        self.cmd = cmd
+
+    def __str__(self):
+        return '\n ERROR: {0}\n CMD: {1}'.format(repr(self.message), self.cmd)
+
+
 class ProbackupTest(object):
     # Class attributes
     enterprise = is_enterprise()
@@ -147,8 +155,12 @@ class ProbackupTest(object):
             self.archive_compress = True
 
         self.helpers_path = os.path.dirname(os.path.realpath(__file__))
-        self.dir_path = os.path.abspath(os.path.join(self.helpers_path, os.pardir))
-        self.tmp_path = os.path.abspath(os.path.join(self.dir_path, 'tmp_dirs'))
+        self.dir_path = os.path.abspath(
+            os.path.join(self.helpers_path, os.pardir)
+            )
+        self.tmp_path = os.path.abspath(
+            os.path.join(self.dir_path, 'tmp_dirs')
+            )
         try:
             os.makedirs(os.path.join(self.dir_path, 'tmp_dirs'))
         except:
@@ -157,7 +169,10 @@ class ProbackupTest(object):
         self.user = self.get_username()
         self.probackup_path = None
         if "PGPROBACKUPBIN" in self.test_env:
-            if os.path.isfile(self.test_env["PGPROBACKUPBIN"]) and os.access(self.test_env["PGPROBACKUPBIN"], os.X_OK):
+            if (
+                os.path.isfile(self.test_env["PGPROBACKUPBIN"]) and
+                os.access(self.test_env["PGPROBACKUPBIN"], os.X_OK)
+            ):
                 self.probackup_path = self.test_env["PGPROBACKUPBIN"]
             else:
                 if self.verbose:
@@ -184,10 +199,12 @@ class ProbackupTest(object):
         node.append_conf("postgresql.auto.conf", "fsync = on")
         node.append_conf("postgresql.auto.conf", "wal_level = minimal")
 
-        node.append_conf("postgresql.auto.conf", "log_line_prefix = '%t [%p]: [%l-1] '")
+        node.append_conf(
+            "postgresql.auto.conf", "log_line_prefix = '%t [%p]: [%l-1] '")
         node.append_conf("postgresql.auto.conf", "log_statement = none")
         node.append_conf("postgresql.auto.conf", "log_duration = on")
-        node.append_conf("postgresql.auto.conf", "log_min_duration_statement = 0")
+        node.append_conf(
+            "postgresql.auto.conf", "log_min_duration_statement = 0")
         node.append_conf("postgresql.auto.conf", "log_connections = on")
         node.append_conf("postgresql.auto.conf", "log_disconnections = on")
 
@@ -203,36 +220,50 @@ class ProbackupTest(object):
 
     def create_tblspace_in_node(self, node, tblspc_name, cfs=False):
         res = node.execute(
-            "postgres", "select exists (select 1 from pg_tablespace where spcname = '{0}')".format(
-                tblspc_name))
+            "postgres",
+            "select exists"
+            " (select 1 from pg_tablespace where spcname = '{0}')".format(
+                tblspc_name)
+        )
         # Check that tablespace with name 'tblspc_name' do not exists already
-        self.assertFalse(res[0][0], 'Tablespace "{0}" already exists'.format(tblspc_name))
+        self.assertFalse(
+            res[0][0],
+            'Tablespace "{0}" already exists'.format(tblspc_name)
+        )
 
         tblspc_path = os.path.join(node.base_dir, '{0}'.format(tblspc_name))
-        cmd = "CREATE TABLESPACE {0} LOCATION '{1}'".format(tblspc_name, tblspc_path)
+        cmd = "CREATE TABLESPACE {0} LOCATION '{1}'".format(
+            tblspc_name, tblspc_path)
         if cfs:
             cmd += " with (compression=true)"
         os.makedirs(tblspc_path)
         res = node.safe_psql("postgres", cmd)
         # Check that tablespace was successfully created
-        # self.assertEqual(res[0], 0, 'Failed to create tablespace with cmd: {0}'.format(cmd))
+        # self.assertEqual(
+        #     res[0], 0,
+        #     'Failed to create tablespace with cmd: {0}'.format(cmd))
 
     def get_tblspace_path(self, node, tblspc_name):
         return os.path.join(node.base_dir, tblspc_name)
 
     def get_fork_size(self, node, fork_name):
-        return node.execute("postgres",
+        return node.execute(
+            "postgres",
             "select pg_relation_size('{0}')/8192".format(fork_name))[0][0]
 
     def get_fork_path(self, node, fork_name):
-        return os.path.join(node.base_dir, 'data',
-            node.execute("postgres", "select pg_relation_filepath('{0}')".format(fork_name))[0][0])
+        return os.path.join(
+            node.base_dir, 'data', node.execute(
+                "postgres",
+                "select pg_relation_filepath('{0}')".format(
+                    fork_name))[0][0]
+            )
 
     def get_md5_per_page_for_fork(self, file, size_in_pages):
         pages_per_segment = {}
         md5_per_page = {}
         nsegments = size_in_pages/131072
-        if size_in_pages%131072 != 0:
+        if size_in_pages % 131072 != 0:
             nsegments = nsegments + 1
 
         size = size_in_pages
@@ -250,12 +281,15 @@ class ProbackupTest(object):
                 start_page = 0
                 end_page = pages_per_segment[segment_number]
             else:
-                file_desc = os.open(file+".{0}".format(segment_number), os.O_RDONLY)
+                file_desc = os.open(
+                    file+".{0}".format(segment_number), os.O_RDONLY
+                    )
                 start_page = max(md5_per_page)+1
                 end_page = end_page + pages_per_segment[segment_number]
 
             for page in range(start_page, end_page):
-                md5_per_page[page] = hashlib.md5(os.read(file_desc, 8192)).hexdigest()
+                md5_per_page[page] = hashlib.md5(
+                    os.read(file_desc, 8192)).hexdigest()
                 offset += 8192
                 os.lseek(file_desc, offset, 0)
             os.close(file_desc)
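Note on the constants in get_md5_per_page_for_fork above: PostgreSQL splits a relation fork into 1 GiB segment files, and 131072 is the number of 8 KiB pages per segment (131072 * 8192 B = 1 GiB), so the helper hashes each segment page by page. A standalone sketch of the same idea (the segment path is a placeholder):

import hashlib

BLCKSZ = 8192          # PostgreSQL page size in bytes
RELSEG_SIZE = 131072   # pages per segment file: 131072 * 8192 B = 1 GiB

def md5_per_page(path):
    """Return {page_number: md5_hex} for one segment file of a relation fork."""
    checksums = {}
    with open(path, 'rb') as f:
        page = 0
        while True:
            data = f.read(BLCKSZ)
            if not data:
                break
            checksums[page] = hashlib.md5(data).hexdigest()
            page += 1
    return checksums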
@@ -273,7 +307,7 @@ class ProbackupTest(object):
         page_body_size = 8192-header_size
         byte_size = os.path.getsize(file + '_ptrack')
         npages = byte_size/8192
-        if byte_size%8192 != 0:
+        if byte_size % 8192 != 0:
             print('Ptrack page is not 8k aligned')
             sys.exit(1)
 
@@ -283,14 +317,17 @@ class ProbackupTest(object):
             offset = 8192*page+header_size
             os.lseek(file, offset, 0)
             lots_of_bytes = os.read(file, page_body_size)
-            byte_list = [lots_of_bytes[i:i+1] for i in range(len(lots_of_bytes))]
+            byte_list = [
+                lots_of_bytes[i:i+1] for i in range(len(lots_of_bytes))
+            ]
             for byte in byte_list:
-                #byte_inverted = bin(int(byte, base=16))[2:][::-1]
-                #bits = (byte >> x) & 1 for x in range(7, -1, -1)
+                # byte_inverted = bin(int(byte, base=16))[2:][::-1]
+                # bits = (byte >> x) & 1 for x in range(7, -1, -1)
                 byte_inverted = bin(ord(byte))[2:].rjust(8, '0')[::-1]
                 for bit in byte_inverted:
                     # if len(ptrack_bits_for_fork) < size:
                     ptrack_bits_for_fork.append(int(bit))
 
         os.close(file)
         return ptrack_bits_for_fork
 
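Note on the bit twiddling above: each byte of the '_ptrack' map fork holds the change bits for eight consecutive pages, and the expression bin(ord(byte))[2:].rjust(8, '0')[::-1] unpacks them least significant bit first, so bit 0 maps to the first page of the group. A worked example:

# Byte 0x05 is 0b00000101: in LSB-first order, pages 0 and 2 of this
# eight-page group are marked as changed.
byte = b'\x05'
bits = [int(b) for b in bin(ord(byte))[2:].rjust(8, '0')[::-1]]
assert bits == [1, 0, 1, 0, 0, 0, 0, 0]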
@@ -306,83 +343,150 @@ class ProbackupTest(object):
                 # Ptrack should be equal to 1
                 if idx_dict['ptrack'][PageNum] != 1:
                     if self.verbose:
-                        print('Page Number {0} of type {1} was added, but ptrack value is {2}. THIS IS BAD'.format(
-                            PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum]))
+                        print(
+                            'Page Number {0} of type {1} was added,'
+                            ' but ptrack value is {2}. THIS IS BAD'.format(
+                                PageNum, idx_dict['type'],
+                                idx_dict['ptrack'][PageNum])
+                        )
                     # print(idx_dict)
                     success = False
                 continue
             if PageNum not in idx_dict['new_pages']:
                 # Page is not present now, meaning that relation got smaller
-                # Ptrack should be equal to 0, We are not freaking out about false positive stuff
+                # Ptrack should be equal to 0,
+                # We are not freaking out about false positive stuff
                 if idx_dict['ptrack'][PageNum] != 0:
                     if self.verbose:
-                        print('Page Number {0} of type {1} was deleted, but ptrack value is {2}'.format(
-                            PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum]))
+                        print(
+                            'Page Number {0} of type {1} was deleted,'
+                            ' but ptrack value is {2}'.format(
+                                PageNum, idx_dict['type'],
+                                idx_dict['ptrack'][PageNum])
+                        )
                 continue
-            # Ok, all pages in new_pages that do not have corresponding page in old_pages
-            # are been dealt with. We can now safely proceed to comparing old and new pages
-            if idx_dict['new_pages'][PageNum] != idx_dict['old_pages'][PageNum]:
-                # Page has been changed, meaning that ptrack should be equal to 1
+            # Ok, all pages in new_pages that do not have
+            # corresponding page in old_pages are been dealt with.
+            # We can now safely proceed to comparing old and new pages
+            if idx_dict['new_pages'][
+                    PageNum] != idx_dict['old_pages'][PageNum]:
+                # Page has been changed,
+                # meaning that ptrack should be equal to 1
                 if idx_dict['ptrack'][PageNum] != 1:
                     if self.verbose:
-                        print('Page Number {0} of type {1} was changed, but ptrack value is {2}. THIS IS BAD'.format(
-                            PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum]))
-                        print("\n Old checksumm: {0}\n New checksumm: {1}".format(idx_dict['old_pages'][PageNum], idx_dict['new_pages'][PageNum]))
-                        #print(idx_dict)
+                        print(
+                            'Page Number {0} of type {1} was changed,'
+                            ' but ptrack value is {2}. THIS IS BAD'.format(
+                                PageNum, idx_dict['type'],
+                                idx_dict['ptrack'][PageNum])
+                        )
+                        print(
+                            "\n Old checksumm: {0}\n"
+                            " New checksumm: {1}".format(
+                                idx_dict['old_pages'][PageNum],
+                                idx_dict['new_pages'][PageNum])
+                        )
 
                     if PageNum == 0 and idx_dict['type'] == 'spgist':
                         if self.verbose:
-                            print('SPGIST is a special snowflake, so don`t fret about losing ptrack for blknum 0')
+                            print(
+                                'SPGIST is a special snowflake, so don`t'
+                                'fret about losing ptrack for blknum 0'
+                            )
                         continue
                     success = False
             else:
-                # Page has not been changed, meaning that ptrack should be equal to 0
+                # Page has not been changed,
+                # meaning that ptrack should be equal to 0
                 if idx_dict['ptrack'][PageNum] != 0:
                     if self.verbose:
-                        print('Page Number {0} of type {1} was not changed, but ptrack value is {2}'.format(
-                            PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum]))
-                        #print(idx_dict)
-        #self.assertEqual(success, True, 'Ptrack does not correspond to state of its pages.\n Gory Details: \n{0}'.format(
-        #    idx_dict['type'], idx_dict))
+                        print(
+                            'Page Number {0} of type {1} was not changed,'
+                            ' but ptrack value is {2}'.format(
+                                PageNum, idx_dict['type'],
+                                idx_dict['ptrack'][PageNum]
+                            )
+                        )
+
+        self.assertTrue(
+            success, 'Ptrack does not correspond to state'
+            ' of its own pages.\n Gory Details: \n{0}'.format(
+                idx_dict['type'], idx_dict
+            )
+        )
 
     def check_ptrack_recovery(self, idx_dict):
         size = idx_dict['size']
         for PageNum in range(size):
             if idx_dict['ptrack'][PageNum] != 1:
-                self.assertTrue(False, 'Recovery for Page Number {0} of Type {1} was conducted, but ptrack value is {2}. THIS IS BAD\n IDX_DICT: {3}'.format(
-                    PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum], idx_dict))
+                self.assertTrue(
+                    False,
+                    'Recovery for Page Number {0} of Type {1}'
+                    ' was conducted, but ptrack value is {2}.'
+                    ' THIS IS BAD\n IDX_DICT: {3}'.format(
+                        PageNum, idx_dict['type'],
+                        idx_dict['ptrack'][PageNum],
+                        idx_dict
+                    )
+                )
 
     def check_ptrack_clean(self, idx_dict, size):
         for PageNum in range(size):
             if idx_dict['ptrack'][PageNum] != 0:
-                self.assertTrue(False, 'Ptrack for Page Number {0} of Type {1} should be clean, but ptrack value is {2}.\n THIS IS BAD\n IDX_DICT: {3}'.format(
-                    PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum], idx_dict))
+                self.assertTrue(
+                    False,
+                    'Ptrack for Page Number {0} of Type {1}'
+                    ' should be clean, but ptrack value is {2}.'
+                    '\n THIS IS BAD\n IDX_DICT: {3}'.format(
+                        PageNum,
+                        idx_dict['type'],
+                        idx_dict['ptrack'][PageNum],
+                        idx_dict
+                    )
+                )
 
-    def run_pb(self, command):
+    def run_pb(self, command, async=False, gdb=False):
         try:
-            self.cmd = [' '.join(map(str,[self.probackup_path] + command))]
+            self.cmd = [' '.join(map(str, [self.probackup_path] + command))]
             if self.verbose:
                 print(self.cmd)
-            self.output = subprocess.check_output(
-                [self.probackup_path] + command,
-                stderr=subprocess.STDOUT,
-                env=self.test_env
-                ).decode("utf-8")
-            if command[0] == 'backup':
-                # return backup ID
-                for line in self.output.splitlines():
-                    if 'INFO: Backup' and 'completed' in line:
-                        return line.split()[2]
-            else:
-                return self.output
+            if gdb:
+                return GDBobj([self.probackup_path] + command, verbose=True)
+            if async:
+                return subprocess.Popen(
+                    self.cmd,
+                    stdout=subprocess.PIPE,
+                    stderr=subprocess.PIPE,
+                    env=self.test_env
+                )
+            else:
+                self.output = subprocess.check_output(
+                    [self.probackup_path] + command,
+                    stderr=subprocess.STDOUT,
+                    env=self.test_env
+                    ).decode("utf-8")
+                if command[0] == 'backup':
+                    # return backup ID
+                    for line in self.output.splitlines():
+                        if 'INFO: Backup' and 'completed' in line:
+                            return line.split()[2]
+                else:
+                    return self.output
         except subprocess.CalledProcessError as e:
             raise ProbackupException(e.output.decode("utf-8"), self.cmd)
 
     def run_binary(self, command, async=False):
         try:
             if async:
                 if self.verbose:
                     print(command)
-                return subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=self.test_env)
+                return subprocess.Popen(
+                    command,
+                    stdin=subprocess.PIPE,
+                    stdout=subprocess.PIPE,
+                    stderr=subprocess.PIPE,
+                    env=self.test_env
+                )
             else:
                 if self.verbose:
                     print(command)
@@ -393,7 +497,7 @@ class ProbackupTest(object):
                 ).decode("utf-8")
             return self.output
         except subprocess.CalledProcessError as e:
             raise ProbackupException(e.output.decode("utf-8"), command)
 
     def init_pb(self, backup_dir):
 
@@ -423,7 +527,10 @@ class ProbackupTest(object):
     def clean_pb(self, backup_dir):
         shutil.rmtree(backup_dir, ignore_errors=True)
 
-    def backup_node(self, backup_dir, instance, node, data_dir=False, backup_type="full", options=[]):
+    def backup_node(
+            self, backup_dir, instance, node, data_dir=False,
+            backup_type="full", options=[], async=False, gdb=False
+            ):
         if not node and not data_dir:
             print('You must provide ether node or data_dir for backup')
             exit(1)
@@ -437,7 +544,7 @@ class ProbackupTest(object):
         cmd_list = [
             "backup",
             "-B", backup_dir,
             # "-D", pgdata,
             "-p", "%i" % node.port,
             "-d", "postgres",
             "--instance={0}".format(instance)
@@ -445,9 +552,12 @@ class ProbackupTest(object):
         if backup_type:
             cmd_list += ["-b", backup_type]
 
-        return self.run_pb(cmd_list + options)
+        return self.run_pb(cmd_list + options, async, gdb)
 
-    def restore_node(self, backup_dir, instance, node=False, data_dir=None, backup_id=None, options=[]):
+    def restore_node(
+            self, backup_dir, instance, node=False,
+            data_dir=None, backup_id=None, options=[]
+            ):
         if data_dir is None:
             data_dir = node.data_dir
 
@@ -462,7 +572,10 @@ class ProbackupTest(object):
 
         return self.run_pb(cmd_list + options)
 
-    def show_pb(self, backup_dir, instance=None, backup_id=None, options=[], as_text=False):
+    def show_pb(
+            self, backup_dir, instance=None, backup_id=None,
+            options=[], as_text=False
+            ):
 
         backup_list = []
         specific_record = {}
@@ -485,7 +598,8 @@ class ProbackupTest(object):
         if instance is not None and backup_id is None:
             # cut header(ID, Mode, etc) from show as single string
             header = show_splitted[1:2][0]
-            # cut backup records from show as single list with string for every backup record
+            # cut backup records from show as single list
+            # with string for every backup record
             body = show_splitted[3:]
             # inverse list so oldest record come first
             body = body[::-1]
@@ -496,9 +610,11 @@ class ProbackupTest(object):
                 if i == '':
                     header_split.remove(i)
                     continue
-            header_split = [header_element.rstrip() for header_element in header_split]
+            header_split = [
+                header_element.rstrip() for header_element in header_split
+            ]
             for backup_record in body:
-                # split string in list with string for every backup record element
+                # split list with str for every backup record element
                 backup_record_split = re.split(" +", backup_record)
                 # Remove empty items
                 for i in backup_record_split:
@@ -507,7 +623,9 @@ class ProbackupTest(object):
                 if len(header_split) != len(backup_record_split):
                     print(warning.format(
                         header=header, body=body,
-                        header_split=header_split, body_split=backup_record_split))
+                        header_split=header_split,
+                        body_split=backup_record_split)
+                    )
                     exit(1)
                 new_dict = dict(zip(header_split, backup_record_split))
                 backup_list.append(new_dict)
@@ -517,7 +635,9 @@ class ProbackupTest(object):
             # and other garbage then reconstruct it as dictionary
             # print show_splitted
             sanitized_show = [item for item in show_splitted if item]
-            sanitized_show = [item for item in sanitized_show if not item.startswith('#')]
+            sanitized_show = [
+                item for item in sanitized_show if not item.startswith('#')
+            ]
             # print sanitized_show
             for line in sanitized_show:
                 name, var = line.partition(" = ")[::2]
@@ -526,7 +646,10 @@ class ProbackupTest(object):
                 specific_record[name.strip()] = var
             return specific_record
 
-    def validate_pb(self, backup_dir, instance=None, backup_id=None, options=[]):
+    def validate_pb(
+            self, backup_dir, instance=None,
+            backup_id=None, options=[]
+            ):
 
         cmd_list = [
             "validate",
@@ -573,10 +696,11 @@ class ProbackupTest(object):
             out_dict[name] = var
         return out_dict
 
 
     def get_recovery_conf(self, node):
         out_dict = {}
-        with open(os.path.join(node.data_dir, "recovery.conf"), "r") as recovery_conf:
+        with open(
+            os.path.join(node.data_dir, "recovery.conf"), "r"
+        ) as recovery_conf:
             for line in recovery_conf:
                 try:
                     key, value = line.split("=")
@@ -605,49 +729,82 @@ class ProbackupTest(object):
         if self.archive_compress:
             node.append_conf(
                 "postgresql.auto.conf",
-                "archive_command = '{0} archive-push -B {1} --instance={2} --compress --wal-file-path %p --wal-file-name %f'".format(
-                    self.probackup_path, backup_dir, instance))
+                "archive_command = '{0} archive-push -B {1}"
+                " --instance={2} --compress --wal-file-path"
+                " %p --wal-file-name %f'".format(
+                    self.probackup_path, backup_dir, instance)
+            )
         else:
             node.append_conf(
                 "postgresql.auto.conf",
-                "archive_command = '{0} archive-push -B {1} --instance={2} --wal-file-path %p --wal-file-name %f'".format(
-                    self.probackup_path, backup_dir, instance))
-        #elif os.name == 'nt':
+                "archive_command = '{0} archive-push"
+                " -B {1} --instance={2} --wal-file-path"
+                " %p --wal-file-name %f'".format(
+                    self.probackup_path, backup_dir, instance)
+            )
+        # elif os.name == 'nt':
         #    node.append_conf(
         #        "postgresql.auto.conf",
         #        "archive_command = 'copy %p {0}\\%f'".format(archive_dir)
         #        )
 
-    def set_replica(self, master, replica, replica_name='replica', synchronous=False):
-        replica.append_conf('postgresql.auto.conf', 'port = {0}'.format(replica.port))
+    def set_replica(
+            self, master, replica,
+            replica_name='replica',
+            synchronous=False
+            ):
+        replica.append_conf(
+            "postgresql.auto.conf", "port = {0}".format(replica.port))
         replica.append_conf('postgresql.auto.conf', 'hot_standby = on')
         replica.append_conf('recovery.conf', "standby_mode = 'on'")
-        replica.append_conf('recovery.conf',
-            "primary_conninfo = 'user={0} port={1} application_name={2} sslmode=prefer sslcompression=1'".format(
-                self.user, master.port, replica_name))
+        replica.append_conf(
+            "recovery.conf",
+            "primary_conninfo = 'user={0} port={1} application_name={2}"
+            " sslmode=prefer sslcompression=1'".format(
+                self.user, master.port, replica_name)
+        )
         if synchronous:
-            master.append_conf('postgresql.auto.conf', "synchronous_standby_names='{0}'".format(replica_name))
-            master.append_conf('postgresql.auto.conf', "synchronous_commit='remote_apply'")
+            master.append_conf(
+                "postgresql.auto.conf",
+                "synchronous_standby_names='{0}'".format(replica_name)
+            )
+            master.append_conf(
+                'postgresql.auto.conf',
+                "synchronous_commit='remote_apply'"
+            )
         master.reload()
 
     def wrong_wal_clean(self, node, wal_size):
         wals_dir = os.path.join(self.backup_dir(node), "wal")
-        wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f))]
+        wals = [
+            f for f in os.listdir(wals_dir) if os.path.isfile(
+                os.path.join(wals_dir, f))
+        ]
         wals.sort()
         file_path = os.path.join(wals_dir, wals[-1])
         if os.path.getsize(file_path) != wal_size:
             os.remove(file_path)
 
     def guc_wal_segment_size(self, node):
-        var = node.execute("postgres", "select setting from pg_settings where name = 'wal_segment_size'")
+        var = node.execute(
+            "postgres",
+            "select setting from pg_settings where name = 'wal_segment_size'"
+        )
         return int(var[0][0]) * self.guc_wal_block_size(node)
 
     def guc_wal_block_size(self, node):
-        var = node.execute("postgres", "select setting from pg_settings where name = 'wal_block_size'")
+        var = node.execute(
+            "postgres",
+            "select setting from pg_settings where name = 'wal_block_size'"
+        )
        return int(var[0][0])
 
     def get_pgpro_edition(self, node):
-        if node.execute("postgres", "select exists(select 1 from pg_proc where proname = 'pgpro_edition')")[0][0]:
+        if node.execute(
+            "postgres",
+            "select exists (select 1 from"
+            " pg_proc where proname = 'pgpro_edition')"
+        )[0][0]:
             var = node.execute("postgres", "select pgpro_edition()")
             return str(var[0][0])
         else:
@@ -659,7 +816,9 @@ class ProbackupTest(object):
 
     def switch_wal_segment(self, node):
         """ Execute pg_switch_wal/xlog() in given node"""
-        if testgres.version_to_num(node.safe_psql("postgres", "show server_version")) >= testgres.version_to_num('10.0'):
+        if testgres.version_to_num(
+            node.safe_psql("postgres", "show server_version")
+        ) >= testgres.version_to_num('10.0'):
             node.safe_psql("postgres", "select pg_switch_wal()")
         else:
             node.safe_psql("postgres", "select pg_switch_xlog()")
@@ -674,29 +833,48 @@ class ProbackupTest(object):
         except:
             pass
 
-        shutil.rmtree(os.path.join(self.tmp_path, module_name, fname),
-            ignore_errors=True)
+        shutil.rmtree(
+            os.path.join(
+                self.tmp_path,
+                module_name,
+                fname
+            ),
+            ignore_errors=True
+        )
         try:
             os.rmdir(os.path.join(self.tmp_path, module_name))
         except:
             pass
 
     def pgdata_content(self, directory):
-        """ return dict with directory content. TAKE IT AFTER CHECKPOINT or BACKUP"""
-        dirs_to_ignore = ['pg_xlog', 'pg_wal', 'pg_log', 'pg_stat_tmp', 'pg_subtrans', 'pg_notify']
-        files_to_ignore = ['postmaster.pid', 'postmaster.opts', 'pg_internal.init', 'postgresql.auto.conf']
-        suffixes_to_ignore = ('_ptrack', 'ptrack_control', 'pg_control', 'ptrack_init')
+        """ return dict with directory content. "
+        " TAKE IT AFTER CHECKPOINT or BACKUP"""
+        dirs_to_ignore = [
+            'pg_xlog', 'pg_wal', 'pg_log',
+            'pg_stat_tmp', 'pg_subtrans', 'pg_notify'
+        ]
+        files_to_ignore = [
+            'postmaster.pid', 'postmaster.opts',
+            'pg_internal.init', 'postgresql.auto.conf'
+        ]
+        suffixes_to_ignore = (
+            '_ptrack', 'ptrack_control',
+            'pg_control', 'ptrack_init'
+        )
         directory_dict = {}
         directory_dict['pgdata'] = directory
         directory_dict['files'] = {}
         for root, dirs, files in os.walk(directory, followlinks=True):
             dirs[:] = [d for d in dirs if d not in dirs_to_ignore]
             for file in files:
-                if file in files_to_ignore or file.endswith(suffixes_to_ignore):
+                if file in files_to_ignore or file.endswith(
+                    suffixes_to_ignore
+                ):
                     continue
-                file = os.path.join(root,file)
+                file = os.path.join(root, file)
                 file_relpath = os.path.relpath(file, directory)
-                directory_dict['files'][file_relpath] = hashlib.md5(open(file, 'rb').read()).hexdigest()
+                directory_dict['files'][file_relpath] = hashlib.md5(
+                    open(file, 'rb').read()).hexdigest()
         return directory_dict
 
     def compare_pgdata(self, original_pgdata, restored_pgdata):
@@ -705,14 +883,114 @@ class ProbackupTest(object):
         error_message = ''
         for file in original_pgdata['files']:
             if file in restored_pgdata['files']:
-                if original_pgdata['files'][file] != restored_pgdata['files'][file]:
-                    error_message += '\nChecksumm mismatch.\n File_old: {0}\n Checksumm_old: {1}\n File_new: {2}\n Checksumm_new: {3}\n'.format(
+                if (
+                    original_pgdata['files'][file] !=
+                    restored_pgdata['files'][file]
+                ):
+                    error_message += '\nChecksumm mismatch.\n'
+                    ' File_old: {0}\n Checksumm_old: {1}\n'
+                    ' File_new: {2}\n Checksumm_new: {3}\n'.format(
                         os.path.join(original_pgdata['pgdata'], file),
                         original_pgdata['files'][file],
                         os.path.join(restored_pgdata['pgdata'], file),
-                        restored_pgdata['files'][file])
+                        restored_pgdata['files'][file]
+                    )
                     fail = True
             else:
-                error_message += '\nFile dissappearance. File: {0}/{1}'.format(restored_pgdata['pgdata'], file)
+                error_message += '\nFile dissappearance.'
+                ' File: {0}/{1}'.format(restored_pgdata['pgdata'], file)
                 fail = True
         self.assertFalse(fail, error_message)
 
 
+class GdbException(Exception):
+    def __init__(self, message=False):
+        self.message = message
+
+    def __str__(self):
+        return '\n ERROR: {0}\n'.format(repr(self.message))
+
+
+class GDBobj(ProbackupTest):
+    def __init__(self, cmd, verbose):
+        self.base_cmd = [
+            '/usr/bin/gdb',
+            '--interpreter',
+            'mi2',
+            '--args'
+        ] + cmd
+        self.verbose = verbose
+        if self.verbose:
+            print([' '.join(map(str, self.base_cmd))])
+
+        self.proc = subprocess.Popen(
+            self.base_cmd,
+            stdin=subprocess.PIPE,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            bufsize=0, universal_newlines=True
+        )
+        self.gdb_pid = self.proc.pid
+
+        # discard stuff
+        while True:
+            line = self.proc.stdout.readline()
+            if not line.startswith('(gdb)'):
+                pass
+            else:
+                break
+
+    def set_breakpoint(self, location):
+        result = self._execute('break ' + location)
+        success = False
+        for line in result.splitlines():
+            # Success
+            if line.startswith('~"Breakpoint'):
+                success = True
+                break
+            if line.startswith('^error') or line.startswith('(gdb)'):
+                break
+            # discard initial data from pipe,
+            # is there a way to do it a less derpy way?
+            if line.startswith('&'):
+                if line.startswith('&"break'):
+                    pass
+                if line.startswith('&"Function'):
+                    GdbBreakpointException = GdbException()
+                    raise GdbBreakpointException(line)
+                if line.startswith('&"No line'):
+                    GdbBreakpointException = GdbException()
+                    raise GdbBreakpointException(line)
+        return success
+
+    def run(self):
+        result = self._execute('run')
+        for line in result.splitlines():
+            if line.startswith('*stopped,reason="breakpoint-hit"'):
+                return 'breakpoint-hit'
+            if line.startswith('*stopped,reason="exited-normally"'):
+                return 'exit correct'
+
+    def continue_execution(self, sync=True):
+        result = self._execute('continue')
+        for line in result.splitlines():
+            if line.startswith('*stopped,reason="breakpoint-hit"'):
+                return 'breakpoint-hit'
+            if line.startswith('*stopped,reason="exited-normally"'):
+                return 'exit correct'
+
+    # use for breakpoint, run, continue
+    def _execute(self, cmd):
+        output = ''
+        self.proc.stdin.flush()
+        self.proc.stdin.write(cmd + '\n')
+        self.proc.stdin.flush()
+
+        while True:
+            line = self.proc.stdout.readline()
+            output = output + line
+            if self.verbose:
+                print(line)
+            if line == '^done\n' or line.startswith('*stopped'):
+                break
+        return output
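The second file in this diff, the ptrack test module, uses the class above: the new test_ptrack_get_block pauses a ptrack backup at make_pagemap_from_ptrack, updates the table while the backup is suspended, then lets it finish, reproducing the PGPRO-1230 scenario. The helper's run() and continue_execution() classify gdb's MI async records purely by string prefix; a self-contained illustration (the record text is abbreviated sample output, not captured from a real session):

# A breakpoint stop is reported as an MI '*stopped' async record.
line = '*stopped,reason="breakpoint-hit",bkptno="1",func="make_pagemap_from_ptrack"'
if line.startswith('*stopped,reason="breakpoint-hit"'):
    print('breakpoint-hit')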
@@ -16,7 +16,6 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
     # @unittest.expectedFailure
     def test_ptrack_enable(self):
         """make ptrack without full backup, should result in error"""
-        self.maxDiff = None
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
@@ -46,7 +45,6 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
     # @unittest.expectedFailure
     def test_ptrack_disable(self):
         """Take full backup, disable ptrack restart postgresql, enable ptrack, restart postgresql, take ptrack backup which should fail"""
-        self.maxDiff = None
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
@@ -89,6 +87,59 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
         # Clean after yourself
         self.del_test_dir(module_name, fname)
 
+    # @unittest.skip("skip")
+    def test_ptrack_get_block(self):
+        """make node, make full and ptrack stream backups, restore them and check data correctness"""
+        fname = self.id().split('.')[3]
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '300s', 'ptrack_enable': 'on'}
+            )
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.start()
+
+        node.safe_psql(
+            "postgres",
+            "create table t_heap as select i as id from generate_series(0,1) i")
+
+        self.backup_node(backup_dir, 'node', node, options=['--stream'])
+        gdb = self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream', '--log-level-file=verbose'], gdb=True)
+
+        if gdb.set_breakpoint('make_pagemap_from_ptrack'):
+            result = gdb.run()
+        else:
+            self.assertTrue(False, 'Cannot set breakpoint')
+
+        if result != 'breakpoint-hit':
+            print('Error in hitting breaking point')
+            sys.exit(1)
+
+        node.safe_psql(
+            "postgres",
+            "update t_heap set id = 100500")
+        print(node.safe_psql(
+            "postgres",
+            "select * from t_heap"))
+
+        if not gdb.continue_execution():
+            print('Error in continue_execution')
+
+        self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream'])
+
+        result = node.safe_psql("postgres", "SELECT * FROM t_heap")
+        node.cleanup()
+        self.restore_node(backup_dir, 'node', node, options=["-j", "4"])
+
+        node.start()
+        self.assertEqual(result, node.safe_psql("postgres", "SELECT * FROM t_heap"))
+
+        # Clean after yourself
+        self.del_test_dir(module_name, fname)
+
     # @unittest.skip("skip")
     def test_ptrack_stream(self):
         """make node, make full and ptrack stream backups, restore them and check data correctness"""
@@ -118,7 +169,7 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
             "postgres",
             "insert into t_heap select i as id, nextval('t_seq') as t_seq, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(100,200) i")
         ptrack_result = node.safe_psql("postgres", "SELECT * FROM t_heap")
-        ptrack_backup_id = self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream'])
+        ptrack_backup_id = self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream', '--log-level-file=verbose'])
         pgdata = self.pgdata_content(node.data_dir)
 
         # Drop Node
@@ -450,7 +501,6 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
     # @unittest.skip("skip")
     def test_alter_table_set_tablespace_ptrack(self):
         """Make node, create tablespace with table, take full backup, alter tablespace location, take ptrack backup, restore database."""
-        self.maxDiff = None
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
@@ -512,7 +562,9 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
 
     # @unittest.skip("skip")
     def test_alter_database_set_tablespace_ptrack(self):
-        """Make node, create tablespace with database, take full backup, alter tablespace location, take ptrack backup, restore database."""
+        """Make node, create tablespace with database,"
+        " take full backup, alter tablespace location,"
+        " take ptrack backup, restore database."""
         self.maxDiff = None
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')