diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py
index f019527e..b6a04f1b 100644
--- a/tests/helpers/ptrack_helpers.py
+++ b/tests/helpers/ptrack_helpers.py
@@ -11,38 +11,38 @@ import pwd
 
 idx_ptrack = {
-'t_heap': {
-    'type': 'heap'
+    't_heap': {
+        'type': 'heap'
     },
-'t_btree': {
-    'type': 'btree',
-    'column': 'text',
-    'relation': 't_heap'
+    't_btree': {
+        'type': 'btree',
+        'column': 'text',
+        'relation': 't_heap'
     },
-'t_seq': {
-    'type': 'seq',
-    'column': 't_seq',
-    'relation': 't_heap'
+    't_seq': {
+        'type': 'seq',
+        'column': 't_seq',
+        'relation': 't_heap'
     },
-'t_spgist': {
-    'type': 'spgist',
-    'column': 'text',
-    'relation': 't_heap'
+    't_spgist': {
+        'type': 'spgist',
+        'column': 'text',
+        'relation': 't_heap'
     },
-'t_brin': {
-    'type': 'brin',
-    'column': 'text',
-    'relation': 't_heap'
+    't_brin': {
+        'type': 'brin',
+        'column': 'text',
+        'relation': 't_heap'
     },
-'t_gist': {
-    'type': 'gist',
-    'column': 'tsvector',
-    'relation': 't_heap'
+    't_gist': {
+        'type': 'gist',
+        'column': 'tsvector',
+        'relation': 't_heap'
     },
-'t_gin': {
-    'type': 'gin',
-    'column': 'tsvector',
-    'relation': 't_heap'
+    't_gin': {
+        'type': 'gin',
+        'column': 'tsvector',
+        'relation': 't_heap'
     },
 }
@@ -71,15 +71,6 @@ Splitted Body
 {body_split}
 """
 
-# You can lookup error message and cmdline in exception object attributes
-class ProbackupException(Exception):
-    def __init__(self, message, cmd):
-        self.message = message
-        self.cmd = cmd
-    #need that to make second raise
-    def __str__(self):
-        return '\n ERROR: {0}\n CMD: {1}'.format(repr(self.message), self.cmd)
-
 
 def dir_files(base_dir):
     out_list = []
@@ -87,19 +78,36 @@ def dir_files(base_dir):
         if dir_name != base_dir:
             out_list.append(os.path.relpath(dir_name, base_dir))
         for fname in file_list:
-            out_list.append(os.path.relpath(os.path.join(dir_name, fname), base_dir))
+            out_list.append(
+                os.path.relpath(os.path.join(
+                    dir_name, fname), base_dir)
+                )
     out_list.sort()
     return out_list
 
+
 def is_enterprise():
-# pg_config --help
-    p = subprocess.Popen([os.environ['PG_CONFIG'], '--help'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    # pg_config --help
+    p = subprocess.Popen(
+        [os.environ['PG_CONFIG'], '--help'],
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE
+    )
     if 'postgrespro.ru' in p.communicate()[0]:
         return True
     else:
         return False
 
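+# Raised when a pg_probackup invocation exits with an error; tests inspect
+# .message (the captured output) and .cmd (the full command line).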
+class ProbackupException(Exception):
+    def __init__(self, message, cmd):
+        self.message = message
+        self.cmd = cmd
+
+    def __str__(self):
+        return '\n ERROR: {0}\n CMD: {1}'.format(repr(self.message), self.cmd)
+
+
 class ProbackupTest(object):
     # Class attributes
     enterprise = is_enterprise()
@@ -147,8 +155,12 @@ class ProbackupTest(object):
             self.archive_compress = True
 
         self.helpers_path = os.path.dirname(os.path.realpath(__file__))
-        self.dir_path = os.path.abspath(os.path.join(self.helpers_path, os.pardir))
-        self.tmp_path = os.path.abspath(os.path.join(self.dir_path, 'tmp_dirs'))
+        self.dir_path = os.path.abspath(
+            os.path.join(self.helpers_path, os.pardir)
+        )
+        self.tmp_path = os.path.abspath(
+            os.path.join(self.dir_path, 'tmp_dirs')
+        )
         try:
             os.makedirs(os.path.join(self.dir_path, 'tmp_dirs'))
         except:
@@ -157,7 +169,10 @@ class ProbackupTest(object):
         self.user = self.get_username()
         self.probackup_path = None
         if "PGPROBACKUPBIN" in self.test_env:
-            if os.path.isfile(self.test_env["PGPROBACKUPBIN"]) and os.access(self.test_env["PGPROBACKUPBIN"], os.X_OK):
+            if (
+                os.path.isfile(self.test_env["PGPROBACKUPBIN"]) and
+                os.access(self.test_env["PGPROBACKUPBIN"], os.X_OK)
+            ):
                 self.probackup_path = self.test_env["PGPROBACKUPBIN"]
             else:
                 if self.verbose:
@@ -184,10 +199,12 @@ class ProbackupTest(object):
         node.append_conf("postgresql.auto.conf", "fsync = on")
         node.append_conf("postgresql.auto.conf", "wal_level = minimal")
-        node.append_conf("postgresql.auto.conf", "log_line_prefix = '%t [%p]: [%l-1] '")
+        node.append_conf(
+            "postgresql.auto.conf", "log_line_prefix = '%t [%p]: [%l-1] '")
         node.append_conf("postgresql.auto.conf", "log_statement = none")
         node.append_conf("postgresql.auto.conf", "log_duration = on")
-        node.append_conf("postgresql.auto.conf", "log_min_duration_statement = 0")
+        node.append_conf(
+            "postgresql.auto.conf", "log_min_duration_statement = 0")
         node.append_conf("postgresql.auto.conf", "log_connections = on")
         node.append_conf("postgresql.auto.conf", "log_disconnections = on")
@@ -203,36 +220,50 @@ class ProbackupTest(object):
 
     def create_tblspace_in_node(self, node, tblspc_name, cfs=False):
         res = node.execute(
-            "postgres", "select exists (select 1 from pg_tablespace where spcname = '{0}')".format(
-                tblspc_name))
+            "postgres",
+            "select exists"
+            " (select 1 from pg_tablespace where spcname = '{0}')".format(
+                tblspc_name)
+        )
         # Check that tablespace with name 'tblspc_name' do not exists already
-        self.assertFalse(res[0][0], 'Tablespace "{0}" already exists'.format(tblspc_name))
+        self.assertFalse(
+            res[0][0],
+            'Tablespace "{0}" already exists'.format(tblspc_name)
+        )
         tblspc_path = os.path.join(node.base_dir, '{0}'.format(tblspc_name))
-        cmd = "CREATE TABLESPACE {0} LOCATION '{1}'".format(tblspc_name, tblspc_path)
+        cmd = "CREATE TABLESPACE {0} LOCATION '{1}'".format(
+            tblspc_name, tblspc_path)
         if cfs:
             cmd += " with (compression=true)"
         os.makedirs(tblspc_path)
         res = node.safe_psql("postgres", cmd)
         # Check that tablespace was successfully created
-        # self.assertEqual(res[0], 0, 'Failed to create tablespace with cmd: {0}'.format(cmd))
+        # self.assertEqual(
+        #     res[0], 0,
+        #     'Failed to create tablespace with cmd: {0}'.format(cmd))
 
     def get_tblspace_path(self, node, tblspc_name):
         return os.path.join(node.base_dir, tblspc_name)
 
     def get_fork_size(self, node, fork_name):
-        return node.execute("postgres",
+        return node.execute(
+            "postgres",
             "select pg_relation_size('{0}')/8192".format(fork_name))[0][0]
 
     def get_fork_path(self, node, fork_name):
-        return os.path.join(node.base_dir, 'data',
-            node.execute("postgres", "select pg_relation_filepath('{0}')".format(fork_name))[0][0])
+        return os.path.join(
+            node.base_dir, 'data', node.execute(
+                "postgres",
+                "select pg_relation_filepath('{0}')".format(
+                    fork_name))[0][0]
+        )
 
     def get_md5_per_page_for_fork(self, file, size_in_pages):
         pages_per_segment = {}
         md5_per_page = {}
         nsegments = size_in_pages/131072
-        if size_in_pages%131072 != 0:
+        if size_in_pages % 131072 != 0:
             nsegments = nsegments + 1
 
         size = size_in_pages
@@ -250,12 +281,15 @@ class ProbackupTest(object):
                 start_page = 0
                 end_page = pages_per_segment[segment_number]
             else:
-                file_desc = os.open(file+".{0}".format(segment_number), os.O_RDONLY)
+                file_desc = os.open(
+                    file+".{0}".format(segment_number), os.O_RDONLY
+                )
                 start_page = max(md5_per_page)+1
                 end_page = end_page + pages_per_segment[segment_number]
 
             for page in range(start_page, end_page):
-                md5_per_page[page] = hashlib.md5(os.read(file_desc, 8192)).hexdigest()
+                md5_per_page[page] = hashlib.md5(
+                    os.read(file_desc, 8192)).hexdigest()
                 offset += 8192
                 os.lseek(file_desc, offset, 0)
             os.close(file_desc)
@@ -273,7 +307,7 @@ class ProbackupTest(object):
         page_body_size = 8192-header_size
         byte_size = os.path.getsize(file + '_ptrack')
         npages = byte_size/8192
-        if byte_size%8192 != 0:
+        if byte_size % 8192 != 0:
             print('Ptrack page is not 8k aligned')
             sys.exit(1)
@@ -283,14 +317,17 @@ class ProbackupTest(object):
             offset = 8192*page+header_size
             os.lseek(file, offset, 0)
             lots_of_bytes = os.read(file, page_body_size)
-            byte_list = [lots_of_bytes[i:i+1] for i in range(len(lots_of_bytes))]
+            byte_list = [
+                lots_of_bytes[i:i+1] for i in range(len(lots_of_bytes))
+            ]
             for byte in byte_list:
-                #byte_inverted = bin(int(byte, base=16))[2:][::-1]
-                #bits = (byte >> x) & 1 for x in range(7, -1, -1)
+                # byte_inverted = bin(int(byte, base=16))[2:][::-1]
+                # bits = (byte >> x) & 1 for x in range(7, -1, -1)
                 byte_inverted = bin(ord(byte))[2:].rjust(8, '0')[::-1]
                 for bit in byte_inverted:
-                # if len(ptrack_bits_for_fork) < size:
-                    ptrack_bits_for_fork.append(int(bit))
+                    # if len(ptrack_bits_for_fork) < size:
+                    ptrack_bits_for_fork.append(int(bit))
+
         os.close(file)
         return ptrack_bits_for_fork
@@ -306,83 +343,150 @@ class ProbackupTest(object):
                 # Ptrack should be equal to 1
                 if idx_dict['ptrack'][PageNum] != 1:
                     if self.verbose:
-                        print('Page Number {0} of type {1} was added, but ptrack value is {2}. THIS IS BAD'.format(
-                            PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum]))
+                        print(
+                            'Page Number {0} of type {1} was added,'
+                            ' but ptrack value is {2}. THIS IS BAD'.format(
+                                PageNum, idx_dict['type'],
+                                idx_dict['ptrack'][PageNum])
+                        )
                     # print(idx_dict)
                     success = False
                 continue
             if PageNum not in idx_dict['new_pages']:
                 # Page is not present now, meaning that relation got smaller
-                # Ptrack should be equal to 0, We are not freaking out about false positive stuff
+                # Ptrack should be equal to 0,
+                # We are not freaking out about false positive stuff
                 if idx_dict['ptrack'][PageNum] != 0:
                     if self.verbose:
-                        print('Page Number {0} of type {1} was deleted, but ptrack value is {2}'.format(
-                            PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum]))
+                        print(
+                            'Page Number {0} of type {1} was deleted,'
+                            ' but ptrack value is {2}'.format(
+                                PageNum, idx_dict['type'],
+                                idx_dict['ptrack'][PageNum])
+                        )
                 continue
-            # Ok, all pages in new_pages that do not have corresponding page in old_pages
-            # are been dealt with. We can now safely proceed to comparing old and new pages
-            if idx_dict['new_pages'][PageNum] != idx_dict['old_pages'][PageNum]:
-                # Page has been changed, meaning that ptrack should be equal to 1
+            # Ok, all pages in new_pages that do not have
+            # a corresponding page in old_pages have been dealt with.
+            # We can now safely proceed to comparing old and new pages
+            if idx_dict['new_pages'][
+                    PageNum] != idx_dict['old_pages'][PageNum]:
+                # Page has been changed,
+                # meaning that ptrack should be equal to 1
                 if idx_dict['ptrack'][PageNum] != 1:
                     if self.verbose:
-                        print('Page Number {0} of type {1} was changed, but ptrack value is {2}. THIS IS BAD'.format(
-                            PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum]))
-                        print("\n Old checksumm: {0}\n New checksumm: {1}".format(idx_dict['old_pages'][PageNum], idx_dict['new_pages'][PageNum]))
-                        #print(idx_dict)
+                        print(
+                            'Page Number {0} of type {1} was changed,'
+                            ' but ptrack value is {2}. THIS IS BAD'.format(
+                                PageNum, idx_dict['type'],
+                                idx_dict['ptrack'][PageNum])
+                        )
+                        print(
+                            "\n Old checksum: {0}\n"
+                            " New checksum: {1}".format(
+                                idx_dict['old_pages'][PageNum],
+                                idx_dict['new_pages'][PageNum])
+                        )
+
                     if PageNum == 0 and idx_dict['type'] == 'spgist':
                         if self.verbose:
-                            print('SPGIST is a special snowflake, so don`t fret about losing ptrack for blknum 0')
+                            print(
+                                'SPGIST is a special snowflake, so don`t'
+                                ' fret about losing ptrack for blknum 0'
+                            )
                         continue
                     success = False
             else:
-                # Page has not been changed, meaning that ptrack should be equal to 0
+                # Page has not been changed,
+                # meaning that ptrack should be equal to 0
                 if idx_dict['ptrack'][PageNum] != 0:
                     if self.verbose:
-                        print('Page Number {0} of type {1} was not changed, but ptrack value is {2}'.format(
-                            PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum]))
-        #print(idx_dict)
-        #self.assertEqual(success, True, 'Ptrack does not correspond to state of its pages.\n Gory Details: \n{0}'.format(
-        #    idx_dict['type'], idx_dict))
+                        print(
+                            'Page Number {0} of type {1} was not changed,'
+                            ' but ptrack value is {2}'.format(
+                                PageNum, idx_dict['type'],
+                                idx_dict['ptrack'][PageNum]
+                            )
+                        )
+
+        self.assertTrue(
+            success, 'Ptrack of index "{0}" does not correspond to state'
+            ' of its own pages.\n Gory Details: \n{1}'.format(
+                idx_dict['type'], idx_dict
+            )
+        )
 
     def check_ptrack_recovery(self, idx_dict):
         size = idx_dict['size']
         for PageNum in range(size):
             if idx_dict['ptrack'][PageNum] != 1:
-                self.assertTrue(False, 'Recovery for Page Number {0} of Type {1} was conducted, but ptrack value is {2}. THIS IS BAD\n IDX_DICT: {3}'.format(
-                    PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum], idx_dict))
+                self.assertTrue(
+                    False,
+                    'Recovery for Page Number {0} of Type {1}'
+                    ' was conducted, but ptrack value is {2}.'
+                    ' THIS IS BAD\n IDX_DICT: {3}'.format(
+                        PageNum, idx_dict['type'],
+                        idx_dict['ptrack'][PageNum],
+                        idx_dict
+                    )
+                )
 
     def check_ptrack_clean(self, idx_dict, size):
         for PageNum in range(size):
             if idx_dict['ptrack'][PageNum] != 0:
                self.assertTrue(
-                self.assertTrue(False, 'Ptrack for Page Number {0} of Type {1} should be clean, but ptrack value is {2}.\n THIS IS BAD\n IDX_DICT: {3}'.format(
-                    PageNum, idx_dict['type'], idx_dict['ptrack'][PageNum], idx_dict))
+                self.assertTrue(
+                    False,
+                    'Ptrack for Page Number {0} of Type {1}'
+                    ' should be clean, but ptrack value is {2}.'
+                    '\n THIS IS BAD\n IDX_DICT: {3}'.format(
+                        PageNum,
+                        idx_dict['type'],
+                        idx_dict['ptrack'][PageNum],
+                        idx_dict
+                    )
+                )
 
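+    # run_pb: with gdb=True the command is returned wrapped in a GDBobj
+    # (defined below); with async=True the live Popen handle is returned
+    # instead of the captured output.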
-    def run_pb(self, command):
+    def run_pb(self, command, async=False, gdb=False):
         try:
-            self.cmd = [' '.join(map(str,[self.probackup_path] + command))]
+            self.cmd = [' '.join(map(str, [self.probackup_path] + command))]
             if self.verbose:
                 print(self.cmd)
-            self.output = subprocess.check_output(
-                [self.probackup_path] + command,
-                stderr=subprocess.STDOUT,
-                env=self.test_env
-            ).decode("utf-8")
-            if command[0] == 'backup':
-                # return backup ID
-                for line in self.output.splitlines():
-                    if 'INFO: Backup' and 'completed' in line:
-                        return line.split()[2]
+            if gdb:
+                return GDBobj([self.probackup_path] + command, verbose=True)
+            if async:
+                return subprocess.Popen(
+                    [self.probackup_path] + command,
+                    stdout=subprocess.PIPE,
+                    stderr=subprocess.PIPE,
+                    env=self.test_env
+                )
             else:
-                return self.output
+                self.output = subprocess.check_output(
+                    [self.probackup_path] + command,
+                    stderr=subprocess.STDOUT,
+                    env=self.test_env
+                ).decode("utf-8")
+                if command[0] == 'backup':
+                    # return backup ID
+                    for line in self.output.splitlines():
+                        if 'INFO: Backup' in line and 'completed' in line:
+                            return line.split()[2]
+                else:
+                    return self.output
         except subprocess.CalledProcessError as e:
-          raise ProbackupException(e.output.decode("utf-8"), self.cmd)
+            raise ProbackupException(e.output.decode("utf-8"), self.cmd)
 
     def run_binary(self, command, async=False):
         try:
             if async:
                 if self.verbose:
                     print(command)
-                return subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=self.test_env)
+                return subprocess.Popen(
+                    command,
+                    stdin=subprocess.PIPE,
+                    stdout=subprocess.PIPE,
+                    stderr=subprocess.PIPE,
+                    env=self.test_env
+                )
             else:
                 if self.verbose:
                     print(command)
@@ -393,7 +497,7 @@ class ProbackupTest(object):
             ).decode("utf-8")
             return self.output
         except subprocess.CalledProcessError as e:
-          raise ProbackupException(e.output.decode("utf-8"), command)
+            raise ProbackupException(e.output.decode("utf-8"), command)
 
     def init_pb(self, backup_dir):
@@ -423,7 +527,10 @@ class ProbackupTest(object):
     def clean_pb(self, backup_dir):
         shutil.rmtree(backup_dir, ignore_errors=True)
 
-    def backup_node(self, backup_dir, instance, node, data_dir=False, backup_type="full", options=[]):
+    def backup_node(
+            self, backup_dir, instance, node, data_dir=False,
+            backup_type="full", options=[], async=False, gdb=False
+            ):
         if not node and not data_dir:
             print('You must provide ether node or data_dir for backup')
             exit(1)
@@ -437,7 +544,7 @@ class ProbackupTest(object):
         cmd_list = [
             "backup",
             "-B", backup_dir,
-#            "-D", pgdata,
+            # "-D", pgdata,
             "-p", "%i" % node.port,
             "-d", "postgres",
             "--instance={0}".format(instance)
@@ -445,9 +552,12 @@ class ProbackupTest(object):
         if backup_type:
             cmd_list += ["-b", backup_type]
 
-        return self.run_pb(cmd_list + options)
+        return self.run_pb(cmd_list + options, async, gdb)
 
-    def restore_node(self, backup_dir, instance, node=False, data_dir=None, backup_id=None, options=[]):
+    def restore_node(
+            self, backup_dir, instance, node=False,
+            data_dir=None, backup_id=None, options=[]
+            ):
         if data_dir is None:
             data_dir = node.data_dir
@@ -462,7 +572,10 @@ class ProbackupTest(object):
 
         return self.run_pb(cmd_list + options)
 
-    def show_pb(self, backup_dir, instance=None, backup_id=None, options=[], as_text=False):
+    def show_pb(
+            self, backup_dir, instance=None, backup_id=None,
+            options=[], as_text=False
+            ):
 
         backup_list = []
         specific_record = {}
@@ -485,7 +598,8 @@ class ProbackupTest(object):
         if instance is not None and backup_id is None:
             # cut header(ID, Mode, etc) from show as single string
             header = show_splitted[1:2][0]
-            # cut backup records from show as single list with string for every backup record
+            # cut backup records from show as single list
+            # with string for every backup record
            body = show_splitted[3:]
             body = show_splitted[3:]
             # inverse list so oldest record come first
             body = body[::-1]
@@ -496,9 +610,11 @@ class ProbackupTest(object):
                 if i == '':
                     header_split.remove(i)
                     continue
-                header_split = [header_element.rstrip() for header_element in header_split]
+                header_split = [
+                    header_element.rstrip() for header_element in header_split
+                ]
             for backup_record in body:
-                # split string in list with string for every backup record element
+                # split list with str for every backup record element
                 backup_record_split = re.split(" +", backup_record)
                 # Remove empty items
                 for i in backup_record_split:
@@ -507,7 +623,9 @@ class ProbackupTest(object):
                 if len(header_split) != len(backup_record_split):
                     print(warning.format(
                         header=header, body=body,
-                        header_split=header_split, body_split=backup_record_split))
+                        header_split=header_split,
+                        body_split=backup_record_split)
+                    )
                     exit(1)
                 new_dict = dict(zip(header_split, backup_record_split))
                 backup_list.append(new_dict)
@@ -517,7 +635,9 @@ class ProbackupTest(object):
             # and other garbage then reconstruct it as dictionary
             # print show_splitted
             sanitized_show = [item for item in show_splitted if item]
-            sanitized_show = [item for item in sanitized_show if not item.startswith('#')]
+            sanitized_show = [
+                item for item in sanitized_show if not item.startswith('#')
+            ]
             # print sanitized_show
             for line in sanitized_show:
                 name, var = line.partition(" = ")[::2]
@@ -526,7 +646,10 @@ class ProbackupTest(object):
                 specific_record[name.strip()] = var
         return specific_record
 
-    def validate_pb(self, backup_dir, instance=None, backup_id=None, options=[]):
+    def validate_pb(
+            self, backup_dir, instance=None,
+            backup_id=None, options=[]
+            ):
 
         cmd_list = [
             "validate",
@@ -573,10 +696,11 @@ class ProbackupTest(object):
                 out_dict[name] = var
         return out_dict
 
-
     def get_recovery_conf(self, node):
         out_dict = {}
-        with open(os.path.join(node.data_dir, "recovery.conf"), "r") as recovery_conf:
+        with open(
+            os.path.join(node.data_dir, "recovery.conf"), "r"
+        ) as recovery_conf:
             for line in recovery_conf:
                 try:
                     key, value = line.split("=")
@@ -605,49 +729,82 @@ class ProbackupTest(object):
         if self.archive_compress:
             node.append_conf(
                 "postgresql.auto.conf",
-                "archive_command = '{0} archive-push -B {1} --instance={2} --compress --wal-file-path %p --wal-file-name %f'".format(
-                    self.probackup_path, backup_dir, instance))
+                "archive_command = '{0} archive-push -B {1}"
+                " --instance={2} --compress --wal-file-path"
+                " %p --wal-file-name %f'".format(
+                    self.probackup_path, backup_dir, instance)
+            )
         else:
             node.append_conf(
                 "postgresql.auto.conf",
-                "archive_command = '{0} archive-push -B {1} --instance={2} --wal-file-path %p --wal-file-name %f'".format(
-                    self.probackup_path, backup_dir, instance))
-        #elif os.name == 'nt':
+                "archive_command = '{0} archive-push"
+                " -B {1} --instance={2} --wal-file-path"
+                " %p --wal-file-name %f'".format(
+                    self.probackup_path, backup_dir, instance)
+            )
+        # elif os.name == 'nt':
         #    node.append_conf(
         #        "postgresql.auto.conf",
         #        "archive_command = 'copy %p {0}\\%f'".format(archive_dir)
        #        )
         #        )
 
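+    # set_replica: point the replica at the master via primary_conninfo;
+    # with synchronous=True the master is reloaded so commits wait for the
+    # named standby.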
-    def set_replica(self, master, replica, replica_name='replica', synchronous=False):
-        replica.append_conf('postgresql.auto.conf', 'port = {0}'.format(replica.port))
+    def set_replica(
+            self, master, replica,
+            replica_name='replica',
+            synchronous=False
+            ):
+        replica.append_conf(
+            "postgresql.auto.conf", "port = {0}".format(replica.port))
         replica.append_conf('postgresql.auto.conf', 'hot_standby = on')
         replica.append_conf('recovery.conf', "standby_mode = 'on'")
-        replica.append_conf('recovery.conf',
-            "primary_conninfo = 'user={0} port={1} application_name={2} sslmode=prefer sslcompression=1'".format(
-                self.user, master.port, replica_name))
+        replica.append_conf(
+            "recovery.conf",
+            "primary_conninfo = 'user={0} port={1} application_name={2}"
+            " sslmode=prefer sslcompression=1'".format(
+                self.user, master.port, replica_name)
+        )
         if synchronous:
-            master.append_conf('postgresql.auto.conf', "synchronous_standby_names='{0}'".format(replica_name))
-            master.append_conf('postgresql.auto.conf', "synchronous_commit='remote_apply'")
+            master.append_conf(
+                "postgresql.auto.conf",
+                "synchronous_standby_names='{0}'".format(replica_name)
+            )
+            master.append_conf(
+                'postgresql.auto.conf',
+                "synchronous_commit='remote_apply'"
+            )
             master.reload()
 
     def wrong_wal_clean(self, node, wal_size):
         wals_dir = os.path.join(self.backup_dir(node), "wal")
-        wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f))]
+        wals = [
+            f for f in os.listdir(wals_dir) if os.path.isfile(
+                os.path.join(wals_dir, f))
+        ]
         wals.sort()
         file_path = os.path.join(wals_dir, wals[-1])
         if os.path.getsize(file_path) != wal_size:
             os.remove(file_path)
 
     def guc_wal_segment_size(self, node):
-        var = node.execute("postgres", "select setting from pg_settings where name = 'wal_segment_size'")
+        var = node.execute(
+            "postgres",
+            "select setting from pg_settings where name = 'wal_segment_size'"
+        )
         return int(var[0][0]) * self.guc_wal_block_size(node)
 
     def guc_wal_block_size(self, node):
-        var = node.execute("postgres", "select setting from pg_settings where name = 'wal_block_size'")
+        var = node.execute(
+            "postgres",
+            "select setting from pg_settings where name = 'wal_block_size'"
+        )
         return int(var[0][0])
 
     def get_pgpro_edition(self, node):
-        if node.execute("postgres", "select exists(select 1 from pg_proc where proname = 'pgpro_edition')")[0][0]:
+        if node.execute(
+            "postgres",
+            "select exists (select 1 from"
+            " pg_proc where proname = 'pgpro_edition')"
+        )[0][0]:
             var = node.execute("postgres", "select pgpro_edition()")
             return str(var[0][0])
         else:
@@ -659,7 +816,9 @@ class ProbackupTest(object):
 
     def switch_wal_segment(self, node):
         """ Execute pg_switch_wal/xlog() in given node"""
-        if testgres.version_to_num(node.safe_psql("postgres", "show server_version")) >= testgres.version_to_num('10.0'):
+        if testgres.version_to_num(
+            node.safe_psql("postgres", "show server_version")
+        ) >= testgres.version_to_num('10.0'):
             node.safe_psql("postgres", "select pg_switch_wal()")
         else:
             node.safe_psql("postgres", "select pg_switch_xlog()")
@@ -674,29 +833,48 @@ class ProbackupTest(object):
             except:
                 pass
 
-        shutil.rmtree(os.path.join(self.tmp_path, module_name, fname),
-            ignore_errors=True)
+        shutil.rmtree(
+            os.path.join(
+                self.tmp_path,
+                module_name,
+                fname
+            ),
+            ignore_errors=True
+        )
         try:
             os.rmdir(os.path.join(self.tmp_path, module_name))
         except:
             pass
 
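+    # pgdata_content: md5-hash every data file under 'directory' so an
+    # original and a restored cluster can be compared file by file; WAL,
+    # logs, ptrack maps and other volatile files are skipped.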
     def pgdata_content(self, directory):
-        """ return dict with directory content. TAKE IT AFTER CHECKPOINT or BACKUP"""
-        dirs_to_ignore = ['pg_xlog', 'pg_wal', 'pg_log', 'pg_stat_tmp', 'pg_subtrans', 'pg_notify']
-        files_to_ignore = ['postmaster.pid', 'postmaster.opts', 'pg_internal.init', 'postgresql.auto.conf']
-        suffixes_to_ignore = ('_ptrack', 'ptrack_control', 'pg_control', 'ptrack_init')
+        """ Return dict with directory content.
+        TAKE IT AFTER CHECKPOINT or BACKUP"""
+        dirs_to_ignore = [
+            'pg_xlog', 'pg_wal', 'pg_log',
+            'pg_stat_tmp', 'pg_subtrans', 'pg_notify'
+        ]
+        files_to_ignore = [
+            'postmaster.pid', 'postmaster.opts',
+            'pg_internal.init', 'postgresql.auto.conf'
+        ]
+        suffixes_to_ignore = (
+            '_ptrack', 'ptrack_control',
+            'pg_control', 'ptrack_init'
+        )
         directory_dict = {}
         directory_dict['pgdata'] = directory
         directory_dict['files'] = {}
         for root, dirs, files in os.walk(directory, followlinks=True):
             dirs[:] = [d for d in dirs if d not in dirs_to_ignore]
             for file in files:
-                if file in files_to_ignore or file.endswith(suffixes_to_ignore):
+                if file in files_to_ignore or file.endswith(
+                    suffixes_to_ignore
+                ):
                     continue
-                file = os.path.join(root,file)
+                file = os.path.join(root, file)
                 file_relpath = os.path.relpath(file, directory)
-                directory_dict['files'][file_relpath] = hashlib.md5(open(file, 'rb').read()).hexdigest()
+                directory_dict['files'][file_relpath] = hashlib.md5(
+                    open(file, 'rb').read()).hexdigest()
         return directory_dict
 
     def compare_pgdata(self, original_pgdata, restored_pgdata):
@@ -705,14 +883,114 @@ class ProbackupTest(object):
         error_message = ''
         for file in original_pgdata['files']:
             if file in restored_pgdata['files']:
-                if original_pgdata['files'][file] != restored_pgdata['files'][file]:
-                    error_message += '\nChecksumm mismatch.\n File_old: {0}\n Checksumm_old: {1}\n File_new: {2}\n Checksumm_new: {3}\n'.format(
+                if (
+                    original_pgdata['files'][file] !=
+                    restored_pgdata['files'][file]
+                ):
+                    error_message += '\nChecksum mismatch.\n' \
+                        ' File_old: {0}\n Checksum_old: {1}\n' \
+                        ' File_new: {2}\n Checksum_new: {3}\n'.format(
                         os.path.join(original_pgdata['pgdata'], file),
                         original_pgdata['files'][file],
                         os.path.join(restored_pgdata['pgdata'], file),
-                        restored_pgdata['files'][file])
+                        restored_pgdata['files'][file]
+                    )
                     fail = True
             else:
-                error_message += '\nFile dissappearance. File: {0}/{1}'.format(restored_pgdata['pgdata'], file)
+                error_message += '\nFile disappearance.' \
+                    ' File: {0}/{1}'.format(restored_pgdata['pgdata'], file)
                 fail = True
         self.assertFalse(fail, error_message)
+
+
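+# GDBobj runs a command under gdb's MI ("mi2") interpreter so a test can
+# set breakpoints inside pg_probackup, pause it mid-backup and resume it.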
+class GdbException(Exception):
+    def __init__(self, message=False):
+        self.message = message
+
+    def __str__(self):
+        return '\n ERROR: {0}\n'.format(repr(self.message))
+
+
+class GDBobj(ProbackupTest):
+    def __init__(self, cmd, verbose):
+        self.base_cmd = [
+            '/usr/bin/gdb',
+            '--interpreter',
+            'mi2',
+            '--args'
+        ] + cmd
+        self.verbose = verbose
+        if self.verbose:
+            print([' '.join(map(str, self.base_cmd))])
+
+        self.proc = subprocess.Popen(
+            self.base_cmd,
+            stdin=subprocess.PIPE,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            bufsize=0, universal_newlines=True
+        )
+        self.gdb_pid = self.proc.pid
+
+        # discard gdb's start-up output up to the first '(gdb)' prompt
+        while True:
+            line = self.proc.stdout.readline()
+            if not line.startswith('(gdb)'):
+                pass
+            else:
+                break
+
+    def set_breakpoint(self, location):
+        result = self._execute('break ' + location)
+        success = False
+        for line in result.splitlines():
+            # Success
+            if line.startswith('~"Breakpoint'):
+                success = True
+                break
+            if line.startswith('^error') or line.startswith('(gdb)'):
+                break
+            # gdb reports breakpoint problems on '&' (log) lines
+            if line.startswith('&'):
+                if line.startswith('&"break'):
+                    pass
+                if line.startswith('&"Function'):
+                    raise GdbException(line)
+                if line.startswith('&"No line'):
+                    raise GdbException(line)
+        return success
+
+    def run(self):
+        result = self._execute('run')
+        for line in result.splitlines():
+            if line.startswith('*stopped,reason="breakpoint-hit"'):
+                return 'breakpoint-hit'
+            if line.startswith('*stopped,reason="exited-normally"'):
+                return 'exit correct'
+
+    def continue_execution(self, sync=True):
+        result = self._execute('continue')
+        for line in result.splitlines():
+            if line.startswith('*stopped,reason="breakpoint-hit"'):
+                return 'breakpoint-hit'
+            if line.startswith('*stopped,reason="exited-normally"'):
+                return 'exit correct'
+
+    # use for breakpoint, run, continue
+    def _execute(self, cmd):
+        output = ''
+        self.proc.stdin.flush()
+        self.proc.stdin.write(cmd + '\n')
+        self.proc.stdin.flush()
+
+        while True:
+            line = self.proc.stdout.readline()
+            output = output + line
+            if self.verbose:
+                print(line)
+            if line == '^done\n' or line.startswith('*stopped'):
+                break
+        return output
diff --git a/tests/ptrack.py b/tests/ptrack.py
index a0dc489f..fd1792f1 100644
--- a/tests/ptrack.py
+++ b/tests/ptrack.py
@@ -16,7 +16,6 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
     # @unittest.expectedFailure
     def test_ptrack_enable(self):
         """make ptrack without full backup, should result in error"""
-        self.maxDiff = None
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
@@ -46,7 +45,6 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
     # @unittest.expectedFailure
     def test_ptrack_disable(self):
         """Take full backup, disable ptrack restart postgresql, enable ptrack, restart postgresql, take ptrack backup which should fail"""
-        self.maxDiff = None
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
@@ -89,6 +87,59 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
         # Clean after yourself
         self.del_test_dir(module_name, fname)
 
+    # @unittest.skip("skip")
+    def test_ptrack_get_block(self):
+        """make full and ptrack stream backups with the ptrack backup paused under gdb, restore them and check data correctness"""
+        fname = self.id().split('.')[3]
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '300s', 'ptrack_enable': 'on'}
+            )
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.start()
+
+        node.safe_psql(
+            "postgres",
+            "create table t_heap as select i as id from generate_series(0,1) i")
+
+        self.backup_node(backup_dir, 'node', node, options=['--stream'])
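+        # Run the ptrack backup under gdb; the breakpoint below pauses it
+        # before it reads the ptrack map, so the UPDATE happens mid-backup.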
+        gdb = self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream', '--log-level-file=verbose'], gdb=True)
+
+        if gdb.set_breakpoint('make_pagemap_from_ptrack'):
+            result = gdb.run()
+        else:
+            self.assertTrue(False, 'Cannot set breakpoint')
+
+        if result != 'breakpoint-hit':
+            print('Error: breakpoint was not hit')
+            sys.exit(1)
+
+        node.safe_psql(
+            "postgres",
+            "update t_heap set id = 100500")
+        print(node.safe_psql(
+            "postgres",
+            "select * from t_heap"))
+
+        if not gdb.continue_execution():
+            print('Error in continue_execution')
+
+        self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream'])
+
+        result = node.safe_psql("postgres", "SELECT * FROM t_heap")
+        node.cleanup()
+        self.restore_node(backup_dir, 'node', node, options=["-j", "4"])
+
+        node.start()
+        self.assertEqual(result, node.safe_psql("postgres", "SELECT * FROM t_heap"))
+
+        # Clean after yourself
+        self.del_test_dir(module_name, fname)
+
     # @unittest.skip("skip")
     def test_ptrack_stream(self):
         """make node, make full and ptrack stream backups, restore them and check data correctness"""
@@ -118,7 +169,7 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
             "postgres",
             "insert into t_heap select i as id, nextval('t_seq') as t_seq, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(100,200) i")
         ptrack_result = node.safe_psql("postgres", "SELECT * FROM t_heap")
-        ptrack_backup_id = self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream'])
+        ptrack_backup_id = self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream', '--log-level-file=verbose'])
         pgdata = self.pgdata_content(node.data_dir)
 
         # Drop Node
@@ -450,7 +501,6 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
     # @unittest.skip("skip")
     def test_alter_table_set_tablespace_ptrack(self):
         """Make node, create tablespace with table, take full backup, alter tablespace location, take ptrack backup, restore database."""
-        self.maxDiff = None
        fname = self.id().split('.')[3]
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
@@ -512,7 +562,9 @@ class PtrackBackupTest(ProbackupTest, unittest.TestCase):
 
     # @unittest.skip("skip")
     def test_alter_database_set_tablespace_ptrack(self):
-        """Make node, create tablespace with database, take full backup, alter tablespace location, take ptrack backup, restore database."""
+        """Make node, create tablespace with database,
+        take full backup, alter tablespace location,
+        take ptrack backup, restore database."""
         self.maxDiff = None
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')