import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
from datetime import datetime, timedelta
import subprocess
from sys import exit
import time
import hashlib


module_name = 'validate'


class ValidateTest(ProbackupTest, unittest.TestCase):

    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_validate_nullified_heap_page_backup(self):
        """
        make node with nullified heap block
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.pgbench_init(scale=3)

        file_path = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('pgbench_accounts')").rstrip()

        node.safe_psql(
            "postgres",
            "CHECKPOINT")

        # Nullify some block in PostgreSQL
        file = os.path.join(node.data_dir, file_path)
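        # Zero out the second relation block (offset 8192, the default 8KB
        # PostgreSQL page size)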
        with open(file, 'r+b') as f:
            f.seek(8192)
            f.write(b"\x00" * 8192)
            f.flush()
            f.close()

        self.backup_node(
            backup_dir, 'node', node, options=['--log-level-file=verbose'])

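        # In paranoia mode, snapshot the data directory contents now so they
        # can be compared with the restored directory at the end of the test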
        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        if not self.remote:
            log_file_path = os.path.join(backup_dir, "log", "pg_probackup.log")

            with open(log_file_path) as f:
                self.assertTrue(
                    '{0} blknum 1, empty page'.format(file_path) in f.read(),
                    'Failed to detect nullified block')

        self.validate_pb(backup_dir, options=["-j", "4"])
        node.cleanup()

        self.restore_node(backup_dir, 'node', node)

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_validate_wal_unreal_values(self):
        """
        make node with archiving, make archive backup
        validate to both real and unreal values
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.pgbench_init(scale=3)

        with node.connect("postgres") as con:
            con.execute("CREATE TABLE tbl0005 (a text)")
            con.commit()

        backup_id = self.backup_node(backup_dir, 'node', node)

        node.pgbench_init(scale=3)

        target_time = self.show_pb(
            backup_dir, 'node', backup_id)['recovery-time']
        after_backup_time = datetime.now().replace(second=0, microsecond=0)

        # Validate to real time
        self.assertIn(
            "INFO: Backup validation completed successfully",
            self.validate_pb(
                backup_dir, 'node',
                options=["--time={0}".format(target_time), "-j", "4"]),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

        # Validate to unreal time
        unreal_time_1 = after_backup_time - timedelta(days=2)
        try:
            self.validate_pb(
                backup_dir, 'node',
                options=["--time={0}".format(unreal_time_1), "-j", "4"])
            self.assertEqual(
                1, 0,
                "Expecting Error because of validation to unreal time.\n "
                "Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Backup satisfying target options is not found',
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        # Validate to unreal time #2
        unreal_time_2 = after_backup_time + timedelta(days=2)
        try:
            self.validate_pb(
                backup_dir, 'node',
                options=["--time={0}".format(unreal_time_2), "-j", "4"])
            self.assertEqual(
                1, 0,
                "Expecting Error because of validation to unreal time.\n "
                "Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                'ERROR: Not enough WAL records to time' in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        # Validate to real xid
        target_xid = None
        with node.connect("postgres") as con:
            res = con.execute(
                "INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)")
            con.commit()
            target_xid = res[0][0]
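
        # Force a WAL segment switch and give the archiver a few seconds to
        # copy the segment containing target_xid into the archive before
        # validating by xid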
        self.switch_wal_segment(node)
        time.sleep(5)

        self.assertIn(
            "INFO: Backup validation completed successfully",
            self.validate_pb(
                backup_dir, 'node',
                options=["--xid={0}".format(target_xid), "-j", "4"]),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

        # Validate to unreal xid
        unreal_xid = int(target_xid) + 1000
        try:
            self.validate_pb(
                backup_dir, 'node',
                options=["--xid={0}".format(unreal_xid), "-j", "4"])
            self.assertEqual(
                1, 0,
                "Expecting Error because of validation to unreal xid.\n "
                "Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                'ERROR: Not enough WAL records to xid' in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        # Validate with backup ID
        output = self.validate_pb(
            backup_dir, 'node', backup_id, options=["-j", "4"])
        self.assertIn(
            "INFO: Validating backup {0}".format(backup_id),
            output,
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))
        self.assertIn(
            "INFO: Backup {0} data files are valid".format(backup_id),
            output,
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))
        self.assertIn(
            "INFO: Backup {0} WAL segments are valid".format(backup_id),
            output,
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))
        self.assertIn(
            "INFO: Backup {0} is valid".format(backup_id),
            output,
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))
        self.assertIn(
            "INFO: Validate of backup {0} completed".format(backup_id),
            output,
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_validate_corrupted_intermediate_backup(self):
        """
        make archive node, take FULL, PAGE1, PAGE2 backups,
        corrupt file in PAGE1 backup,
        run validate on PAGE1, expect PAGE1 to gain status CORRUPT
        and PAGE2 to gain status ORPHAN
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # FULL
        backup_id_1 = self.backup_node(backup_dir, 'node', node)

        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,10000) i")
        file_path = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap')").rstrip()

        # PAGE1
        backup_id_2 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(10000,20000) i")

        # PAGE2
        backup_id_3 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # Corrupt some file
        file = os.path.join(
            backup_dir, 'backups', 'node',
            backup_id_2, 'database', file_path)
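        # Overwrite a few bytes inside the copied data file so that validation
        # sees content that no longer matches the stored checksum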
        with open(file, "r+b", 0) as f:
            f.seek(42)
            f.write(b"blah")
            f.flush()
            f.close()

        # Simple validate
        try:
            self.validate_pb(
                backup_dir, 'node', backup_id=backup_id_2, options=["-j", "4"])
            self.assertEqual(
                1, 0,
                "Expecting Error because of data files corruption.\n "
                "Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                'INFO: Validating parents for backup {0}'.format(
                    backup_id_2) in e.message and
                'ERROR: Backup {0} is corrupt'.format(
                    backup_id_2) in e.message and
                'WARNING: Backup {0} data files are corrupted'.format(
                    backup_id_2) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            'CORRUPT',
            self.show_pb(backup_dir, 'node', backup_id_2)['status'],
            'Backup STATUS should be "CORRUPT"')
        self.assertEqual(
            'ORPHAN',
            self.show_pb(backup_dir, 'node', backup_id_3)['status'],
            'Backup STATUS should be "ORPHAN"')

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_validate_corrupted_intermediate_backups(self):
        """
        make archive node, take FULL, PAGE1, PAGE2 backups,
        corrupt file in FULL and PAGE1 backups, run validate on PAGE1,
        expect FULL and PAGE1 to gain status CORRUPT and
        PAGE2 to gain status ORPHAN
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,10000) i")
        file_path_t_heap = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap')").rstrip()
        # FULL
        backup_id_1 = self.backup_node(backup_dir, 'node', node)

        node.safe_psql(
            "postgres",
            "create table t_heap_1 as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,10000) i")
        file_path_t_heap_1 = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap_1')").rstrip()
        # PAGE1
        backup_id_2 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(20000,30000) i")
        # PAGE2
        backup_id_3 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # Corrupt some file in FULL backup
        file_full = os.path.join(
            backup_dir, 'backups', 'node',
            backup_id_1, 'database', file_path_t_heap)
        with open(file_full, "rb+", 0) as f:
            f.seek(84)
            f.write(b"blah")
            f.flush()
            f.close()

        # Corrupt some file in PAGE1 backup
        file_page1 = os.path.join(
            backup_dir, 'backups', 'node',
            backup_id_2, 'database', file_path_t_heap_1)
        with open(file_page1, "rb+", 0) as f:
            f.seek(42)
            f.write(b"blah")
            f.flush()
            f.close()

        # Validate PAGE1
        try:
            self.validate_pb(
                backup_dir, 'node', backup_id=backup_id_2, options=["-j", "4"])
            self.assertEqual(
                1, 0,
                "Expecting Error because of data files corruption.\n "
                "Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                'INFO: Validating parents for backup {0}'.format(
                    backup_id_2) in e.message,
                '\n Unexpected Error Message: {0}\n '
                'CMD: {1}'.format(
                    repr(e.message), self.cmd))
            self.assertTrue(
                'INFO: Validating backup {0}'.format(
                    backup_id_1) in e.message and
                'WARNING: Invalid CRC of backup file' in e.message and
                'WARNING: Backup {0} data files are corrupted'.format(
                    backup_id_1) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
            self.assertTrue(
                'WARNING: Backup {0} is orphaned because his parent'.format(
                    backup_id_2) in e.message and
                'WARNING: Backup {0} is orphaned because his parent'.format(
                    backup_id_3) in e.message and
                'ERROR: Backup {0} is orphan.'.format(
                    backup_id_2) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            'CORRUPT',
            self.show_pb(backup_dir, 'node', backup_id_1)['status'],
            'Backup STATUS should be "CORRUPT"')
        self.assertEqual(
            'ORPHAN',
            self.show_pb(backup_dir, 'node', backup_id_2)['status'],
            'Backup STATUS should be "ORPHAN"')
        self.assertEqual(
            'ORPHAN',
            self.show_pb(backup_dir, 'node', backup_id_3)['status'],
            'Backup STATUS should be "ORPHAN"')

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_validate_specific_error_intermediate_backups(self):
        """
        make archive node, take FULL, PAGE1, PAGE2 backups,
        change backup status of FULL and PAGE1 to ERROR,
        run validate on PAGE1
        purpose of this test is to be sure that not only
        CORRUPT backup descendants can be orphanized
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # FULL
        backup_id_1 = self.backup_node(backup_dir, 'node', node)

        # PAGE1
        backup_id_2 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # PAGE2
        backup_id_3 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # Change FULL backup status to ERROR
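        # backup.control keeps the backup metadata; rewriting its status line
        # simulates a backup that finished in the ERROR state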
        control_path = os.path.join(
            backup_dir, 'backups', 'node', backup_id_1, 'backup.control')

        with open(control_path, 'r') as f:
            actual_control = f.read()

        new_control_file = ''
        for line in actual_control.splitlines():
            new_control_file += line.replace(
                'status = OK', 'status = ERROR')
            new_control_file += '\n'

        with open(control_path, 'wt') as f:
            f.write(new_control_file)
            f.flush()
            f.close()

        # Validate PAGE1
        try:
            self.validate_pb(
                backup_dir, 'node', backup_id=backup_id_2, options=["-j", "4"])
            self.assertEqual(
                1, 0,
                "Expecting Error because backup has status ERROR.\n "
                "Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                'WARNING: Backup {0} is orphaned because '
                'his parent {1} has status: ERROR'.format(
                    backup_id_2, backup_id_1) in e.message and
                'INFO: Validating parents for backup {0}'.format(
                    backup_id_2) in e.message and
                'WARNING: Backup {0} has status ERROR. Skip validation.'.format(
                    backup_id_1) in e.message and
                'ERROR: Backup {0} is orphan.'.format(
                    backup_id_2) in e.message,
                '\n Unexpected Error Message: {0}\n '
                'CMD: {1}'.format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            'ERROR',
            self.show_pb(backup_dir, 'node', backup_id_1)['status'],
            'Backup STATUS should be "ERROR"')
        self.assertEqual(
            'ORPHAN',
            self.show_pb(backup_dir, 'node', backup_id_2)['status'],
            'Backup STATUS should be "ORPHAN"')
        self.assertEqual(
            'ORPHAN',
            self.show_pb(backup_dir, 'node', backup_id_3)['status'],
            'Backup STATUS should be "ORPHAN"')

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_validate_error_intermediate_backups(self):
        """
        make archive node, take FULL, PAGE1, PAGE2 backups,
        change backup status of FULL and PAGE1 to ERROR,
        run validate on instance
        purpose of this test is to be sure that not only
        CORRUPT backup descendants can be orphanized
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # FULL
        backup_id_1 = self.backup_node(backup_dir, 'node', node)

        # PAGE1
        backup_id_2 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # PAGE2
        backup_id_3 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # Change FULL backup status to ERROR
        control_path = os.path.join(
            backup_dir, 'backups', 'node', backup_id_1, 'backup.control')

        with open(control_path, 'r') as f:
            actual_control = f.read()

        new_control_file = ''
        for line in actual_control.splitlines():
            new_control_file += line.replace(
                'status = OK', 'status = ERROR')
            new_control_file += '\n'

        with open(control_path, 'wt') as f:
            f.write(new_control_file)
            f.flush()
            f.close()

        # Validate instance
        try:
            self.validate_pb(backup_dir, options=["-j", "4"])
            self.assertEqual(
                1, 0,
                "Expecting Error because backup has status ERROR.\n "
                "Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                "WARNING: Backup {0} is orphaned because "
                "his parent {1} has status: ERROR".format(
                    backup_id_2, backup_id_1) in e.message and
                'WARNING: Backup {0} has status ERROR. Skip validation'.format(
                    backup_id_1) in e.message and
                "WARNING: Some backups are not valid" in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            'ERROR',
            self.show_pb(backup_dir, 'node', backup_id_1)['status'],
            'Backup STATUS should be "ERROR"')
        self.assertEqual(
            'ORPHAN',
            self.show_pb(backup_dir, 'node', backup_id_2)['status'],
            'Backup STATUS should be "ORPHAN"')
        self.assertEqual(
            'ORPHAN',
            self.show_pb(backup_dir, 'node', backup_id_3)['status'],
            'Backup STATUS should be "ORPHAN"')

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_validate_corrupted_intermediate_backups_1(self):
        """
        make archive node, take FULL1, PAGE1, PAGE2, PAGE3, PAGE4, PAGE5,
        PAGE6, FULL2 backups,
        corrupt file in PAGE2 and PAGE5, run validate on PAGE3,
        expect PAGE2 to gain status CORRUPT, PAGE3, PAGE4, PAGE5 and PAGE6
        to gain status ORPHAN
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # FULL1
        backup_id_1 = self.backup_node(backup_dir, 'node', node)

        # PAGE1
        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,10000) i")
        backup_id_2 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # PAGE2
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,10000) i")
        file_page_2 = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap')").rstrip()
        backup_id_3 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # PAGE3
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(10000,20000) i")
        backup_id_4 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # PAGE4
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(20000,30000) i")
        backup_id_5 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # PAGE5
        node.safe_psql(
            "postgres",
            "create table t_heap1 as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,10000) i")
        file_page_5 = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap1')").rstrip()
        backup_id_6 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # PAGE6
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(30000,40000) i")
        backup_id_7 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # FULL2
        backup_id_8 = self.backup_node(backup_dir, 'node', node)

        # Corrupt some file in PAGE2 and PAGE5 backups
        file_page1 = os.path.join(
            backup_dir, 'backups', 'node',
            backup_id_3, 'database', file_page_2)
        with open(file_page1, "rb+", 0) as f:
            f.seek(84)
            f.write(b"blah")
            f.flush()
            f.close()

        file_page4 = os.path.join(
            backup_dir, 'backups', 'node',
            backup_id_6, 'database', file_page_5)
        with open(file_page4, "rb+", 0) as f:
            f.seek(42)
            f.write(b"blah")
            f.flush()
            f.close()

        # Validate PAGE3
        try:
            self.validate_pb(
                backup_dir, 'node',
                backup_id=backup_id_4, options=["-j", "4"])
            self.assertEqual(
                1, 0,
                "Expecting Error because of data files corruption.\n "
                "Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                'INFO: Validating parents for backup {0}'.format(
                    backup_id_4) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
            self.assertTrue(
                'INFO: Validating backup {0}'.format(
                    backup_id_1) in e.message and
                'INFO: Backup {0} data files are valid'.format(
                    backup_id_1) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
            self.assertTrue(
                'INFO: Validating backup {0}'.format(
                    backup_id_2) in e.message and
                'INFO: Backup {0} data files are valid'.format(
                    backup_id_2) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
            self.assertTrue(
                'INFO: Validating backup {0}'.format(
                    backup_id_3) in e.message and
                'WARNING: Invalid CRC of backup file' in e.message and
                'WARNING: Backup {0} data files are corrupted'.format(
                    backup_id_3) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
            self.assertTrue(
                'WARNING: Backup {0} is orphaned because '
                'his parent {1} has status: CORRUPT'.format(
                    backup_id_4, backup_id_3) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
            self.assertTrue(
                'WARNING: Backup {0} is orphaned because '
                'his parent {1} has status: CORRUPT'.format(
                    backup_id_5, backup_id_3) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
            self.assertTrue(
                'WARNING: Backup {0} is orphaned because '
                'his parent {1} has status: CORRUPT'.format(
                    backup_id_6, backup_id_3) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
            self.assertTrue(
                'WARNING: Backup {0} is orphaned because '
                'his parent {1} has status: CORRUPT'.format(
                    backup_id_7, backup_id_3) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
            self.assertTrue(
                'ERROR: Backup {0} is orphan'.format(backup_id_4) in e.message,
                '\n Unexpected Error Message: {0}\n '
                'CMD: {1}'.format(repr(e.message), self.cmd))

        self.assertEqual(
            'OK', self.show_pb(backup_dir, 'node', backup_id_1)['status'],
            'Backup STATUS should be "OK"')
        self.assertEqual(
            'OK', self.show_pb(backup_dir, 'node', backup_id_2)['status'],
            'Backup STATUS should be "OK"')
        self.assertEqual(
            'CORRUPT', self.show_pb(backup_dir, 'node', backup_id_3)['status'],
            'Backup STATUS should be "CORRUPT"')
        self.assertEqual(
            'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_4)['status'],
            'Backup STATUS should be "ORPHAN"')
        self.assertEqual(
            'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_5)['status'],
            'Backup STATUS should be "ORPHAN"')
        self.assertEqual(
            'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_6)['status'],
            'Backup STATUS should be "ORPHAN"')
        self.assertEqual(
            'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_7)['status'],
            'Backup STATUS should be "ORPHAN"')
        self.assertEqual(
            'OK', self.show_pb(backup_dir, 'node', backup_id_8)['status'],
            'Backup STATUS should be "OK"')

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_validate_specific_target_corrupted_intermediate_backups(self):
        """
        make archive node, take FULL1, PAGE1, PAGE2, PAGE3, PAGE4, PAGE5,
        PAGE6, FULL2 backups,
        corrupt file in PAGE2 and PAGE5, run validate on PAGE3 to specific xid,
        expect PAGE2 to gain status CORRUPT, PAGE3, PAGE4, PAGE5 and PAGE6 to
        gain status ORPHAN
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # FULL1
        backup_id_1 = self.backup_node(backup_dir, 'node', node)

        # PAGE1
        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,10000) i")
        backup_id_2 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # PAGE2
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,10000) i")
        file_page_2 = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap')").rstrip()
        backup_id_3 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # PAGE3
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(10000,20000) i")
        backup_id_4 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # PAGE4
        target_xid = node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(20000,30000) i RETURNING (xmin)")[0][0]
        backup_id_5 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # PAGE5
        node.safe_psql(
            "postgres",
            "create table t_heap1 as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,10000) i")
        file_page_5 = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap1')").rstrip()
        backup_id_6 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # PAGE6
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(30000,40000) i")
        backup_id_7 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # FULL2
        backup_id_8 = self.backup_node(backup_dir, 'node', node)

        # Corrupt some file in PAGE2 and PAGE5 backups
        file_page1 = os.path.join(
            backup_dir, 'backups', 'node',
            backup_id_3, 'database', file_page_2)
        with open(file_page1, "rb+", 0) as f:
            f.seek(84)
            f.write(b"blah")
            f.flush()
            f.close()

        file_page4 = os.path.join(
            backup_dir, 'backups', 'node',
            backup_id_6, 'database', file_page_5)
        with open(file_page4, "rb+", 0) as f:
            f.seek(42)
            f.write(b"blah")
            f.flush()
            f.close()

        # Validate PAGE3
        try:
            self.validate_pb(
                backup_dir, 'node',
                options=[
                    '-i', backup_id_4, '--xid={0}'.format(target_xid),
                    "-j", "4"])
            self.assertEqual(
                1, 0,
                "Expecting Error because of data files corruption.\n "
                "Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                'INFO: Validating parents for backup {0}'.format(
                    backup_id_4) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
            self.assertTrue(
                'INFO: Validating backup {0}'.format(
                    backup_id_1) in e.message and
                'INFO: Backup {0} data files are valid'.format(
                    backup_id_1) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
            self.assertTrue(
                'INFO: Validating backup {0}'.format(
                    backup_id_2) in e.message and
                'INFO: Backup {0} data files are valid'.format(
                    backup_id_2) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
            self.assertTrue(
                'INFO: Validating backup {0}'.format(
                    backup_id_3) in e.message and
                'WARNING: Invalid CRC of backup file' in e.message and
                'WARNING: Backup {0} data files are corrupted'.format(
                    backup_id_3) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
            self.assertTrue(
                'WARNING: Backup {0} is orphaned because his '
                'parent {1} has status: CORRUPT'.format(
                    backup_id_4, backup_id_3) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
            self.assertTrue(
                'WARNING: Backup {0} is orphaned because his '
                'parent {1} has status: CORRUPT'.format(
                    backup_id_5, backup_id_3) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
            self.assertTrue(
                'WARNING: Backup {0} is orphaned because his '
                'parent {1} has status: CORRUPT'.format(
                    backup_id_6, backup_id_3) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
            self.assertTrue(
                'WARNING: Backup {0} is orphaned because his '
                'parent {1} has status: CORRUPT'.format(
                    backup_id_7, backup_id_3) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
            self.assertTrue(
                'ERROR: Backup {0} is orphan'.format(
                    backup_id_4) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            'OK', self.show_pb(backup_dir, 'node', backup_id_1)['status'],
            'Backup STATUS should be "OK"')
        self.assertEqual(
            'OK', self.show_pb(backup_dir, 'node', backup_id_2)['status'],
            'Backup STATUS should be "OK"')
        self.assertEqual(
            'CORRUPT', self.show_pb(backup_dir, 'node', backup_id_3)['status'],
            'Backup STATUS should be "CORRUPT"')
        self.assertEqual(
            'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_4)['status'],
            'Backup STATUS should be "ORPHAN"')
        self.assertEqual(
            'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_5)['status'],
            'Backup STATUS should be "ORPHAN"')
        self.assertEqual(
            'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_6)['status'],
            'Backup STATUS should be "ORPHAN"')
        self.assertEqual(
            'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_7)['status'],
            'Backup STATUS should be "ORPHAN"')
        self.assertEqual(
            'OK', self.show_pb(backup_dir, 'node', backup_id_8)['status'],
            'Backup STATUS should be "OK"')

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_validate_instance_with_corrupted_page(self):
        """
        make archive node, take FULL, PAGE1, PAGE2, FULL2, PAGE3 backups,
        corrupt file in PAGE1 backup and run validate on instance,
        expect PAGE1 to gain status CORRUPT, PAGE2 to gain status ORPHAN
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,10000) i")
        # FULL1
        backup_id_1 = self.backup_node(backup_dir, 'node', node)

        node.safe_psql(
            "postgres",
            "create table t_heap1 as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,10000) i")
        file_path_t_heap1 = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap1')").rstrip()
        # PAGE1
        backup_id_2 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(20000,30000) i")
        # PAGE2
        backup_id_3 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # FULL2
        backup_id_4 = self.backup_node(
            backup_dir, 'node', node)

        # PAGE3
        backup_id_5 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # Corrupt some file in PAGE1 backup
        file_full = os.path.join(
            backup_dir, 'backups', 'node', backup_id_2,
            'database', file_path_t_heap1)
        with open(file_full, "rb+", 0) as f:
            f.seek(84)
            f.write(b"blah")
            f.flush()
            f.close()

        # Validate Instance
        try:
            self.validate_pb(backup_dir, 'node', options=["-j", "4"])
            self.assertEqual(
                1, 0,
                "Expecting Error because of data files corruption.\n "
                "Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                "INFO: Validate backups of the instance 'node'" in e.message,
                "\n Unexpected Error Message: {0}\n "
                "CMD: {1}".format(repr(e.message), self.cmd))
            self.assertTrue(
                'INFO: Validating backup {0}'.format(
                    backup_id_5) in e.message and
                'INFO: Backup {0} data files are valid'.format(
                    backup_id_5) in e.message and
                'INFO: Backup {0} WAL segments are valid'.format(
                    backup_id_5) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
            self.assertTrue(
                'INFO: Validating backup {0}'.format(
                    backup_id_4) in e.message and
                'INFO: Backup {0} data files are valid'.format(
                    backup_id_4) in e.message and
                'INFO: Backup {0} WAL segments are valid'.format(
                    backup_id_4) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
            self.assertTrue(
                'INFO: Validating backup {0}'.format(
                    backup_id_3) in e.message and
                'INFO: Backup {0} data files are valid'.format(
                    backup_id_3) in e.message and
                'INFO: Backup {0} WAL segments are valid'.format(
                    backup_id_3) in e.message and
                'WARNING: Backup {0} is orphaned because '
                'his parent {1} has status: CORRUPT'.format(
                    backup_id_3, backup_id_2) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
            self.assertTrue(
                'INFO: Validating backup {0}'.format(
                    backup_id_2) in e.message and
                'WARNING: Invalid CRC of backup file' in e.message and
                'WARNING: Backup {0} data files are corrupted'.format(
                    backup_id_2) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
            self.assertTrue(
                'INFO: Validating backup {0}'.format(
                    backup_id_1) in e.message and
                'INFO: Backup {0} data files are valid'.format(
                    backup_id_1) in e.message and
                'INFO: Backup {0} WAL segments are valid'.format(
                    backup_id_1) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
            self.assertTrue(
                'WARNING: Some backups are not valid' in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            'OK', self.show_pb(backup_dir, 'node', backup_id_1)['status'],
            'Backup STATUS should be "OK"')
        self.assertEqual(
            'CORRUPT', self.show_pb(backup_dir, 'node', backup_id_2)['status'],
            'Backup STATUS should be "CORRUPT"')
        self.assertEqual(
            'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_3)['status'],
            'Backup STATUS should be "ORPHAN"')
        self.assertEqual(
            'OK', self.show_pb(backup_dir, 'node', backup_id_4)['status'],
            'Backup STATUS should be "OK"')
        self.assertEqual(
            'OK', self.show_pb(backup_dir, 'node', backup_id_5)['status'],
            'Backup STATUS should be "OK"')

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_validate_instance_with_corrupted_full_and_try_restore(self):
        """
        make archive node, take FULL, PAGE1, PAGE2, FULL2, PAGE3 backups,
        corrupt file in FULL backup and run validate on instance,
        expect FULL to gain status CORRUPT, PAGE1 and PAGE2 to gain status ORPHAN,
        try to restore backup with --no-validate option
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,10000) i")
        file_path_t_heap = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap')").rstrip()
        # FULL1
        backup_id_1 = self.backup_node(backup_dir, 'node', node)

        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,10000) i")
        # PAGE1
        backup_id_2 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # PAGE2
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(20000,30000) i")
        backup_id_3 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # FULL2
        backup_id_4 = self.backup_node(backup_dir, 'node', node)

        # PAGE3
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, "
            "md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(30000,40000) i")
        backup_id_5 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # Corrupt some file in FULL backup
        file_full = os.path.join(
            backup_dir, 'backups', 'node',
            backup_id_1, 'database', file_path_t_heap)
        with open(file_full, "rb+", 0) as f:
            f.seek(84)
            f.write(b"blah")
            f.flush()
            f.close()

        # Validate Instance
        try:
            self.validate_pb(backup_dir, 'node', options=["-j", "4"])
            self.assertEqual(
                1, 0,
                "Expecting Error because of data files corruption.\n "
                "Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                'INFO: Validating backup {0}'.format(backup_id_1) in e.message
                and "INFO: Validate backups of the instance 'node'" in e.message
                and 'WARNING: Invalid CRC of backup file' in e.message
                and 'WARNING: Backup {0} data files are corrupted'.format(
                    backup_id_1) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            'CORRUPT', self.show_pb(backup_dir, 'node', backup_id_1)['status'],
            'Backup STATUS should be "CORRUPT"')
        self.assertEqual(
            'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_2)['status'],
            'Backup STATUS should be "ORPHAN"')
        self.assertEqual(
            'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_3)['status'],
            'Backup STATUS should be "ORPHAN"')
        self.assertEqual(
            'OK', self.show_pb(backup_dir, 'node', backup_id_4)['status'],
            'Backup STATUS should be "OK"')
        self.assertEqual(
            'OK', self.show_pb(backup_dir, 'node', backup_id_5)['status'],
            'Backup STATUS should be "OK"')

        node.cleanup()
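
        # Restore the latest backup; --no-validate skips the pre-restore
        # validation step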
        restore_out = self.restore_node(
            backup_dir, 'node', node,
            options=["--no-validate"])
        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(backup_id_5),
            restore_out,
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_validate_instance_with_corrupted_full(self):
        """
        make archive node, take FULL, PAGE1, PAGE2, FULL2, PAGE3 backups,
        corrupt file in FULL backup and run validate on instance,
        expect FULL to gain status CORRUPT, PAGE1 and PAGE2 to gain status ORPHAN
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id, "
            "md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,10000) i")
        file_path_t_heap = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap')").rstrip()
        # FULL1
        backup_id_1 = self.backup_node(backup_dir, 'node', node)

        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,10000) i")
        # PAGE1
        backup_id_2 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # PAGE2
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(20000,30000) i")
        backup_id_3 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # FULL2
        backup_id_4 = self.backup_node(
            backup_dir, 'node', node)

        # PAGE3
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, "
            "md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(30000,40000) i")
        backup_id_5 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # Corrupt some file in FULL backup
        file_full = os.path.join(
            backup_dir, 'backups', 'node',
            backup_id_1, 'database', file_path_t_heap)
        with open(file_full, "rb+", 0) as f:
            f.seek(84)
            f.write(b"blah")
            f.flush()
            f.close()

        # Validate Instance
        try:
            self.validate_pb(backup_dir, 'node', options=["-j", "4"])
            self.assertEqual(
                1, 0,
                "Expecting Error because of data files corruption.\n "
                "Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                'INFO: Validating backup {0}'.format(backup_id_1) in e.message
                and "INFO: Validate backups of the instance 'node'" in e.message
                and 'WARNING: Invalid CRC of backup file' in e.message
                and 'WARNING: Backup {0} data files are corrupted'.format(
                    backup_id_1) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            'CORRUPT', self.show_pb(backup_dir, 'node', backup_id_1)['status'],
            'Backup STATUS should be "CORRUPT"')
        self.assertEqual(
            'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_2)['status'],
            'Backup STATUS should be "ORPHAN"')
        self.assertEqual(
            'ORPHAN', self.show_pb(backup_dir, 'node', backup_id_3)['status'],
            'Backup STATUS should be "ORPHAN"')
        self.assertEqual(
            'OK', self.show_pb(backup_dir, 'node', backup_id_4)['status'],
            'Backup STATUS should be "OK"')
        self.assertEqual(
            'OK', self.show_pb(backup_dir, 'node', backup_id_5)['status'],
            'Backup STATUS should be "OK"')

        # Clean after yourself
        self.del_test_dir(module_name, fname)

# @unittest.skip("skip")
def test_validate_corrupt_wal_1 ( self ) :
2017-12-19 11:56:03 +02:00
""" make archive node, take FULL1, PAGE1,PAGE2,FULL2,PAGE3,PAGE4 backups, corrupt all wal files, run validate, expect errors """
2017-05-22 13:17:43 +02:00
fname = self . id ( ) . split ( ' . ' ) [ 3 ]
2018-01-18 03:35:27 +02:00
node = self . make_simple_node (
2018-12-26 21:59:13 +02:00
base_dir = os . path . join ( module_name , fname , ' node ' ) ,
2019-03-01 18:19:56 +02:00
initdb_params = [ ' --data-checksums ' ] )
2017-07-12 16:28:28 +02:00
backup_dir = os . path . join ( self . tmp_path , module_name , fname , ' backup ' )
2017-06-20 12:57:23 +02:00
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
self . set_archiving ( backup_dir , ' node ' , node )
2018-12-25 16:48:49 +02:00
node . slow_start ( )
2017-06-20 12:57:23 +02:00
2017-12-19 11:56:03 +02:00
backup_id_1 = self . backup_node ( backup_dir , ' node ' , node )
2017-12-14 20:33:48 +02:00
2017-05-22 13:17:43 +02:00
with node . connect ( " postgres " ) as con :
con . execute ( " CREATE TABLE tbl0005 (a text) " )
con . commit ( )
2017-05-05 15:21:49 +02:00
2017-12-19 11:56:03 +02:00
backup_id_2 = self . backup_node ( backup_dir , ' node ' , node )
2017-05-22 13:17:43 +02:00
# Corrupt WAL
2017-06-20 12:57:23 +02:00
wals_dir = os . path . join ( backup_dir , ' wal ' , ' node ' )
2017-05-05 15:21:49 +02:00
wals = [ f for f in os . listdir ( wals_dir ) if os . path . isfile ( os . path . join ( wals_dir , f ) ) and not f . endswith ( ' .backup ' ) ]
wals . sort ( )
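# Write garbage into every archived segment; WAL validation for both backups must then fail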
for wal in wals :
2017-06-27 07:42:52 +02:00
with open ( os . path . join ( wals_dir , wal ) , " rb+ " , 0 ) as f :
f . seek ( 42 )
f . write ( b " blablablaadssaaaaaaaaaaaaaaa " )
f . flush ( )
f . close ( )
2017-05-05 15:21:49 +02:00
2017-05-22 13:17:43 +02:00
# Simple validate
2017-05-05 15:21:49 +02:00
try :
2019-03-06 10:58:42 +02:00
self . validate_pb ( backup_dir , ' node ' , options = [ " -j " , " 4 " ] )
2018-01-18 03:35:27 +02:00
self . assertEqual (
1 , 0 ,
" Expecting Error because of wal segments corruption. \n "
" Output: {0} \n CMD: {1} " . format (
repr ( self . output ) , self . cmd ) )
2017-06-27 07:42:52 +02:00
except ProbackupException as e :
2018-01-18 03:35:27 +02:00
self . assertTrue (
' WARNING: Backup ' in e . message and
' WAL segments are corrupted ' in e . message and
" WARNING: There are not enough WAL "
" records to consistenly restore backup " in e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
2017-05-22 13:17:43 +02:00
2018-01-18 03:35:27 +02:00
self . assertEqual (
' CORRUPT ' ,
self . show_pb ( backup_dir , ' node ' , backup_id_1 ) [ ' status ' ] ,
' Backup STATUS should be " CORRUPT " ' )
self . assertEqual (
' CORRUPT ' ,
self . show_pb ( backup_dir , ' node ' , backup_id_2 ) [ ' status ' ] ,
' Backup STATUS should be " CORRUPT " ' )
2017-06-27 07:42:52 +02:00
# Clean after yourself
2017-07-12 16:28:28 +02:00
self . del_test_dir ( module_name , fname )
2017-05-05 15:21:49 +02:00
2017-05-25 11:53:33 +02:00
# @unittest.skip("skip")
def test_validate_corrupt_wal_2 ( self ) :
2017-06-20 12:57:23 +02:00
""" make archive node, make full backup, corrupt all wal files, run validate to real xid, expect errors """
2017-05-22 13:17:43 +02:00
fname = self . id ( ) . split ( ' . ' ) [ 3 ]
2018-12-26 21:59:13 +02:00
node = self . make_simple_node ( base_dir = os . path . join ( module_name , fname , ' node ' ) ,
2019-03-01 18:19:56 +02:00
initdb_params = [ ' --data-checksums ' ] )
2017-07-12 16:28:28 +02:00
backup_dir = os . path . join ( self . tmp_path , module_name , fname , ' backup ' )
2017-06-20 12:57:23 +02:00
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
self . set_archiving ( backup_dir , ' node ' , node )
2018-12-25 16:48:49 +02:00
node . slow_start ( )
2017-06-20 12:57:23 +02:00
2017-05-22 13:17:43 +02:00
with node . connect ( " postgres " ) as con :
con . execute ( " CREATE TABLE tbl0005 (a text) " )
con . commit ( )
2017-06-20 12:57:23 +02:00
backup_id = self . backup_node ( backup_dir , ' node ' , node )
2017-05-22 13:17:43 +02:00
target_xid = None
with node . connect ( " postgres " ) as con :
2018-06-02 19:35:37 +02:00
res = con . execute (
" INSERT INTO tbl0005 VALUES ( ' inserted ' ) RETURNING (xmin) " )
2017-05-22 13:17:43 +02:00
con . commit ( )
target_xid = res [ 0 ] [ 0 ]
# Corrupt WAL
2017-06-20 12:57:23 +02:00
wals_dir = os . path . join ( backup_dir , ' wal ' , ' node ' )
2017-05-22 13:17:43 +02:00
wals = [ f for f in os . listdir ( wals_dir ) if os . path . isfile ( os . path . join ( wals_dir , f ) ) and not f . endswith ( ' .backup ' ) ]
wals . sort ( )
for wal in wals :
2017-06-27 07:42:52 +02:00
with open ( os . path . join ( wals_dir , wal ) , " rb+ " , 0 ) as f :
2018-01-17 20:23:57 +02:00
f . seek ( 128 )
2017-06-27 07:42:52 +02:00
f . write ( b " blablablaadssaaaaaaaaaaaaaaa " )
f . flush ( )
f . close ( )
2017-05-22 13:17:43 +02:00
2017-05-25 11:53:33 +02:00
# Validate to xid
2017-05-05 15:21:49 +02:00
try :
2018-01-17 20:23:57 +02:00
self . validate_pb (
backup_dir ,
' node ' ,
backup_id ,
options = [
2019-03-06 10:58:42 +02:00
" --xid= {0} " . format ( target_xid ) , " -j " , " 4 " ] )
2018-01-17 20:23:57 +02:00
self . assertEqual (
1 , 0 ,
" Expecting Error because of wal segments corruption. \n "
" Output: {0} \n CMD: {1} " . format (
repr ( self . output ) , self . cmd ) )
2017-06-27 07:42:52 +02:00
except ProbackupException as e :
2018-01-17 20:23:57 +02:00
self . assertTrue (
' WARNING: Backup ' in e . message and
' WAL segments are corrupted ' in e . message and
" WARNING: There are not enough WAL "
" records to consistenly restore backup " in e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertEqual (
' CORRUPT ' ,
self . show_pb ( backup_dir , ' node ' , backup_id ) [ ' status ' ] ,
' Backup STATUS should be " CORRUPT " ' )
2017-06-27 07:42:52 +02:00
# Clean after yourself
2017-07-12 16:28:28 +02:00
self . del_test_dir ( module_name , fname )
2017-05-05 15:21:49 +02:00
2017-05-25 11:53:33 +02:00
# @unittest.skip("skip")
2017-05-05 15:21:49 +02:00
def test_validate_wal_lost_segment_1 ( self ) :
2017-06-20 12:57:23 +02:00
""" make archive node, make archive full backup,
2017-05-25 11:53:33 +02:00
delete from the archive a wal segment which belongs to the backup,
run validate, expecting an error because of the missing wal segment,
make sure that backup status is 'CORRUPT'
"""
2017-05-05 15:21:49 +02:00
fname = self . id ( ) . split ( ' . ' ) [ 3 ]
2018-03-15 12:34:51 +02:00
node = self . make_simple_node (
2018-12-26 21:59:13 +02:00
base_dir = os . path . join ( module_name , fname , ' node ' ) ,
2019-03-01 18:19:56 +02:00
initdb_params = [ ' --data-checksums ' ] )
2017-07-12 16:28:28 +02:00
backup_dir = os . path . join ( self . tmp_path , module_name , fname , ' backup ' )
2017-06-20 12:57:23 +02:00
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
self . set_archiving ( backup_dir , ' node ' , node )
2018-12-25 16:48:49 +02:00
node . slow_start ( )
2017-06-20 12:57:23 +02:00
2018-09-03 16:31:04 +02:00
node . pgbench_init ( scale = 3 )
2017-06-20 12:57:23 +02:00
backup_id = self . backup_node ( backup_dir , ' node ' , node )
2017-05-05 15:21:49 +02:00
2017-05-22 13:17:43 +02:00
# Delete wal segment
2017-06-20 12:57:23 +02:00
wals_dir = os . path . join ( backup_dir , ' wal ' , ' node ' )
2017-05-05 15:21:49 +02:00
wals = [ f for f in os . listdir ( wals_dir ) if os . path . isfile ( os . path . join ( wals_dir , f ) ) and not f . endswith ( ' .backup ' ) ]
2018-01-18 16:15:15 +02:00
wals . sort ( )
2018-02-28 22:01:22 +02:00
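# Drop the newest archived segment - the backup needs it to reach consistency, so validation must fail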
file = os . path . join ( backup_dir , ' wal ' , ' node ' , wals [ - 1 ] )
2017-05-25 11:53:33 +02:00
os . remove ( file )
2018-01-18 03:35:27 +02:00
# cut out '.gz'
if self . archive_compress :
file = file [ : - 3 ]
2017-05-05 15:21:49 +02:00
try :
2019-03-06 10:58:42 +02:00
self . validate_pb ( backup_dir , ' node ' , options = [ " -j " , " 4 " ] )
2018-01-18 03:35:27 +02:00
self . assertEqual (
1 , 0 ,
" Expecting Error because of wal segment disappearance. \n "
" Output: {0} \n CMD: {1} " . format (
repr ( self . output ) , self . cmd ) )
2017-06-27 07:42:52 +02:00
except ProbackupException as e :
2018-01-18 03:35:27 +02:00
self . assertTrue (
2018-12-29 11:42:59 +02:00
" is absent " in e . message and
2018-01-18 03:35:27 +02:00
" WARNING: There are not enough WAL records to consistenly "
" restore backup {0} " . format ( backup_id ) in e . message and
" WARNING: Backup {0} WAL segments are corrupted " . format (
backup_id ) in e . message and
2018-03-15 12:34:51 +02:00
" WARNING: Some backups are not valid " in e . message ,
2018-01-18 03:35:27 +02:00
" \n Unexpected Error Message: {0} \n CMD: {1} " . format (
repr ( e . message ) , self . cmd ) )
2017-05-22 13:17:43 +02:00
2018-01-18 03:35:27 +02:00
self . assertEqual (
' CORRUPT ' ,
self . show_pb ( backup_dir , ' node ' , backup_id ) [ ' status ' ] ,
' Backup {0} should have STATUS " CORRUPT " ' . format ( backup_id ) )
2017-05-25 11:53:33 +02:00
2018-03-15 12:34:51 +02:00
# Run validate again
2017-05-25 11:53:33 +02:00
try :
2019-03-06 10:58:42 +02:00
self . validate_pb ( backup_dir , ' node ' , backup_id , options = [ " -j " , " 4 " ] )
2018-01-18 03:35:27 +02:00
self . assertEqual (
1 , 0 ,
" Expecting Error because of backup corruption. \n "
" Output: {0} \n CMD: {1} " . format (
repr ( self . output ) , self . cmd ) )
2017-06-27 07:42:52 +02:00
except ProbackupException as e :
2018-01-18 03:35:27 +02:00
self . assertIn (
2018-03-15 12:34:51 +02:00
' INFO: Revalidating backup {0} ' . format ( backup_id ) , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' ERROR: Backup {0} is corrupt. ' . format ( backup_id ) , e . message ,
2018-01-18 03:35:27 +02:00
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
# Clean after yourself
self . del_test_dir ( module_name , fname )
# @unittest.skip("skip")
def test_validate_corrupt_wal_between_backups ( self ) :
2018-06-02 19:35:37 +02:00
"""
make archive node, make two full backups,
corrupt a wal segment written between them, run validate to real xid, expect errors
"""
2018-01-18 03:35:27 +02:00
fname = self . id ( ) . split ( ' . ' ) [ 3 ]
node = self . make_simple_node (
2018-12-26 21:59:13 +02:00
base_dir = os . path . join ( module_name , fname , ' node ' ) ,
2019-03-01 18:19:56 +02:00
initdb_params = [ ' --data-checksums ' ] )
2018-01-18 03:35:27 +02:00
backup_dir = os . path . join ( self . tmp_path , module_name , fname , ' backup ' )
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
self . set_archiving ( backup_dir , ' node ' , node )
2018-12-25 16:48:49 +02:00
node . slow_start ( )
2018-01-18 03:35:27 +02:00
backup_id = self . backup_node ( backup_dir , ' node ' , node )
# make some wals
2018-09-03 16:31:04 +02:00
node . pgbench_init ( scale = 3 )
2018-01-18 03:35:27 +02:00
with node . connect ( " postgres " ) as con :
con . execute ( " CREATE TABLE tbl0005 (a text) " )
con . commit ( )
with node . connect ( " postgres " ) as con :
res = con . execute (
" INSERT INTO tbl0005 VALUES ( ' inserted ' ) RETURNING (xmin) " )
con . commit ( )
target_xid = res [ 0 ] [ 0 ]
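# Remember the segment that will hold this xid; pg_xlogfile_name() and
# pg_current_xlog_location() were renamed in PostgreSQL 10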
if self . get_version ( node ) < self . version_to_num ( ' 10.0 ' ) :
walfile = node . safe_psql (
' postgres ' ,
' select pg_xlogfile_name(pg_current_xlog_location()) ' ) . rstrip ( )
else :
walfile = node . safe_psql (
' postgres ' ,
2018-06-02 19:35:37 +02:00
' select pg_walfile_name(pg_current_wal_lsn()) ' ) . rstrip ( )
2018-01-18 03:35:27 +02:00
if self . archive_compress :
walfile = walfile + ' .gz '
self . switch_wal_segment ( node )
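# switch_wal_segment forces the remembered segment to be completed and archived before it gets corrupted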
# generate some wals
2018-09-03 16:31:04 +02:00
node . pgbench_init ( scale = 3 )
2018-01-18 03:35:27 +02:00
self . backup_node ( backup_dir , ' node ' , node )
# Corrupt WAL
wals_dir = os . path . join ( backup_dir , ' wal ' , ' node ' )
with open ( os . path . join ( wals_dir , walfile ) , " rb+ " , 0 ) as f :
f . seek ( 9000 )
f . write ( b " b " )
f . flush ( )
f . close ( )
# Validate to xid
try :
self . validate_pb (
backup_dir ,
' node ' ,
backup_id ,
options = [
2019-03-06 10:58:42 +02:00
" --xid= {0} " . format ( target_xid ) , " -j " , " 4 " ] )
2018-01-18 03:35:27 +02:00
self . assertEqual (
1 , 0 ,
" Expecting Error because of wal segments corruption. \n "
" Output: {0} \n CMD: {1} " . format (
repr ( self . output ) , self . cmd ) )
except ProbackupException as e :
self . assertTrue (
2019-03-14 17:59:28 +02:00
' ERROR: Not enough WAL records to xid ' in e . message and
' WARNING: Recovery can be done up to time ' in e . message and
" ERROR: Not enough WAL records to xid {0} \n " . format (
2018-01-18 03:35:27 +02:00
target_xid ) in e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertEqual (
' OK ' ,
2018-06-02 19:35:37 +02:00
self . show_pb ( backup_dir , ' node ' ) [ 0 ] [ ' status ' ] ,
2018-01-18 03:35:27 +02:00
' Backup STATUS should be " OK " ' )
self . assertEqual (
' OK ' ,
2018-06-02 19:35:37 +02:00
self . show_pb ( backup_dir , ' node ' ) [ 1 ] [ ' status ' ] ,
2018-01-18 03:35:27 +02:00
' Backup STATUS should be " OK " ' )
2017-06-27 07:42:52 +02:00
# Clean after yourself
2017-07-12 16:28:28 +02:00
self . del_test_dir ( module_name , fname )
2017-09-28 09:32:06 +02:00
# @unittest.skip("skip")
def test_pgpro702_688 ( self ) :
2018-08-12 14:10:22 +02:00
"""
make node without archiving , make stream backup ,
get Recovery Time, validate to Recovery Time, expect an error because there is no WAL archive
"""
2017-09-28 09:32:06 +02:00
fname = self . id ( ) . split ( ' . ' ) [ 3 ]
2018-03-15 12:34:51 +02:00
node = self . make_simple_node (
2018-12-26 21:59:13 +02:00
base_dir = os . path . join ( module_name , fname , ' node ' ) ,
2017-09-28 09:32:06 +02:00
set_replication = True ,
initdb_params = [ ' --data-checksums ' ] ,
2019-03-01 18:19:56 +02:00
pg_options = { ' max_wal_senders ' : ' 2 ' } )
2017-09-28 09:32:06 +02:00
backup_dir = os . path . join ( self . tmp_path , module_name , fname , ' backup ' )
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
2018-12-25 16:48:49 +02:00
node . slow_start ( )
2017-09-28 09:32:06 +02:00
2018-03-15 12:34:51 +02:00
backup_id = self . backup_node (
backup_dir , ' node ' , node , options = [ " --stream " ] )
recovery_time = self . show_pb (
backup_dir , ' node ' , backup_id = backup_id ) [ ' recovery-time ' ]
2017-09-28 09:32:06 +02:00
2017-10-09 14:32:48 +02:00
try :
2018-03-15 12:34:51 +02:00
self . validate_pb (
backup_dir , ' node ' ,
2019-03-06 10:58:42 +02:00
options = [ " --time= {0} " . format ( recovery_time ) , " -j " , " 4 " ] )
2018-03-15 12:34:51 +02:00
self . assertEqual (
1 , 0 ,
" Expecting Error because of wal segment disappearance. \n "
" Output: {0} \n CMD: {1} " . format (
self . output , self . cmd ) )
2017-10-09 14:32:48 +02:00
except ProbackupException as e :
2018-03-15 12:34:51 +02:00
self . assertIn (
' WAL archive is empty. You cannot restore backup to a '
' recovery target without WAL archive ' , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
2017-09-28 09:32:06 +02:00
# Clean after yourself
self . del_test_dir ( module_name , fname )
# @unittest.skip("skip")
def test_pgpro688 ( self ) :
2018-08-12 14:10:22 +02:00
"""
make node with archiving , make backup , get Recovery Time ,
validate to Recovery Time . Waiting PGPRO - 688. RESOLVED
"""
2017-09-28 09:32:06 +02:00
fname = self . id ( ) . split ( ' . ' ) [ 3 ]
2018-03-15 12:34:51 +02:00
node = self . make_simple_node (
2018-12-26 21:59:13 +02:00
base_dir = os . path . join ( module_name , fname , ' node ' ) ,
2017-09-28 09:32:06 +02:00
set_replication = True ,
initdb_params = [ ' --data-checksums ' ] ,
2019-03-01 18:19:56 +02:00
pg_options = { ' max_wal_senders ' : ' 2 ' } )
2017-09-28 09:32:06 +02:00
backup_dir = os . path . join ( self . tmp_path , module_name , fname , ' backup ' )
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
self . set_archiving ( backup_dir , ' node ' , node )
2018-12-25 16:48:49 +02:00
node . slow_start ( )
2017-09-28 09:32:06 +02:00
backup_id = self . backup_node ( backup_dir , ' node ' , node )
2018-08-12 14:10:22 +02:00
recovery_time = self . show_pb (
backup_dir , ' node ' , backup_id ) [ ' recovery-time ' ]
2017-09-28 09:32:06 +02:00
2018-08-12 14:10:22 +02:00
self . validate_pb (
2019-03-06 10:58:42 +02:00
backup_dir , ' node ' , options = [ " --time= {0} " . format ( recovery_time ) ,
" -j " , " 4 " ] )
2017-09-28 09:32:06 +02:00
# Clean after yourself
self . del_test_dir ( module_name , fname )
# @unittest.skip("skip")
2018-03-17 00:41:35 +02:00
# @unittest.expectedFailure
2017-09-28 09:32:06 +02:00
def test_pgpro561 ( self ) :
2018-03-17 00:41:35 +02:00
"""
make node with archiving , make stream backup ,
restore it to node2, check that archiving is not successful from node2
"""
2017-09-28 09:32:06 +02:00
fname = self . id ( ) . split ( ' . ' ) [ 3 ]
2018-03-15 12:34:51 +02:00
node1 = self . make_simple_node (
2018-12-26 21:59:13 +02:00
base_dir = os . path . join ( module_name , fname , ' node1 ' ) ,
2017-09-28 09:32:06 +02:00
set_replication = True ,
initdb_params = [ ' --data-checksums ' ] ,
2019-03-01 18:19:56 +02:00
pg_options = { ' max_wal_senders ' : ' 2 ' } )
2017-09-28 09:32:06 +02:00
backup_dir = os . path . join ( self . tmp_path , module_name , fname , ' backup ' )
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node1 ' , node1 )
self . set_archiving ( backup_dir , ' node1 ' , node1 )
2018-12-25 16:48:49 +02:00
node1 . slow_start ( )
2017-09-28 09:32:06 +02:00
2018-03-17 00:41:35 +02:00
backup_id = self . backup_node (
backup_dir , ' node1 ' , node1 , options = [ " --stream " ] )
2017-09-28 09:32:06 +02:00
2018-03-17 00:41:35 +02:00
node2 = self . make_simple_node (
2018-12-26 21:59:13 +02:00
base_dir = os . path . join ( module_name , fname , ' node2 ' ) )
2017-09-28 09:32:06 +02:00
node2 . cleanup ( )
node1 . psql (
" postgres " ,
2018-03-17 00:41:35 +02:00
" create table t_heap as select i as id, md5(i::text) as text, "
" md5(repeat(i::text,10))::tsvector as tsvector "
" from generate_series(0,256) i " )
2017-09-28 09:32:06 +02:00
2018-03-17 00:41:35 +02:00
self . backup_node (
backup_dir , ' node1 ' , node1 ,
backup_type = ' page ' , options = [ " --stream " ] )
2017-09-28 09:32:06 +02:00
self . restore_node ( backup_dir , ' node1 ' , data_dir = node2 . data_dir )
2018-11-28 20:19:10 +02:00
2018-03-17 00:41:35 +02:00
node2 . append_conf (
' postgresql.auto.conf ' , ' port = {0} ' . format ( node2 . port ) )
2018-11-28 20:19:10 +02:00
node2 . append_conf (
' postgresql.auto.conf ' , ' archive_mode = off ' )
2018-07-11 09:50:38 +02:00
node2 . slow_start ( )
2017-09-28 09:32:06 +02:00
2018-11-28 20:19:10 +02:00
node2 . append_conf (
' postgresql.auto.conf ' , ' archive_mode = on ' )
2018-12-27 16:19:23 +02:00
node2 . stop ( )
node2 . slow_start ( )
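# node2 was restored from node1's backup, so it runs node1's archive_command; archive-push from node2 into node1's archive is expected to fail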
2018-11-28 20:19:10 +02:00
2017-09-28 09:32:06 +02:00
timeline_node1 = node1 . get_control_data ( ) [ " Latest checkpoint ' s TimeLineID " ]
timeline_node2 = node2 . get_control_data ( ) [ " Latest checkpoint ' s TimeLineID " ]
2018-03-17 00:41:35 +02:00
self . assertEqual (
timeline_node1 , timeline_node2 ,
" Timelines on Master and Node1 should be equal. "
" This is unexpected " )
archive_command_node1 = node1 . safe_psql (
" postgres " , " show archive_command " )
archive_command_node2 = node2 . safe_psql (
" postgres " , " show archive_command " )
self . assertEqual (
archive_command_node1 , archive_command_node2 ,
" Archive command on Master and Node should be equal. "
" This is unexpected " )
2017-09-28 09:32:06 +02:00
2018-03-17 00:41:35 +02:00
# result = node2.safe_psql("postgres", "select last_failed_wal from pg_stat_get_archiver() where last_failed_wal is not NULL")
2017-12-19 11:56:03 +02:00
## self.assertEqual(res, six.b(""), 'Restored Node1 failed to archive segment {0} due to having the same archive command as Master'.format(res.rstrip()))
2018-03-17 00:41:35 +02:00
# if result == "":
2018-09-06 19:46:32 +02:00
# self.assertEqual(1, 0, 'Error is expected due to Master and Node1 having the common archive and archive_command')
2017-12-19 11:56:03 +02:00
2019-04-20 11:42:17 +02:00
node1 . psql (
" postgres " ,
" create table t_heap_1 as select i as id, md5(i::text) as text, "
" md5(repeat(i::text,10))::tsvector as tsvector "
" from generate_series(0,10) i " )
2018-03-17 00:41:35 +02:00
self . switch_wal_segment ( node1 )
2019-04-20 11:42:17 +02:00
# wals_dir = os.path.join(backup_dir, 'wal', 'node1')
# wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(
# wals_dir, f)) and not f.endswith('.backup') and not f.endswith('.partial')]
# wals = map(str, wals)
# print(wals)
2018-03-17 00:41:35 +02:00
self . switch_wal_segment ( node2 )
2019-04-20 11:42:17 +02:00
# wals_dir = os.path.join(backup_dir, 'wal', 'node1')
# wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(
# wals_dir, f)) and not f.endswith('.backup') and not f.endswith('.partial')]
# wals = map(str, wals)
# print(wals)
2018-03-17 00:41:35 +02:00
time . sleep ( 5 )
2017-12-19 11:56:03 +02:00
log_file = os . path . join ( node2 . logs_dir , ' postgresql.log ' )
with open ( log_file , ' r ' ) as f :
log_content = f . read ( )
2018-03-17 00:41:35 +02:00
self . assertTrue (
' LOG: archive command failed with exit code 1 ' in log_content and
' DETAIL: The failed archive command was: ' in log_content and
' INFO: pg_probackup archive-push from ' in log_content ,
' Expecting error messages about failed archive_command '
2017-12-19 11:56:03 +02:00
)
2018-03-17 00:41:35 +02:00
self . assertFalse (
' pg_probackup archive-push completed successfully ' in log_content )
2017-09-28 09:32:06 +02:00
# Clean after yourself
2018-02-28 22:01:22 +02:00
self . del_test_dir ( module_name , fname )
2018-03-15 12:34:51 +02:00
# @unittest.skip("skip")
def test_validate_corrupted_full ( self ) :
"""
make node with archiving, take a full backup and two page backups,
take another full backup and two more page backups,
corrupt the second full backup, run validate, check that
the second full backup became CORRUPT and its page backups are ORPHANs,
remove the corruption and run validate again, check that
the second full backup and its page backups are OK
"""
fname = self . id ( ) . split ( ' . ' ) [ 3 ]
node = self . make_simple_node (
2018-12-26 21:59:13 +02:00
base_dir = os . path . join ( module_name , fname , ' node ' ) ,
2018-03-15 12:34:51 +02:00
set_replication = True ,
initdb_params = [ ' --data-checksums ' ] ,
2018-10-11 15:26:23 +02:00
pg_options = {
' max_wal_senders ' : ' 2 ' ,
2019-03-01 18:19:56 +02:00
' checkpoint_timeout ' : ' 30 ' } )
2018-03-15 12:34:51 +02:00
backup_dir = os . path . join ( self . tmp_path , module_name , fname , ' backup ' )
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
self . set_archiving ( backup_dir , ' node ' , node )
2018-12-25 16:48:49 +02:00
node . slow_start ( )
2018-03-15 12:34:51 +02:00
self . backup_node ( backup_dir , ' node ' , node )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
backup_id = self . backup_node ( backup_dir , ' node ' , node )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
node . safe_psql (
" postgres " ,
" alter system set archive_command = ' false ' " )
node . reload ( )
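# With archive_command set to 'false' archiving fails, so the next PAGE backup times out and is left in ERROR state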
try :
self . backup_node (
backup_dir , ' node ' , node ,
backup_type = ' page ' , options = [ ' --archive-timeout=1s ' ] )
self . assertEqual (
1 , 0 ,
" Expecting Error because of data file dissapearance. \n "
" Output: {0} \n CMD: {1} " . format (
self . output , self . cmd ) )
except ProbackupException as e :
pass
2018-10-11 15:26:23 +02:00
2018-03-17 00:41:35 +02:00
self . assertTrue (
2018-06-02 19:35:37 +02:00
self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' status ' ] == ' ERROR ' )
2018-03-15 12:34:51 +02:00
self . set_archiving ( backup_dir , ' node ' , node )
node . reload ( )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
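# Simulate corruption of the second FULL backup by moving one of its files out of the backup directory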
file = os . path . join (
backup_dir , ' backups ' , ' node ' ,
backup_id , ' database ' , ' postgresql.auto.conf ' )
file_new = os . path . join ( backup_dir , ' postgresql.auto.conf ' )
os . rename ( file , file_new )
try :
2019-03-06 10:58:42 +02:00
self . validate_pb ( backup_dir , options = [ " -j " , " 4 " ] )
2018-03-15 12:34:51 +02:00
self . assertEqual (
1 , 0 ,
" Expecting Error because of data file dissapearance. \n "
" Output: {0} \n CMD: {1} " . format (
self . output , self . cmd ) )
except ProbackupException as e :
self . assertIn (
' Validating backup {0} ' . format ( backup_id ) , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} data files are corrupted ' . format (
backup_id ) , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Some backups are not valid ' . format (
backup_id ) , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
2018-06-02 19:35:37 +02:00
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 0 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 1 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 2 ] [ ' status ' ] == ' OK ' )
self . assertTrue (
self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' status ' ] == ' CORRUPT ' )
self . assertTrue (
self . show_pb ( backup_dir , ' node ' ) [ 4 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue (
self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue (
self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' status ' ] == ' ERROR ' )
self . assertTrue (
self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' status ' ] == ' ORPHAN ' )
2018-03-15 12:34:51 +02:00
os . rename ( file_new , file )
try :
2019-03-06 10:58:42 +02:00
self . validate_pb ( backup_dir , options = [ " -j " , " 4 " ] )
2018-03-15 12:34:51 +02:00
except ProbackupException as e :
self . assertIn (
' WARNING: Some backups are not valid ' . format (
backup_id ) , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
2018-06-02 19:35:37 +02:00
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 0 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 1 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 2 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 4 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' status ' ] == ' OK ' )
self . assertTrue (
self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' status ' ] == ' ERROR ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' status ' ] == ' OK ' )
2018-03-15 12:34:51 +02:00
# Clean after yourself
self . del_test_dir ( module_name , fname )
# @unittest.skip("skip")
def test_validate_corrupted_full_1 ( self ) :
"""
make node with archiving, take a full backup and two page backups,
2018-03-15 12:57:17 +02:00
take another full backup and three page backups,
2018-03-15 12:34:51 +02:00
corrupt the second full backup, run validate, check that
the second full backup became CORRUPT and its page backups are ORPHANs,
2018-03-15 12:57:17 +02:00
remove corruption from the full backup and corrupt its second page backup,
run validate again, check that
the second full backup and its first page backup are OK,
the second page should be CORRUPT,
the third page should be ORPHAN
2018-03-15 12:34:51 +02:00
"""
fname = self . id ( ) . split ( ' . ' ) [ 3 ]
node = self . make_simple_node (
2018-12-26 21:59:13 +02:00
base_dir = os . path . join ( module_name , fname , ' node ' ) ,
2018-03-15 12:34:51 +02:00
set_replication = True ,
initdb_params = [ ' --data-checksums ' ] ,
2019-03-01 18:19:56 +02:00
pg_options = { ' max_wal_senders ' : ' 2 ' } )
2018-03-15 12:34:51 +02:00
backup_dir = os . path . join ( self . tmp_path , module_name , fname , ' backup ' )
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
self . set_archiving ( backup_dir , ' node ' , node )
2018-12-25 16:48:49 +02:00
node . slow_start ( )
2018-03-15 12:34:51 +02:00
self . backup_node ( backup_dir , ' node ' , node )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
backup_id = self . backup_node ( backup_dir , ' node ' , node )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
2018-03-17 00:41:35 +02:00
backup_id_page = self . backup_node (
backup_dir , ' node ' , node , backup_type = ' page ' )
2018-03-15 12:34:51 +02:00
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
file = os . path . join (
backup_dir , ' backups ' , ' node ' ,
backup_id , ' database ' , ' postgresql.auto.conf ' )
file_new = os . path . join ( backup_dir , ' postgresql.auto.conf ' )
os . rename ( file , file_new )
try :
2019-03-06 10:58:42 +02:00
self . validate_pb ( backup_dir , options = [ " -j " , " 4 " ] )
2018-03-15 12:34:51 +02:00
self . assertEqual (
1 , 0 ,
" Expecting Error because of data file dissapearance. \n "
" Output: {0} \n CMD: {1} " . format (
self . output , self . cmd ) )
except ProbackupException as e :
self . assertIn (
' Validating backup {0} ' . format ( backup_id ) , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} data files are corrupted ' . format (
backup_id ) , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Some backups are not valid ' . format (
backup_id ) , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
2018-06-02 19:35:37 +02:00
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' status ' ] == ' ORPHAN ' )
2018-09-03 16:31:04 +02:00
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 4 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' status ' ] == ' CORRUPT ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 2 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 1 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 0 ] [ ' status ' ] == ' OK ' )
2018-03-15 12:34:51 +02:00
os . rename ( file_new , file )
2018-10-11 15:26:23 +02:00
2018-03-15 12:34:51 +02:00
file = os . path . join (
backup_dir , ' backups ' , ' node ' ,
2018-10-11 15:26:23 +02:00
backup_id_page , ' database ' , ' backup_label ' )
2018-03-15 12:34:51 +02:00
2018-10-11 15:26:23 +02:00
file_new = os . path . join ( backup_dir , ' backup_label ' )
2018-03-15 12:34:51 +02:00
os . rename ( file , file_new )
try :
2019-03-06 10:58:42 +02:00
self . validate_pb ( backup_dir , options = [ " -j " , " 4 " ] )
2018-03-15 12:34:51 +02:00
except ProbackupException as e :
self . assertIn (
' WARNING: Some backups are not valid ' . format (
backup_id ) , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
2018-06-02 19:35:37 +02:00
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 0 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 1 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 2 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 4 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' status ' ] == ' CORRUPT ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' status ' ] == ' ORPHAN ' )
2017-09-28 09:32:06 +02:00
# Clean after yourself
2018-02-28 22:01:22 +02:00
self . del_test_dir ( module_name , fname )
2018-07-09 12:57:10 +02:00
2018-09-03 16:31:04 +02:00
# @unittest.skip("skip")
def test_validate_corrupted_full_2 ( self ) :
"""
PAGE2_2b
PAGE2_2a
PAGE2_4
PAGE2_4 < - validate
PAGE2_3
PAGE2_2 < - CORRUPT
PAGE2_1
FULL2
PAGE1_1
FULL1
corrupt the second page backup (PAGE2_2), run validate on PAGE2_4, check that
PAGE2_2 became CORRUPT and its descendants are ORPHANs,
take two more PAGE backups, which now trace their origin
to PAGE2_1 - the latest OK backup,
run validate on the whole instance, check that PAGE2_2a and PAGE2_2b are OK,
remove corruption from PAGE2_2 and run validate on PAGE2_4
"""
2018-07-09 12:57:10 +02:00
fname = self . id ( ) . split ( ' . ' ) [ 3 ]
node = self . make_simple_node (
2018-12-26 21:59:13 +02:00
base_dir = os . path . join ( module_name , fname , ' node ' ) ,
2018-09-03 16:31:04 +02:00
set_replication = True ,
initdb_params = [ ' --data-checksums ' ] ,
2019-03-01 18:19:56 +02:00
pg_options = { ' max_wal_senders ' : ' 2 ' } )
2018-07-09 12:57:10 +02:00
backup_dir = os . path . join ( self . tmp_path , module_name , fname , ' backup ' )
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
self . set_archiving ( backup_dir , ' node ' , node )
2018-12-25 16:48:49 +02:00
node . slow_start ( )
2018-07-09 12:57:10 +02:00
2018-09-03 16:31:04 +02:00
self . backup_node ( backup_dir , ' node ' , node )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
2018-07-09 12:57:10 +02:00
2018-09-03 16:31:04 +02:00
self . backup_node ( backup_dir , ' node ' , node )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
corrupt_id = self . backup_node (
backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
validate_id = self . backup_node (
backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
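# show_pb index order at this point: [0] FULL1, [1] PAGE1_1, [2] FULL2, [3] PAGE2_1, [4] PAGE2_2 (corrupt_id), [5] PAGE2_3, [6] PAGE2_4 (validate_id), [7] last PAGE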
2018-07-09 12:57:10 +02:00
2018-09-03 16:31:04 +02:00
file = os . path . join (
backup_dir , ' backups ' , ' node ' ,
2018-10-11 15:26:23 +02:00
corrupt_id , ' database ' , ' backup_label ' )
2018-07-09 12:57:10 +02:00
2018-10-11 15:26:23 +02:00
file_new = os . path . join ( backup_dir , ' backup_label ' )
2018-09-03 16:31:04 +02:00
os . rename ( file , file_new )
2018-07-09 12:57:10 +02:00
2018-09-03 16:31:04 +02:00
try :
2019-03-06 10:58:42 +02:00
self . validate_pb ( backup_dir , ' node ' , validate_id ,
options = [ " -j " , " 4 " ] )
2018-09-03 16:31:04 +02:00
self . assertEqual (
1 , 0 ,
" Expecting Error because of data file dissapearance. \n "
" Output: {0} \n CMD: {1} " . format (
self . output , self . cmd ) )
except ProbackupException as e :
self . assertIn (
' INFO: Validating parents for backup {0} ' . format ( validate_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' INFO: Validating backup {0} ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 2 ] [ ' id ' ] ) , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' INFO: Validating backup {0} ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' id ' ] ) , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' INFO: Validating backup {0} ' . format (
corrupt_id ) , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} data files are corrupted ' . format (
corrupt_id ) , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
2018-07-09 12:57:10 +02:00
2018-09-03 16:31:04 +02:00
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 4 ] [ ' status ' ] == ' CORRUPT ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 2 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 1 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 0 ] [ ' status ' ] == ' OK ' )
# Take two more PAGE backups; with PAGE2_2 marked CORRUPT they chain back to PAGE2_1, the latest valid backup
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
2018-07-09 12:57:10 +02:00
try :
2019-03-06 10:58:42 +02:00
self . validate_pb ( backup_dir , ' node ' , options = [ " -j " , " 4 " ] )
2018-09-03 16:31:04 +02:00
self . assertEqual (
1 , 0 ,
" Expecting Error because of data file dissapearance. \n "
" Output: {0} \n CMD: {1} " . format (
self . output , self . cmd ) )
except ProbackupException as e :
self . assertIn (
' Backup {0} data files are valid ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 9 ] [ ' id ' ] ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' Backup {0} data files are valid ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 8 ] [ ' id ' ] ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} has parent {1} with status: CORRUPT ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' id ' ] , corrupt_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} has parent {1} with status: CORRUPT ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' id ' ] , corrupt_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} has parent {1} with status: CORRUPT ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' id ' ] , corrupt_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' INFO: Revalidating backup {0} ' . format (
corrupt_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Some backups are not valid ' , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 9 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 8 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 4 ] [ ' status ' ] == ' CORRUPT ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 2 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 1 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 0 ] [ ' status ' ] == ' OK ' )
# revalidate again
try :
2019-03-06 10:58:42 +02:00
self . validate_pb ( backup_dir , ' node ' , validate_id ,
options = [ " -j " , " 4 " ] )
2018-09-03 16:31:04 +02:00
self . assertEqual (
1 , 0 ,
" Expecting Error because of data file dissapearance. \n "
" Output: {0} \n CMD: {1} " . format (
self . output , self . cmd ) )
2018-07-09 12:57:10 +02:00
except ProbackupException as e :
2018-09-03 16:31:04 +02:00
self . assertIn (
' WARNING: Backup {0} has status: ORPHAN ' . format ( validate_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' Backup {0} has parent {1} with status: CORRUPT ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' id ' ] , corrupt_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' Backup {0} has parent {1} with status: CORRUPT ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' id ' ] , corrupt_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' Backup {0} has parent {1} with status: CORRUPT ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' id ' ] , corrupt_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' INFO: Validating parents for backup {0} ' . format (
validate_id ) , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' INFO: Validating backup {0} ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 2 ] [ ' id ' ] ) , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' INFO: Validating backup {0} ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' id ' ] ) , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' INFO: Revalidating backup {0} ' . format (
corrupt_id ) , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} data files are corrupted ' . format (
corrupt_id ) , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' ERROR: Backup {0} is orphan. ' . format (
validate_id ) , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
# Fix CORRUPT
os . rename ( file_new , file )
2019-03-06 10:58:42 +02:00
output = self . validate_pb ( backup_dir , ' node ' , validate_id ,
options = [ " -j " , " 4 " ] )
2018-09-03 16:31:04 +02:00
self . assertIn (
' WARNING: Backup {0} has status: ORPHAN ' . format ( validate_id ) ,
output ,
' \n Unexpected Output Message: {0} \n ' . format (
repr ( output ) ) )
self . assertIn (
' Backup {0} has parent {1} with status: CORRUPT ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' id ' ] , corrupt_id ) ,
output ,
' \n Unexpected Output Message: {0} \n ' . format (
repr ( output ) ) )
self . assertIn (
' Backup {0} has parent {1} with status: CORRUPT ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' id ' ] , corrupt_id ) ,
output ,
' \n Unexpected Output Message: {0} \n ' . format (
repr ( output ) ) )
self . assertIn (
' Backup {0} has parent {1} with status: CORRUPT ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' id ' ] , corrupt_id ) ,
output ,
' \n Unexpected Output Message: {0} \n ' . format (
repr ( output ) ) )
self . assertIn (
' INFO: Validating parents for backup {0} ' . format (
validate_id ) , output ,
' \n Unexpected Output Message: {0} \n ' . format (
repr ( output ) ) )
self . assertIn (
' INFO: Validating backup {0} ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 2 ] [ ' id ' ] ) , output ,
' \n Unexpected Output Message: {0} \n ' . format (
repr ( output ) ) )
self . assertIn (
' INFO: Validating backup {0} ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' id ' ] ) , output ,
' \n Unexpected Output Message: {0} \n ' . format (
repr ( output ) ) )
self . assertIn (
' INFO: Revalidating backup {0} ' . format (
corrupt_id ) , output ,
' \n Unexpected Output Message: {0} \n ' . format (
repr ( output ) ) )
self . assertIn (
' Backup {0} data files are valid ' . format (
corrupt_id ) , output ,
' \n Unexpected Output Message: {0} \n ' . format (
repr ( output ) ) )
self . assertIn (
' INFO: Revalidating backup {0} ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' id ' ] ) , output ,
' \n Unexpected Output Message: {0} \n ' . format (
repr ( output ) ) )
self . assertIn (
' Backup {0} data files are valid ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' id ' ] ) , output ,
' \n Unexpected Output Message: {0} \n ' . format (
repr ( output ) ) )
self . assertIn (
' INFO: Revalidating backup {0} ' . format (
validate_id ) , output ,
' \n Unexpected Output Message: {0} \n ' . format (
repr ( output ) ) )
self . assertIn (
' Backup {0} data files are valid ' . format (
validate_id ) , output ,
' \n Unexpected Output Message: {0} \n ' . format (
repr ( output ) ) )
self . assertIn (
' INFO: Backup {0} WAL segments are valid ' . format (
validate_id ) , output ,
' \n Unexpected Output Message: {0} \n ' . format (
repr ( output ) ) )
self . assertIn (
' INFO: Backup {0} is valid. ' . format (
validate_id ) , output ,
' \n Unexpected Output Message: {0} \n ' . format (
repr ( output ) ) )
self . assertIn (
' INFO: Validate of backup {0} completed. ' . format (
validate_id ) , output ,
' \n Unexpected Output Message: {0} \n ' . format (
repr ( output ) ) )
# Now we have two perfectly valid backup chains based on FULL2
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 9 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 8 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 4 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 2 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 1 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 0 ] [ ' status ' ] == ' OK ' )
2018-07-09 12:57:10 +02:00
# Clean after yourself
self . del_test_dir ( module_name , fname )
2018-09-03 16:31:04 +02:00
# @unittest.skip("skip")
def test_validate_corrupted_full_missing ( self ) :
"""
make node with archiving , take full backup , and three page backups ,
take another full backup and four page backups
corrupt second full backup , run validate , check that
second full backup became CORRUPT and its page backups are ORPHANs,
remove corruption from the full backup and remove its second page backup,
run validate again, check that
the second full backup and its first page backup are OK,
third page should be ORPHAN
"""
fname = self . id ( ) . split ( ' . ' ) [ 3 ]
node = self . make_simple_node (
2018-12-26 21:59:13 +02:00
base_dir = os . path . join ( module_name , fname , ' node ' ) ,
2018-09-03 16:31:04 +02:00
set_replication = True ,
initdb_params = [ ' --data-checksums ' ] ,
2019-03-01 18:19:56 +02:00
pg_options = { ' max_wal_senders ' : ' 2 ' } )
2018-09-03 16:31:04 +02:00
backup_dir = os . path . join ( self . tmp_path , module_name , fname , ' backup ' )
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
self . set_archiving ( backup_dir , ' node ' , node )
2018-12-25 16:48:49 +02:00
node . slow_start ( )
2018-09-03 16:31:04 +02:00
self . backup_node ( backup_dir , ' node ' , node )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
backup_id = self . backup_node ( backup_dir , ' node ' , node )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
backup_id_page = self . backup_node (
backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
file = os . path . join (
backup_dir , ' backups ' , ' node ' ,
backup_id , ' database ' , ' postgresql.auto.conf ' )
file_new = os . path . join ( backup_dir , ' postgresql.auto.conf ' )
os . rename ( file , file_new )
try :
2019-03-06 10:58:42 +02:00
self . validate_pb ( backup_dir , options = [ " -j " , " 4 " ] )
2018-09-03 16:31:04 +02:00
self . assertEqual (
1 , 0 ,
" Expecting Error because of data file dissapearance. \n "
" Output: {0} \n CMD: {1} " . format (
self . output , self . cmd ) )
except ProbackupException as e :
self . assertIn (
' Validating backup {0} ' . format ( backup_id ) , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} data files are corrupted ' . format (
backup_id ) , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} is orphaned because his parent {1} has status: CORRUPT ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' id ' ] , backup_id ) , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 8 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 4 ] [ ' status ' ] == ' CORRUPT ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 2 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 1 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 0 ] [ ' status ' ] == ' OK ' )
# Full backup is fixed
os . rename ( file_new , file )
# break PAGE
old_directory = os . path . join (
backup_dir , ' backups ' , ' node ' , backup_id_page )
new_directory = os . path . join ( backup_dir , backup_id_page )
os . rename ( old_directory , new_directory )
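# The whole PAGE backup directory is now missing, so its descendants are left without a parent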
try :
2019-03-06 10:58:42 +02:00
self . validate_pb ( backup_dir , options = [ " -j " , " 4 " ] )
2018-09-03 16:31:04 +02:00
except ProbackupException as e :
self . assertIn (
' WARNING: Some backups are not valid ' , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} has missing parent {1} ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' id ' ] ,
backup_id_page ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} has missing parent {1} ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' id ' ] ,
backup_id_page ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} has parent {1} with status: CORRUPT ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' id ' ] , backup_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' status ' ] == ' ORPHAN ' )
# missing backup is here
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 4 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 2 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 1 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 0 ] [ ' status ' ] == ' OK ' )
# validate should be idempotent - a second run must again
# report the ID of the missing parent backup
try :
2019-03-06 10:58:42 +02:00
self . validate_pb ( backup_dir , options = [ " -j " , " 4 " ] )
2018-09-03 16:31:04 +02:00
except ProbackupException as e :
self . assertIn (
' WARNING: Some backups are not valid ' , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} has missing parent {1} ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' id ' ] ,
backup_id_page ) , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} has missing parent {1} ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' id ' ] ,
backup_id_page ) , e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' status ' ] == ' ORPHAN ' )
# missing backup is here
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 4 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 2 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 1 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 0 ] [ ' status ' ] == ' OK ' )
# fix missing PAGE backup
os . rename ( new_directory , old_directory )
# exit(1)
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 8 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 4 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 2 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 1 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 0 ] [ ' status ' ] == ' OK ' )
output = self . validate_pb ( backup_dir , options = [ " -j " , " 4 " ] )
self . assertIn (
' INFO: All backups are valid ' ,
output ,
' \n Unexpected Error Message: {0} \n ' . format (
repr ( output ) ) )
self . assertIn (
' WARNING: Backup {0} has parent {1} with status: ORPHAN ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 8 ] [ ' id ' ] ,
self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' id ' ] ) ,
output ,
' \n Unexpected Error Message: {0} \n ' . format (
repr ( output ) ) )
self . assertIn (
' WARNING: Backup {0} has parent {1} with status: ORPHAN ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' id ' ] ,
self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' id ' ] ) ,
output ,
' \n Unexpected Error Message: {0} \n ' . format (
repr ( output ) ) )
self . assertIn (
' Revalidating backup {0} ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' id ' ] ) ,
output ,
' \n Unexpected Error Message: {0} \n ' . format (
repr ( output ) ) )
self . assertIn (
' Revalidating backup {0} ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' id ' ] ) ,
output ,
' \n Unexpected Error Message: {0} \n ' . format (
repr ( output ) ) )
self . assertIn (
' Revalidating backup {0} ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 8 ] [ ' id ' ] ) ,
output ,
' \n Unexpected Error Message: {0} \n ' . format (
repr ( output ) ) )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 8 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 4 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 2 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 1 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 0 ] [ ' status ' ] == ' OK ' )
# Clean after yourself
self . del_test_dir ( module_name , fname )
def test_file_size_corruption_no_validate ( self ) :
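"""
make node with archiving, take full backup,
truncate a heap file inside the backup so its size no longer matches,
then restore with --no-validate and check that restoring of data files fails
"""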
fname = self . id ( ) . split ( ' . ' ) [ 3 ]
node = self . make_simple_node (
base_dir = os . path . join ( module_name , fname , ' node ' ) ,
# initdb_params=['--data-checksums'],
)
backup_dir = os . path . join ( self . tmp_path , module_name , fname , ' backup ' )
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
self . set_archiving ( backup_dir , ' node ' , node )
node . slow_start ( )
node . safe_psql (
" postgres " ,
" create table t_heap as select 1 as id, md5(i::text) as text, "
" md5(repeat(i::text,10))::tsvector as tsvector "
" from generate_series(0,1000) i " )
node . safe_psql (
" postgres " ,
" CHECKPOINT; " )
heap_path = node . safe_psql (
" postgres " ,
" select pg_relation_filepath( ' t_heap ' ) " ) . rstrip ( )
heap_size = node . safe_psql (
" postgres " ,
" select pg_relation_size( ' t_heap ' ) " )
backup_id = self . backup_node (
backup_dir , ' node ' , node , backup_type = " full " ,
options = [ " -j " , " 4 " ] , asynchronous = False , gdb = False )
node . stop ( )
node . cleanup ( )
# Let`s do file corruption
with open (
os . path . join (
backup_dir , " backups " , ' node ' , backup_id ,
" database " , heap_path ) , " rb+ " , 0 ) as f :
f . truncate ( int ( heap_size ) - 4096 )
f . flush ( )
f . close ( )
node . cleanup ( )
try :
self . restore_node (
backup_dir , ' node ' , node ,
options = [ " --no-validate " ] )
self . assertEqual (
1 , 0 ,
" Expecting Error because of data file corruption. \n "
" Output: {0} \n CMD: {1} " . format ( self . output , self . cmd ) )
except ProbackupException as e :
self . assertTrue (
" ERROR: Data files restoring failed " in e . message ,
repr ( e . message ) )
# print "\nExpected error: \n" + e.message
# Clean after yourself
self . del_test_dir ( module_name , fname )
# @unittest.skip("skip")
def test_validate_specific_backup_with_missing_backup ( self ) :
"""
PAGE3_2
PAGE3_1
FULL3
PAGE2_5
PAGE2_4 < - validate
PAGE2_3
PAGE2_2 < - missing
PAGE2_1
FULL2
PAGE1_2
PAGE1_1
FULL1
"""
fname = self . id ( ) . split ( ' . ' ) [ 3 ]
node = self . make_simple_node (
base_dir = os . path . join ( module_name , fname , ' node ' ) ,
set_replication = True ,
initdb_params = [ ' --data-checksums ' ] ,
pg_options = { ' max_wal_senders ' : ' 2 ' } )
backup_dir = os . path . join ( self . tmp_path , module_name , fname , ' backup ' )
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
self . set_archiving ( backup_dir , ' node ' , node )
node . slow_start ( )
# CHAIN1
self . backup_node ( backup_dir , ' node ' , node )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
# CHAIN2
self . backup_node ( backup_dir , ' node ' , node )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
missing_id = self . backup_node (
backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
validate_id = self . backup_node (
backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
# CHAIN3
self . backup_node ( backup_dir , ' node ' , node )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
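# Simulate a missing PAGE2_2 backup: move its directory out of the instance catalog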
old_directory = os . path . join ( backup_dir , ' backups ' , ' node ' , missing_id )
new_directory = os . path . join ( backup_dir , missing_id )
os . rename ( old_directory , new_directory )
try :
self . validate_pb ( backup_dir , ' node ' , validate_id ,
options = [ " -j " , " 4 " ] )
self . assertEqual (
1 , 0 ,
" Expecting Error because of backup dissapearance. \n "
" Output: {0} \n CMD: {1} " . format (
self . output , self . cmd ) )
except ProbackupException as e :
self . assertIn (
' WARNING: Backup {0} is orphaned because his parent {1} is missing ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' id ' ] , missing_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} is orphaned because his parent {1} is missing ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' id ' ] , missing_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} is orphaned because his parent {1} is missing ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' id ' ] , missing_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 10 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 9 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 8 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' status ' ] == ' ORPHAN ' )
# missing backup
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 4 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 2 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 1 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 0 ] [ ' status ' ] == ' OK ' )
try :
self . validate_pb ( backup_dir , ' node ' , validate_id ,
options = [ " -j " , " 4 " ] )
self . assertEqual (
1 , 0 ,
" Expecting Error because of backup dissapearance. \n "
" Output: {0} \n CMD: {1} " . format (
self . output , self . cmd ) )
except ProbackupException as e :
self . assertIn (
' WARNING: Backup {0} has missing parent {1} ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' id ' ] , missing_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} has missing parent {1} ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' id ' ] , missing_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} has missing parent {1} ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' id ' ] , missing_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
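# Return the missing PAGE backup to the catalog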
os . rename ( new_directory , old_directory )
# Revalidate backup chain
self . validate_pb ( backup_dir , ' node ' , validate_id , options = [ " -j " , " 4 " ] )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 11 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 10 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 9 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 8 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 4 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 2 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 1 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 0 ] [ ' status ' ] == ' OK ' )
# Clean after yourself
self . del_test_dir ( module_name , fname )
# @unittest.skip("skip")
def test_validate_specific_backup_with_missing_backup_1 ( self ) :
"""
PAGE3_2
PAGE3_1
FULL3
PAGE2_5
PAGE2_4 < - validate
PAGE2_3
PAGE2_2 < - missing
PAGE2_1
FULL2 < - missing
PAGE1_2
PAGE1_1
FULL1
"""
fname = self . id ( ) . split ( ' . ' ) [ 3 ]
node = self . make_simple_node (
base_dir = os . path . join ( module_name , fname , ' node ' ) ,
set_replication = True ,
initdb_params = [ ' --data-checksums ' ] ,
pg_options = { ' max_wal_senders ' : ' 2 ' } )
backup_dir = os . path . join ( self . tmp_path , module_name , fname , ' backup ' )
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
self . set_archiving ( backup_dir , ' node ' , node )
node . slow_start ( )
# CHAIN1
self . backup_node ( backup_dir , ' node ' , node )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
# CHAIN2
missing_full_id = self . backup_node ( backup_dir , ' node ' , node )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
missing_page_id = self . backup_node (
backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
validate_id = self . backup_node (
backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
# CHAIN3
self . backup_node ( backup_dir , ' node ' , node )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
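# Simulate missing backups: move PAGE2_2 and FULL2 directories out of the instance catalog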
page_old_directory = os . path . join (
backup_dir , ' backups ' , ' node ' , missing_page_id )
page_new_directory = os . path . join ( backup_dir , missing_page_id )
os . rename ( page_old_directory , page_new_directory )
full_old_directory = os . path . join (
backup_dir , ' backups ' , ' node ' , missing_full_id )
full_new_directory = os . path . join ( backup_dir , missing_full_id )
os . rename ( full_old_directory , full_new_directory )
try :
self . validate_pb ( backup_dir , ' node ' , validate_id ,
options = [ " -j " , " 4 " ] )
self . assertEqual (
1 , 0 ,
" Expecting Error because of backup dissapearance. \n "
" Output: {0} \n CMD: {1} " . format (
self . output , self . cmd ) )
except ProbackupException as e :
self . assertIn (
' WARNING: Backup {0} is orphaned because his parent {1} is missing ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' id ' ] , missing_page_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} is orphaned because his parent {1} is missing ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' id ' ] , missing_page_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} is orphaned because his parent {1} is missing ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 4 ] [ ' id ' ] , missing_page_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 9 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 8 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 4 ] [ ' status ' ] == ' ORPHAN ' )
# PAGE2_1
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' status ' ] == ' OK ' )  # <- questionable: its parent FULL2 is missing
# FULL2 is missing
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 2 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 1 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 0 ] [ ' status ' ] == ' OK ' )
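# Return PAGE2_2 and FULL2 to the catalog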
os . rename ( page_new_directory , page_old_directory )
os . rename ( full_new_directory , full_old_directory )
# Revalidate backup chain
self . validate_pb ( backup_dir , ' node ' , validate_id , options = [ " -j " , " 4 " ] )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 11 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 10 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 9 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 8 ] [ ' status ' ] == ' ORPHAN ' ) # <- Fail
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 4 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 2 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 1 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 0 ] [ ' status ' ] == ' OK ' )
# Clean after yourself
self . del_test_dir ( module_name , fname )
# @unittest.skip("skip")
def test_validate_with_missing_backup_1 ( self ) :
"""
PAGE3_2
PAGE3_1
FULL3
PAGE2_5
PAGE2_4 < - validate
PAGE2_3
PAGE2_2 < - missing
PAGE2_1
FULL2 < - missing
PAGE1_2
PAGE1_1
FULL1
"""
fname = self . id ( ) . split ( ' . ' ) [ 3 ]
node = self . make_simple_node (
base_dir = os . path . join ( module_name , fname , ' node ' ) ,
set_replication = True ,
initdb_params = [ ' --data-checksums ' ] ,
pg_options = { ' max_wal_senders ' : ' 2 ' } )
backup_dir = os . path . join ( self . tmp_path , module_name , fname , ' backup ' )
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
self . set_archiving ( backup_dir , ' node ' , node )
node . slow_start ( )
# CHAIN1
self . backup_node ( backup_dir , ' node ' , node )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
# CHAIN2
missing_full_id = self . backup_node ( backup_dir , ' node ' , node )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
missing_page_id = self . backup_node (
backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
validate_id = self . backup_node (
backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
# CHAIN3
self . backup_node ( backup_dir , ' node ' , node )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
# Break PAGE
page_old_directory = os . path . join (
backup_dir , ' backups ' , ' node ' , missing_page_id )
page_new_directory = os . path . join ( backup_dir , missing_page_id )
os . rename ( page_old_directory , page_new_directory )
# Break FULL
full_old_directory = os . path . join (
backup_dir , ' backups ' , ' node ' , missing_full_id )
full_new_directory = os . path . join ( backup_dir , missing_full_id )
os . rename ( full_old_directory , full_new_directory )
try :
self . validate_pb ( backup_dir , ' node ' , validate_id ,
options = [ " -j " , " 4 " ] )
self . assertEqual (
1 , 0 ,
" Expecting Error because of backup dissapearance. \n "
" Output: {0} \n CMD: {1} " . format (
self . output , self . cmd ) )
except ProbackupException as e :
self . assertIn (
' WARNING: Backup {0} is orphaned because his parent {1} is missing ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' id ' ] , missing_page_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} is orphaned because his parent {1} is missing ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' id ' ] , missing_page_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} is orphaned because his parent {1} is missing ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 4 ] [ ' id ' ] , missing_page_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 9 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 8 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 4 ] [ ' status ' ] == ' ORPHAN ' )
# PAGE2_2 is missing
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' status ' ] == ' OK ' )
# FULL2 is missing
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 2 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 1 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 0 ] [ ' status ' ] == ' OK ' )
os . rename ( page_new_directory , page_old_directory )
# Revalidate backup chain
try :
self . validate_pb ( backup_dir , ' node ' , validate_id ,
options = [ " -j " , " 4 " ] )
self . assertEqual (
1 , 0 ,
" Expecting Error because of backup dissapearance. \n "
" Output: {0} \n CMD: {1} " . format (
self . output , self . cmd ) )
except ProbackupException as e :
self . assertIn (
' WARNING: Backup {0} has status: ORPHAN ' . format (
validate_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} has missing parent {1} ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' id ' ] ,
missing_full_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} has missing parent {1} ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' id ' ] ,
missing_full_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} has missing parent {1} ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' id ' ] ,
missing_full_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} is orphaned because his parent {1} is missing ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 4 ] [ ' id ' ] ,
missing_full_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} is orphaned because his parent {1} is missing ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' id ' ] ,
missing_full_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 10 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 9 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 8 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 4 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' status ' ] == ' ORPHAN ' )
# FULL2 is missing
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 2 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 1 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 0 ] [ ' status ' ] == ' OK ' )
os . rename ( full_new_directory , full_old_directory )
# Revalidate chain
self . validate_pb ( backup_dir , ' node ' , validate_id , options = [ " -j " , " 4 " ] )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 11 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 10 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 9 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 8 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 4 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 2 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 1 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 0 ] [ ' status ' ] == ' OK ' )
# Clean after yourself
self . del_test_dir ( module_name , fname )
# @unittest.skip("skip")
def test_validate_with_missing_backup_2 ( self ) :
"""
PAGE3_2
PAGE3_1
FULL3
PAGE2_5
PAGE2_4
PAGE2_3
PAGE2_2 < - missing
PAGE2_1
FULL2 < - missing
PAGE1_2
PAGE1_1
FULL1
"""
fname = self . id ( ) . split ( ' . ' ) [ 3 ]
node = self . make_simple_node (
base_dir = os . path . join ( module_name , fname , ' node ' ) ,
set_replication = True ,
initdb_params = [ ' --data-checksums ' ] ,
pg_options = { ' max_wal_senders ' : ' 2 ' } )
backup_dir = os . path . join ( self . tmp_path , module_name , fname , ' backup ' )
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
self . set_archiving ( backup_dir , ' node ' , node )
node . slow_start ( )
# CHAIN1
self . backup_node ( backup_dir , ' node ' , node )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
# CHAIN2
missing_full_id = self . backup_node ( backup_dir , ' node ' , node )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
missing_page_id = self . backup_node (
backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node (
backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
# CHAIN3
self . backup_node ( backup_dir , ' node ' , node )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' page ' )
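# Break PAGE2_2 and FULL2: move their directories out of the instance catalog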
page_old_directory = os . path . join ( backup_dir , ' backups ' , ' node ' , missing_page_id )
page_new_directory = os . path . join ( backup_dir , missing_page_id )
os . rename ( page_old_directory , page_new_directory )
full_old_directory = os . path . join ( backup_dir , ' backups ' , ' node ' , missing_full_id )
full_new_directory = os . path . join ( backup_dir , missing_full_id )
os . rename ( full_old_directory , full_new_directory )
try :
self . validate_pb ( backup_dir , ' node ' , options = [ " -j " , " 4 " ] )
self . assertEqual (
1 , 0 ,
" Expecting Error because of backup dissapearance. \n "
" Output: {0} \n CMD: {1} " . format (
self . output , self . cmd ) )
except ProbackupException as e :
self . assertIn (
' WARNING: Backup {0} is orphaned because his parent {1} is missing ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' id ' ] , missing_page_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} is orphaned because his parent {1} is missing ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' id ' ] , missing_page_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} is orphaned because his parent {1} is missing ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 4 ] [ ' id ' ] , missing_page_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} is orphaned because his parent {1} is missing ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' id ' ] , missing_full_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 9 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 8 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 4 ] [ ' status ' ] == ' ORPHAN ' )
# PAGE2_2 is missing
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' status ' ] == ' ORPHAN ' )
# FULL2 is missing
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 2 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 1 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 0 ] [ ' status ' ] == ' OK ' )
os . rename ( page_new_directory , page_old_directory )
# Revalidate backup chain
try :
self . validate_pb ( backup_dir , ' node ' , options = [ " -j " , " 4 " ] )
self . assertEqual (
1 , 0 ,
" Expecting Error because of backup dissapearance. \n "
" Output: {0} \n CMD: {1} " . format (
self . output , self . cmd ) )
except ProbackupException as e :
self . assertIn (
' WARNING: Backup {0} has missing parent {1} ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' id ' ] , missing_full_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} has missing parent {1} ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' id ' ] , missing_full_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} has missing parent {1} ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' id ' ] , missing_full_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} is orphaned because his parent {1} is missing ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 4 ] [ ' id ' ] , missing_full_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
' WARNING: Backup {0} has missing parent {1} ' . format (
self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' id ' ] , missing_full_id ) ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 10 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 9 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 8 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 7 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 6 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 5 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 4 ] [ ' status ' ] == ' ORPHAN ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 3 ] [ ' status ' ] == ' ORPHAN ' )
# FULL2 is missing
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 2 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 1 ] [ ' status ' ] == ' OK ' )
self . assertTrue ( self . show_pb ( backup_dir , ' node ' ) [ 0 ] [ ' status ' ] == ' OK ' )
# Clean after yourself
self . del_test_dir ( module_name , fname )
# @unittest.skip("skip")
def test_corrupt_pg_control_via_resetxlog ( self ) :
""" PGPRO-2096 """
fname = self . id ( ) . split ( ' . ' ) [ 3 ]
node = self . make_simple_node (
base_dir = os . path . join ( module_name , fname , ' node ' ) ,
set_replication = True ,
initdb_params = [ ' --data-checksums ' ] ,
pg_options = { ' max_wal_senders ' : ' 2 ' } )
backup_dir = os . path . join ( self . tmp_path , module_name , fname , ' backup ' )
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
self . set_archiving ( backup_dir , ' node ' , node )
node . slow_start ( )
backup_id = self . backup_node ( backup_dir , ' node ' , node )
if self . get_version ( node ) < 100000 :
pg_resetxlog_path = self . get_bin_path ( ' pg_resetxlog ' )
wal_dir = ' pg_xlog '
else :
pg_resetxlog_path = self . get_bin_path ( ' pg_resetwal ' )
wal_dir = ' pg_wal '
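# pg_resetwal/pg_resetxlog expects <wal_dir>/archive_status to exist in the
# target data directory; the backup copy apparently lacks it, so create it here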
os . mkdir (
os . path . join (
backup_dir , ' backups ' , ' node ' , backup_id , ' database ' , wal_dir , ' archive_status ' ) )
pg_control_path = os . path . join (
backup_dir , ' backups ' , ' node ' ,
backup_id , ' database ' , ' global ' , ' pg_control ' )
md5_before = hashlib . md5 (
open ( pg_control_path , ' rb ' ) . read ( ) ) . hexdigest ( )
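# Rewrite pg_control in the backed-up data directory with pg_resetwal/pg_resetxlog:
# -o changes the next OID and -f forces the write, so pg_control in the backup
# no longer matches what was recorded at backup time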
self . run_binary (
[
pg_resetxlog_path ,
' -D ' ,
os . path . join ( backup_dir , ' backups ' , ' node ' , backup_id , ' database ' ) ,
' -o 42 ' ,
' -f '
] ,
asynchronous = False )
md5_after = hashlib . md5 (
open ( pg_control_path , ' rb ' ) . read ( ) ) . hexdigest ( )
if self . verbose :
print ( ' \n MD5 BEFORE resetxlog: {0} \n MD5 AFTER resetxlog: {1} ' . format (
md5_before , md5_after ) )
# Validate backup
try :
self . validate_pb ( backup_dir , ' node ' , options = [ " -j " , " 4 " ] )
self . assertEqual (
1 , 0 ,
" Expecting Error because of pg_control change. \n "
" Output: {0} \n CMD: {1} " . format (
self . output , self . cmd ) )
except ProbackupException as e :
self . assertIn (
' data files are corrupted ' ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
# Clean after yourself
self . del_test_dir ( module_name , fname )
# validate empty backup list
# page from future during validate
# page from future during backup
# corrupt a block so that the file becomes unaligned:
# 712 Assert(header.compressed_size <= BLCKSZ);
# 713
# 714 read_len = fread(compressed_page.data, 1,
# 715 MAXALIGN(header.compressed_size), in);
# 716 if (read_len != MAXALIGN(header.compressed_size))
# -> 717 elog(ERROR, "cannot read block %u of \"%s\" read %lu of %d",
# 718 blknum, file->path, read_len, header.compressed_size);