import unittest
import os
import subprocess
import six
from datetime import datetime, timedelta

from .helpers.ptrack_helpers import ProbackupTest, ProbackupException


class FalsePositive(ProbackupTest, unittest.TestCase):

    def __init__(self, *args, **kwargs):
        super(FalsePositive, self).__init__(*args, **kwargs)
        self.module_name = 'false_positive'

    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_pgpro561(self):
        """
        make node1 with archiving, take stream backup,
        restore it to node2 and check that archiving on node2
        fails because both nodes share the same WAL archive
        """
        fname = self.id().split('.')[3]
        node1 = self.make_simple_node(
            base_dir="{0}/{1}/node1".format(self.module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
            )
        backup_dir = os.path.join(self.tmp_path, self.module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node1', node1)
        self.set_archiving(backup_dir, 'node1', node1)
        node1.start()
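
        # Full backup in --stream mode: the WAL needed for consistency is
        # carried over a replication connection, so node1's archive_command
        # is not involved in taking the backup itself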
        backup_id = self.backup_node(backup_dir, 'node1', node1, options=["--stream"])

        node2 = self.make_simple_node(base_dir="{0}/{1}/node2".format(self.module_name, fname))
        node2.cleanup()

        node1.psql(
            "postgres",
            "create table t_heap as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,256) i")

        self.backup_node(backup_dir, 'node1', node1, backup_type='page', options=["--stream"])
        self.restore_node(backup_dir, 'node1', data_dir=node2.data_dir)
        node2.append_conf('postgresql.auto.conf', 'port = {0}'.format(node2.port))
        node2.start({"-t": "600"})
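
        # node2 was restored from node1's backup, so it inherited node1's
        # configuration, including archive_command: both clusters now point
        # at the same WAL archive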
        timeline_node1 = node1.get_control_data()["Latest checkpoint's TimeLineID"]
        timeline_node2 = node2.get_control_data()["Latest checkpoint's TimeLineID"]
        self.assertEqual(
            timeline_node1, timeline_node2,
            "Timelines on node1 and node2 should be equal. This is unexpected")

        archive_command_node1 = node1.safe_psql("postgres", "show archive_command")
        archive_command_node2 = node2.safe_psql("postgres", "show archive_command")
        self.assertEqual(
            archive_command_node1, archive_command_node2,
            "Archive command on node1 and node2 should be equal. This is unexpected")
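
        # Since node2 archives into a directory already populated by node1,
        # its attempts to push segments are expected to fail;
        # pg_stat_get_archiver() exposes the last failed segment, if any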
        result = node2.safe_psql(
            "postgres",
            "select last_failed_wal from pg_stat_get_archiver() where last_failed_wal is not NULL")
        # self.assertEqual(result, six.b(""), 'Restored node2 failed to archive segment {0} due to having the same archive command as node1'.format(result.rstrip()))
        if result == "":
            self.assertEqual(
                1, 0,
                'Error is expected due to node1 and node2 having a common archive and archive_command')

        # Clean after yourself
        self.del_test_dir(self.module_name, fname)

    # @unittest.skip("skip")
    def pgpro688(self):
        """
        make node with archiving, make backup, get Recovery Time,
        validate to Recovery Time. Waiting PGPRO-688. RESOLVED
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(self.module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
            )
        backup_dir = os.path.join(self.tmp_path, self.module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.start()

        backup_id = self.backup_node(backup_dir, 'node', node)
        recovery_time = self.show_pb(backup_dir, 'node', backup_id)['recovery-time']
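
        # 'recovery-time' comes from the backup catalog; unless some WAL record
        # actually carries that timestamp, validating "up to recovery time"
        # has nothing to anchor to, which is what PGPRO-688 was about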
        # Uncommenting this section will make this test True Positive
        # node.psql("postgres", "select pg_create_restore_point('123')")
        # node.psql("postgres", "select txid_current()")
        # node.psql("postgres", "select pg_switch_xlog()")
        ####

        # try:
        self.validate_pb(backup_dir, 'node', options=["--time='{0}'".format(recovery_time)])
        # we should die here because exception is what we expect to happen
        # self.assertEqual(1, 0, "Expecting Error because it should not be possible safely validate 'Recovery Time' without wal record with timestamp.\n Output: {0} \n CMD: {1}".format(
        #     repr(self.output), self.cmd))
        # except ProbackupException as e:
        #     self.assertTrue('WARNING: recovery can be done up to time {0}'.format(recovery_time) in e.message,
        #         '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))

        # Clean after yourself
        self.del_test_dir(self.module_name, fname)

    # @unittest.skip("skip")
    def pgpro702_688(self):
        """
        make node without archiving, make stream backup,
        get Recovery Time, validate to Recovery Time
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(self.module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
            )
        backup_dir = os.path.join(self.tmp_path, self.module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.start()

        backup_id = self.backup_node(backup_dir, 'node', node, options=["--stream"])
        recovery_time = self.show_pb(backup_dir, 'node', backup_id)['recovery-time']
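
        # The expected substring is wrapped in six.b() because pg_probackup
        # output is handled as bytes here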
        self.assertIn(
            six.b("INFO: backup validation completed successfully on"),
            self.validate_pb(backup_dir, 'node', backup_id, options=["--time='{0}'".format(recovery_time)]))

        # Clean after yourself
        self.del_test_dir(self.module_name, fname)

    # @unittest.skip("skip")
    @unittest.expectedFailure
    def test_validate_wal_lost_segment(self):
        """Lose a segment located between backups. ExpectedFailure. This is a BUG"""
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(self.module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
            )
        backup_dir = os.path.join(self.tmp_path, self.module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.start()

        self.backup_node(backup_dir, 'node', node)

        # make some wals
        node.pgbench_init(scale=2)
        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "4", "-T", "10"]
        )
        pgbench.wait()
        pgbench.stdout.close()
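
        # The pgbench run forces WAL switches, so by this point several
        # completed segments should have been archived into backup_dir/wal/node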

        # delete last wal segment; WAL file names are zero-padded hex, so the
        # lexicographically greatest name is the most recent segment
        wals_dir = os.path.join(backup_dir, "wal", 'node')
        wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')]
        os.remove(os.path.join(wals_dir, max(wals)))
        ##### Holy Smokes, Batman! We just lost a wal segment and know nothing about it
        ##### We need archive-push ASAP
        self.backup_node(backup_dir, 'node', node)
        self.assertFalse('validation completed successfully' in self.validate_pb(backup_dir, 'node'))
        ########

        # Clean after yourself
        self.del_test_dir(self.module_name, fname)

    # Need to force validation of ancestor-chain
    @unittest.expectedFailure
    def test_incremental_backup_corrupt_full_1(self):
        """page-level backup with corrupted full backup"""
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(self.module_name, fname),
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'}
            )
        backup_dir = os.path.join(self.tmp_path, self.module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.start()
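
        # Take a full backup, then corrupt it by deleting one of its files;
        # the page backup below must refuse to build on a damaged parent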
        backup_id = self.backup_node(backup_dir, 'node', node)
        file = os.path.join(backup_dir, "backups", "node", backup_id.decode("utf-8"), "database", "postgresql.conf")
        os.remove(file)

        try:
            self.backup_node(backup_dir, 'node', node, backup_type="page")
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because page backup should not be possible without valid full backup.\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertEqual(
                e.message,
                'ERROR: Valid backup on current timeline is not found. Create new FULL backup before an incremental one.\n',
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))

        self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "ERROR")

        # Clean after yourself
        self.del_test_dir(self.module_name, fname)