import unittest
import os
from time import sleep, time
from .helpers.ptrack_helpers import base36enc, ProbackupTest, ProbackupException
import shutil
from distutils.dir_util import copy_tree
from testgres import ProcessType, QueryException
import subprocess


class BackupTest(ProbackupTest, unittest.TestCase):
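
    # Tests for the pg_probackup "backup" command: backup modes (FULL, PAGE,
    # DELTA, PTRACK), corruption detection, tablespace handling and behaviour
    # under concurrent DDL.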

    def test_full_backup(self):
        """
        Just test full backup with at least two segments
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'],
            # we need to write a lot, so let's speed things up a bit
            pg_options={"fsync": "off", "synchronous_commit": "off"})

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)

        node.slow_start()

        # Fill with data
        # Have to use scale=100 to create a second segment.
        node.pgbench_init(scale=100, no_vacuum=True)

        # FULL
        backup_id = self.backup_node(backup_dir, 'node', node)

        out = self.validate_pb(backup_dir, 'node', backup_id)
        self.assertIn(
            "INFO: Backup {0} is valid".format(backup_id),
            out)
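
    # Unlike the archive test above, stream mode fetches the WAL needed for
    # consistency over the replication protocol while the backup runs, so no
    # set_archiving() call is required.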

    def test_full_backup_stream(self):
        """
        Just test full backup with at least two segments in stream mode
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'],
            # we need to write a lot, so let's speed things up a bit
            pg_options={"fsync": "off", "synchronous_commit": "off"})

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)

        node.slow_start()

        # Fill with data
        # Have to use scale=100 to create a second segment.
        node.pgbench_init(scale=100, no_vacuum=True)

        # FULL
        backup_id = self.backup_node(backup_dir, 'node', node,
                                     options=["--stream"])

        out = self.validate_pb(backup_dir, 'node', backup_id)
        self.assertIn(
            "INFO: Backup {0} is valid".format(backup_id),
            out)

    # @unittest.skip("skip")
    # @unittest.expectedFailure
    # PGPRO-707
    def test_backup_modes_archive(self):
        """standard backup modes with ARCHIVE WAL method"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)

        node.slow_start()

        full_backup_id = self.backup_node(backup_dir, 'node', node)
        show_backup = self.show_pb(backup_dir, 'node')[0]

        self.assertEqual(show_backup['status'], "OK")
        self.assertEqual(show_backup['backup-mode'], "FULL")

        # postmaster.pid and postmaster.opts shouldn't be copied
        excluded = True
        db_dir = os.path.join(
            backup_dir, "backups", 'node', full_backup_id, "database")

        for f in os.listdir(db_dir):
            if (
                os.path.isfile(os.path.join(db_dir, f)) and
                (
                    f == "postmaster.pid" or
                    f == "postmaster.opts"
                )
            ):
                excluded = False
        self.assertEqual(excluded, True)

        # page backup mode
        page_backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type="page")

        show_backup_1 = self.show_pb(backup_dir, 'node')[1]
        self.assertEqual(show_backup_1['status'], "OK")
        self.assertEqual(show_backup_1['backup-mode'], "PAGE")

        # delta backup mode
        delta_backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type="delta")

        show_backup_2 = self.show_pb(backup_dir, 'node')[2]
        self.assertEqual(show_backup_2['status'], "OK")
        self.assertEqual(show_backup_2['backup-mode'], "DELTA")

        # Check parent backup
        self.assertEqual(
            full_backup_id,
            self.show_pb(
                backup_dir, 'node',
                backup_id=show_backup_1['id'])["parent-backup-id"])

        self.assertEqual(
            page_backup_id,
            self.show_pb(
                backup_dir, 'node',
                backup_id=show_backup_2['id'])["parent-backup-id"])

    # @unittest.skip("skip")
    def test_smooth_checkpoint(self):
        """full backup with smooth checkpoint"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)

        node.slow_start()

        self.backup_node(
            backup_dir, 'node', node,
            options=["-C"])

        self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
        node.stop()
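
    # An incremental backup (PAGE, DELTA or PTRACK) needs a valid FULL parent
    # on the current timeline; the next two tests check that pg_probackup
    # refuses to chain off a missing or corrupted FULL backup.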

    # @unittest.skip("skip")
    def test_incremental_backup_without_full(self):
        """page backup without validated full backup"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)

        node.slow_start()

        try:
            self.backup_node(backup_dir, 'node', node, backup_type="page")
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because page backup should not be possible "
                "without valid full backup.\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                "WARNING: Valid full backup on current timeline 1 is not found" in e.message and
                "ERROR: Create new full backup before an incremental one" in e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            self.show_pb(backup_dir, 'node')[0]['status'],
            "ERROR")

    # @unittest.skip("skip")
    def test_incremental_backup_corrupt_full(self):
        """page-level backup with corrupted full backup"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)

        node.slow_start()

        backup_id = self.backup_node(backup_dir, 'node', node)
        file = os.path.join(
            backup_dir, "backups", "node", backup_id,
            "database", "postgresql.conf")
        os.remove(file)

        try:
            self.validate_pb(backup_dir, 'node')
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because of validation of corrupted backup.\n"
                " Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                "INFO: Validate backups of the instance 'node'" in e.message and
                "WARNING: Backup file" in e.message and "is not found" in e.message and
                "WARNING: Backup {0} data files are corrupted".format(
                    backup_id) in e.message and
                "WARNING: Some backups are not valid" in e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

        try:
            self.backup_node(backup_dir, 'node', node, backup_type="page")
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because page backup should not be possible "
                "without valid full backup.\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                "WARNING: Valid full backup on current timeline 1 is not found" in e.message and
                "ERROR: Create new full backup before an incremental one" in e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            self.show_pb(backup_dir, 'node', backup_id)['status'], "CORRUPT")
        self.assertEqual(
            self.show_pb(backup_dir, 'node')[1]['status'], "ERROR")

    # @unittest.skip("skip")
    def test_delta_threads_stream(self):
        """delta multi thread backup mode and stream"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)

        node.slow_start()

        self.backup_node(
            backup_dir, 'node', node, backup_type="full",
            options=["-j", "4", "--stream"])

        self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")

        self.backup_node(
            backup_dir, 'node', node,
            backup_type="delta", options=["-j", "4", "--stream"])
        self.assertEqual(self.show_pb(backup_dir, 'node')[1]['status'], "OK")
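
    # The corruption tests below rely on data checksums: every cluster is
    # initialized with --data-checksums, pg_probackup verifies page checksums
    # while copying, and a deliberately flipped byte inside block 1 (pages
    # are 8192 bytes by default) must make the backup fail.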

    # @unittest.skip("skip")
    def test_page_detect_corruption(self):
        """make node, corrupt some page, check that backup failed"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        self.backup_node(
            backup_dir, 'node', node,
            backup_type="full", options=["-j", "4", "--stream"])

        node.safe_psql(
            "postgres",
            "create table t_heap as select 1 as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1000) i")

        node.safe_psql(
            "postgres",
            "CHECKPOINT")

        heap_path = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip()

        path = os.path.join(node.data_dir, heap_path)
        # corrupt a byte in the middle of block 1, so its checksum
        # can no longer match
        with open(path, "rb+", 0) as f:
            f.seek(9000)
            f.write(b"bla")
            f.flush()

        try:
            self.backup_node(
                backup_dir, 'node', node, backup_type="full",
                options=["-j", "4", "--stream", "--log-level-file=VERBOSE"])
            self.assertEqual(
                1, 0,
                "Expecting Error because data file is corrupted"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Corruption detected in file "{0}", '
                'block 1: page verification failed, calculated checksum'.format(path),
                e.message)

        self.assertEqual(
            self.show_pb(backup_dir, 'node')[1]['status'],
            'ERROR',
            "Backup Status should be ERROR")

    # @unittest.skip("skip")
    def test_backup_detect_corruption(self):
        """make node, corrupt some page, check that backup failed"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        if self.ptrack:
            node.safe_psql(
                "postgres",
                "create extension ptrack")

        self.backup_node(
            backup_dir, 'node', node,
            backup_type="full", options=["-j", "4", "--stream"])

        node.safe_psql(
            "postgres",
            "create table t_heap as select 1 as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,10000) i")

        heap_path = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip()

        self.backup_node(
            backup_dir, 'node', node,
            backup_type="full", options=["-j", "4", "--stream"])

        node.safe_psql(
            "postgres",
            "select count(*) from t_heap")

        node.safe_psql(
            "postgres",
            "update t_heap set id = id + 10000")

        node.stop()

        heap_fullpath = os.path.join(node.data_dir, heap_path)
        # corrupt a byte in the middle of block 1 while the server is down
        with open(heap_fullpath, "rb+", 0) as f:
            f.seek(9000)
            f.write(b"bla")
            f.flush()

        node.slow_start()

        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type="full", options=["-j", "4", "--stream"])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because of block corruption"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Corruption detected in file "{0}", block 1: '
                'page verification failed, calculated checksum'.format(
                    heap_fullpath),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        sleep(1)

        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type="delta", options=["-j", "4", "--stream"])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because of block corruption"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Corruption detected in file "{0}", block 1: '
                'page verification failed, calculated checksum'.format(
                    heap_fullpath),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        sleep(1)

        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type="page", options=["-j", "4", "--stream"])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because of block corruption"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Corruption detected in file "{0}", block 1: '
                'page verification failed, calculated checksum'.format(
                    heap_fullpath),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        sleep(1)

        if self.ptrack:
            try:
                self.backup_node(
                    backup_dir, 'node', node,
                    backup_type="ptrack", options=["-j", "4", "--stream"])
                # we should die here because exception is what we expect to happen
                self.assertEqual(
                    1, 0,
                    "Expecting Error because of block corruption"
                    "\n Output: {0} \n CMD: {1}".format(
                        repr(self.output), self.cmd))
            except ProbackupException as e:
                self.assertIn(
                    'ERROR: Corruption detected in file "{0}", block 1: '
                    'page verification failed, calculated checksum'.format(
                        heap_fullpath),
                    e.message,
                    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                        repr(e.message), self.cmd))

    # @unittest.skip("skip")
    def test_backup_detect_invalid_block_header(self):
        """make node, corrupt some page header, check that backup failed"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        if self.ptrack:
            node.safe_psql(
                "postgres",
                "create extension ptrack")

        node.safe_psql(
            "postgres",
            "create table t_heap as select 1 as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,10000) i")

        heap_path = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip()

        self.backup_node(
            backup_dir, 'node', node,
            backup_type="full", options=["-j", "4", "--stream"])

        node.safe_psql(
            "postgres",
            "select count(*) from t_heap")

        node.safe_psql(
            "postgres",
            "update t_heap set id = id + 10000")

        node.stop()

        heap_fullpath = os.path.join(node.data_dir, heap_path)
        # overwrite the very beginning of block 1, mangling the page header
        with open(heap_fullpath, "rb+", 0) as f:
            f.seek(8193)
            f.write(b"blahblahblahblah")
            f.flush()

        node.slow_start()

        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type="full", options=["-j", "4", "--stream"])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because of block corruption"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Corruption detected in file "{0}", block 1: '
                'page header invalid, pd_lower'.format(heap_fullpath),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        sleep(1)

        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type="delta", options=["-j", "4", "--stream"])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because of block corruption"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Corruption detected in file "{0}", block 1: '
                'page header invalid, pd_lower'.format(heap_fullpath),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        sleep(1)

        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type="page", options=["-j", "4", "--stream"])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because of block corruption"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Corruption detected in file "{0}", block 1: '
                'page header invalid, pd_lower'.format(heap_fullpath),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        sleep(1)

        if self.ptrack:
            try:
                self.backup_node(
                    backup_dir, 'node', node,
                    backup_type="ptrack", options=["-j", "4", "--stream"])
                # we should die here because exception is what we expect to happen
                self.assertEqual(
                    1, 0,
                    "Expecting Error because of block corruption"
                    "\n Output: {0} \n CMD: {1}".format(
                        repr(self.output), self.cmd))
            except ProbackupException as e:
                self.assertIn(
                    'ERROR: Corruption detected in file "{0}", block 1: '
                    'page header invalid, pd_lower'.format(heap_fullpath),
                    e.message,
                    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                        repr(e.message), self.cmd))

    # @unittest.skip("skip")
    def test_backup_detect_missing_permissions(self):
        """make node, corrupt some page header, check that backup failed"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        if self.ptrack:
            node.safe_psql(
                "postgres",
                "create extension ptrack")

        node.safe_psql(
            "postgres",
            "create table t_heap as select 1 as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,10000) i")

        heap_path = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip()

        self.backup_node(
            backup_dir, 'node', node,
            backup_type="full", options=["-j", "4", "--stream"])

        node.safe_psql(
            "postgres",
            "select count(*) from t_heap")

        node.safe_psql(
            "postgres",
            "update t_heap set id = id + 10000")

        node.stop()

        heap_fullpath = os.path.join(node.data_dir, heap_path)
        # overwrite the very beginning of block 1, mangling the page header
        with open(heap_fullpath, "rb+", 0) as f:
            f.seek(8193)
            f.write(b"blahblahblahblah")
            f.flush()

        node.slow_start()

        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type="full", options=["-j", "4", "--stream"])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because of block corruption"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Corruption detected in file "{0}", block 1: '
                'page header invalid, pd_lower'.format(heap_fullpath),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        sleep(1)

        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type="delta", options=["-j", "4", "--stream"])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because of block corruption"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Corruption detected in file "{0}", block 1: '
                'page header invalid, pd_lower'.format(heap_fullpath),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        sleep(1)

        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type="page", options=["-j", "4", "--stream"])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because of block corruption"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Corruption detected in file "{0}", block 1: '
                'page header invalid, pd_lower'.format(heap_fullpath),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        sleep(1)

        if self.ptrack:
            try:
                self.backup_node(
                    backup_dir, 'node', node,
                    backup_type="ptrack", options=["-j", "4", "--stream"])
                # we should die here because exception is what we expect to happen
                self.assertEqual(
                    1, 0,
                    "Expecting Error because of block corruption"
                    "\n Output: {0} \n CMD: {1}".format(
                        repr(self.output), self.cmd))
            except ProbackupException as e:
                self.assertIn(
                    'ERROR: Corruption detected in file "{0}", block 1: '
                    'page header invalid, pd_lower'.format(heap_fullpath),
                    e.message,
                    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                        repr(e.message), self.cmd))

    # @unittest.skip("skip")
    def test_backup_truncate_misaligned(self):
        """
        make node, truncate file to size not even to BLCKSIZE,
        take backup
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "create table t_heap as select 1 as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,100000) i")

        node.safe_psql(
            "postgres",
            "CHECKPOINT;")

        heap_path = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip()

        heap_size = node.safe_psql(
            "postgres",
            "select pg_relation_size('t_heap')")

        # truncate the relation so its size is no longer a multiple of BLCKSZ
        with open(os.path.join(node.data_dir, heap_path), "rb+", 0) as f:
            f.truncate(int(heap_size) - 4096)
            f.flush()

        output = self.backup_node(
            backup_dir, 'node', node, backup_type="full",
            options=["-j", "4", "--stream"], return_id=False)

        self.assertIn("WARNING: File", output)
        self.assertIn("invalid file size", output)
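
    # PGPRO-1376: a tablespace created inside PGDATA is reachable both via
    # the data directory walk and via pg_tblspc, so the test below checks
    # that each relfilenode ends up in the backup exactly once.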

    # @unittest.skip("skip")
    def test_tablespace_in_pgdata_pgpro_1376(self):
        """PGPRO-1376"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)

        node.slow_start()

        self.create_tblspace_in_node(
            node, 'tblspace1',
            tblspc_path=(
                os.path.join(
                    node.data_dir, 'somedirectory', '100500'))
            )

        self.create_tblspace_in_node(
            node, 'tblspace2',
            tblspc_path=(os.path.join(node.data_dir))
            )

        node.safe_psql(
            "postgres",
            "create table t_heap1 tablespace tblspace1 as select 1 as id, "
            "md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1000) i")

        node.safe_psql(
            "postgres",
            "create table t_heap2 tablespace tblspace2 as select 1 as id, "
            "md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1000) i")

        backup_id_1 = self.backup_node(
            backup_dir, 'node', node, backup_type="full",
            options=["-j", "4", "--stream"])

        node.safe_psql(
            "postgres",
            "drop table t_heap2")
        node.safe_psql(
            "postgres",
            "drop tablespace tblspace2")

        self.backup_node(
            backup_dir, 'node', node, backup_type="full",
            options=["-j", "4", "--stream"])

        pgdata = self.pgdata_content(node.data_dir)

        relfilenode = node.safe_psql(
            "postgres",
            "select 't_heap1'::regclass::oid"
            ).decode('utf-8').rstrip()

        list = []
        for root, dirs, files in os.walk(os.path.join(
                backup_dir, 'backups', 'node', backup_id_1)):
            for file in files:
                if file == relfilenode:
                    path = os.path.join(root, file)
                    list = list + [path]

        # We expect that relfilenode can be encountered only once
        if len(list) > 1:
            message = ""
            for string in list:
                message = message + string + "\n"
            self.assertEqual(
                1, 0,
                "Following file copied twice by backup:\n {0}".format(
                    message)
                )

        node.cleanup()

        self.restore_node(
            backup_dir, 'node', node, options=["-j", "4"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

    # @unittest.skip("skip")
    def test_basic_tablespace_handling(self):
        """
        make node, take full backup, check that restore with
        tablespace mapping will end with error, take delta backup,
        check that restore with tablespace mapping will end with
        success
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type="full",
            options=["-j", "4", "--stream"])

        tblspace1_old_path = self.get_tblspace_path(node, 'tblspace1_old')
        tblspace2_old_path = self.get_tblspace_path(node, 'tblspace2_old')

        self.create_tblspace_in_node(
            node, 'some_lame_tablespace')

        self.create_tblspace_in_node(
            node, 'tblspace1',
            tblspc_path=tblspace1_old_path)

        self.create_tblspace_in_node(
            node, 'tblspace2',
            tblspc_path=tblspace2_old_path)

        node.safe_psql(
            "postgres",
            "create table t_heap_lame tablespace some_lame_tablespace "
            "as select 1 as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1000) i")

        node.safe_psql(
            "postgres",
            "create table t_heap2 tablespace tblspace2 as select 1 as id, "
            "md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1000) i")

        tblspace1_new_path = self.get_tblspace_path(node, 'tblspace1_new')
        tblspace2_new_path = self.get_tblspace_path(node, 'tblspace2_new')

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        try:
            self.restore_node(
                backup_dir, 'node', node_restored,
                options=[
                    "-j", "4",
                    "-T", "{0}={1}".format(
                        tblspace1_old_path, tblspace1_new_path),
                    "-T", "{0}={1}".format(
                        tblspace2_old_path, tblspace2_new_path)])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because tablespace mapping is incorrect"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Backup {0} has no tablespaceses, '
                'nothing to remap'.format(backup_id),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        node.safe_psql(
            "postgres",
            "drop table t_heap_lame")

        node.safe_psql(
            "postgres",
            "drop tablespace some_lame_tablespace")

        self.backup_node(
            backup_dir, 'node', node, backup_type="delta",
            options=["-j", "4", "--stream"])

        self.restore_node(
            backup_dir, 'node', node_restored,
            options=[
                "-j", "4",
                "-T", "{0}={1}".format(
                    tblspace1_old_path, tblspace1_new_path),
                "-T", "{0}={1}".format(
                    tblspace2_old_path, tblspace2_new_path)])

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

    # @unittest.skip("skip")
    def test_tablespace_handling_1(self):
        """
        make node with tablespace A, take full backup, check that restore with
        tablespace mapping of tablespace B will end with error
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        tblspace1_old_path = self.get_tblspace_path(node, 'tblspace1_old')
        tblspace2_old_path = self.get_tblspace_path(node, 'tblspace2_old')
        tblspace_new_path = self.get_tblspace_path(node, 'tblspace_new')

        self.create_tblspace_in_node(
            node, 'tblspace1',
            tblspc_path=tblspace1_old_path)

        self.backup_node(
            backup_dir, 'node', node, backup_type="full",
            options=["-j", "4", "--stream"])

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        try:
            self.restore_node(
                backup_dir, 'node', node_restored,
                options=[
                    "-j", "4",
                    "-T", "{0}={1}".format(
                        tblspace2_old_path, tblspace_new_path)])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because tablespace mapping is incorrect"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                'ERROR: --tablespace-mapping option' in e.message and
                'have an entry in tablespace_map file' in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

    # @unittest.skip("skip")
    def test_tablespace_handling_2(self):
        """
        make node without tablespaces, take full backup, check that restore
        with tablespace mapping will end with error
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        tblspace1_old_path = self.get_tblspace_path(node, 'tblspace1_old')
        tblspace_new_path = self.get_tblspace_path(node, 'tblspace_new')

        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type="full",
            options=["-j", "4", "--stream"])

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        try:
            self.restore_node(
                backup_dir, 'node', node_restored,
                options=[
                    "-j", "4",
                    "-T", "{0}={1}".format(
                        tblspace1_old_path, tblspace_new_path)])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because tablespace mapping is incorrect"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Backup {0} has no tablespaceses, '
                'nothing to remap'.format(backup_id), e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
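
    # The gdb-based tests below pause the backup inside backup_files() with a
    # breakpoint, drop relations or databases on the running node, and then
    # let the backup finish: files that vanished mid-backup must be tolerated.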

    # @unittest.skip("skip")
    def test_drop_rel_during_full_backup(self):
        """"""
        self._check_gdb_flag_or_skip_test()

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        for i in range(1, 512):
            node.safe_psql(
                "postgres",
                "create table t_heap_{0} as select i "
                "as id from generate_series(0,100) i".format(i))

        node.safe_psql(
            "postgres",
            "VACUUM")

        node.pgbench_init(scale=10)

        relative_path_1 = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap_1')").decode('utf-8').rstrip()

        relative_path_2 = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap_1')").decode('utf-8').rstrip()

        absolute_path_1 = os.path.join(node.data_dir, relative_path_1)
        absolute_path_2 = os.path.join(node.data_dir, relative_path_2)

        # FULL backup
        gdb = self.backup_node(
            backup_dir, 'node', node,
            options=['--stream', '--log-level-file=LOG', '--log-level-console=LOG', '--progress'],
            gdb=True)

        gdb.set_breakpoint('backup_files')
        gdb.run_until_break()

        # REMOVE file
        for i in range(1, 512):
            node.safe_psql(
                "postgres",
                "drop table t_heap_{0}".format(i))

        node.safe_psql(
            "postgres",
            "CHECKPOINT")

        node.safe_psql(
            "postgres",
            "CHECKPOINT")

        # File removed, we can proceed with backup
        gdb.continue_execution_until_exit()

        pgdata = self.pgdata_content(node.data_dir)

        node.cleanup()
        self.restore_node(backup_dir, 'node', node)

        # Physical comparison
        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    @unittest.skip("skip")
    def test_drop_db_during_full_backup(self):
        """"""
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        for i in range(1, 2):
            node.safe_psql(
                "postgres",
                "create database t_heap_{0}".format(i))

        node.safe_psql(
            "postgres",
            "VACUUM")

        # FULL backup
        gdb = self.backup_node(
            backup_dir, 'node', node, gdb=True,
            options=[
                '--stream', '--log-level-file=LOG',
                '--log-level-console=LOG', '--progress'])

        gdb.set_breakpoint('backup_files')
        gdb.run_until_break()

        # REMOVE database
        for i in range(1, 2):
            node.safe_psql(
                "postgres",
                "drop database t_heap_{0}".format(i))

        node.safe_psql(
            "postgres",
            "CHECKPOINT")

        node.safe_psql(
            "postgres",
            "CHECKPOINT")

        # Database removed, we can proceed with backup
        gdb.continue_execution_until_exit()

        pgdata = self.pgdata_content(node.data_dir)

        node.cleanup()
        self.restore_node(backup_dir, 'node', node)

        # Physical comparison
        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    # @unittest.skip("skip")
    def test_drop_rel_during_backup_delta(self):
        """"""
        self._check_gdb_flag_or_skip_test()

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.pgbench_init(scale=10)

        node.safe_psql(
            "postgres",
            "create table t_heap as select i "
            "as id from generate_series(0,100) i")

        relative_path = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip()

        absolute_path = os.path.join(node.data_dir, relative_path)

        # FULL backup
        self.backup_node(backup_dir, 'node', node, options=['--stream'])

        # DELTA backup
        gdb = self.backup_node(
            backup_dir, 'node', node, backup_type='delta',
            gdb=True, options=['--log-level-file=LOG'])

        gdb.set_breakpoint('backup_files')
        gdb.run_until_break()

        # REMOVE file
        node.safe_psql(
            "postgres",
            "DROP TABLE t_heap")

        node.safe_psql(
            "postgres",
            "CHECKPOINT")

        # File removed, we can proceed with backup
        gdb.continue_execution_until_exit()

        pgdata = self.pgdata_content(node.data_dir)

        with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:
            log_content = f.read()
            self.assertTrue(
                'LOG: File not found: "{0}"'.format(absolute_path) in log_content,
                'File "{0}" should be deleted but it`s not'.format(absolute_path))

        node.cleanup()
        self.restore_node(backup_dir, 'node', node, options=["-j", "4"])

        # Physical comparison
        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    # @unittest.skip("skip")
    def test_drop_rel_during_backup_page(self):
        """"""
        self._check_gdb_flag_or_skip_test()

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "create table t_heap as select i "
            "as id from generate_series(0,100) i")

        relative_path = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip()

        absolute_path = os.path.join(node.data_dir, relative_path)

        # FULL backup
        self.backup_node(backup_dir, 'node', node, options=['--stream'])

        node.safe_psql(
            "postgres",
            "insert into t_heap select i "
            "as id from generate_series(101,102) i")

        # PAGE backup
        gdb = self.backup_node(
            backup_dir, 'node', node, backup_type='page',
            gdb=True, options=['--log-level-file=LOG'])

        gdb.set_breakpoint('backup_files')
        gdb.run_until_break()

        # REMOVE file
        os.remove(absolute_path)

        # File removed, we can proceed with backup
        gdb.continue_execution_until_exit()
        gdb.kill()

        pgdata = self.pgdata_content(node.data_dir)

        backup_id = self.show_pb(backup_dir, 'node')[1]['id']
        filelist = self.get_backup_filelist(backup_dir, 'node', backup_id)
        self.assertNotIn(relative_path, filelist)

        node.cleanup()
        self.restore_node(backup_dir, 'node', node, options=["-j", "4"])

        # Physical comparison
        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
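
    # Stream backups can pin WAL with a replication slot: --slot reuses an
    # existing physical slot, while --temp-slot (PostgreSQL 10+) creates an
    # ephemeral one that disappears when the backup connection closes.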

    # @unittest.skip("skip")
    def test_persistent_slot_for_stream_backup(self):
        """"""
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'max_wal_size': '40MB'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "SELECT pg_create_physical_replication_slot('slot_1')")

        # FULL backup
        self.backup_node(
            backup_dir, 'node', node,
            options=['--stream', '--slot=slot_1'])

        # FULL backup
        self.backup_node(
            backup_dir, 'node', node,
            options=['--stream', '--slot=slot_1'])

    # @unittest.skip("skip")
    def test_basic_temp_slot_for_stream_backup(self):
        """"""
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'max_wal_size': '40MB'})

        if self.get_version(node) < self.version_to_num('10.0'):
            self.skipTest('You need PostgreSQL >= 10 for this test')

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # FULL backup
        self.backup_node(
            backup_dir, 'node', node,
            options=['--stream', '--temp-slot'])

        # FULL backup
        self.backup_node(
            backup_dir, 'node', node,
            options=['--stream', '--slot=slot_1', '--temp-slot'])
# @unittest.skip("skip")
def test_backup_concurrent_drop_table ( self ) :
""" """
2022-07-01 13:52:20 +03:00
self . _check_gdb_flag_or_skip_test ( )
2022-10-23 05:30:13 +03:00
backup_dir = os . path . join ( self . tmp_path , self . module_name , self . fname , ' backup ' )
2019-05-21 17:54:23 +03:00
node = self . make_simple_node (
2022-10-23 05:30:13 +03:00
base_dir = os . path . join ( self . module_name , self . fname , ' node ' ) ,
2019-05-21 17:54:23 +03:00
set_replication = True ,
initdb_params = [ ' --data-checksums ' ] )
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
self . set_archiving ( backup_dir , ' node ' , node )
node . slow_start ( )
2019-05-21 17:56:22 +03:00
node . pgbench_init ( scale = 1 )
2019-05-21 17:54:23 +03:00
# FULL backup
gdb = self . backup_node (
backup_dir , ' node ' , node ,
2019-05-21 19:29:49 +03:00
options = [ ' --stream ' , ' --compress ' ] ,
2019-05-21 17:54:23 +03:00
gdb = True )
gdb . set_breakpoint ( ' backup_data_file ' )
gdb . run_until_break ( )
node . safe_psql (
' postgres ' ,
' DROP TABLE pgbench_accounts ' )
# do checkpoint to guarantee filenode removal
node . safe_psql (
' postgres ' ,
' CHECKPOINT ' )
gdb . remove_all_breakpoints ( )
gdb . continue_execution_until_exit ( )
2020-10-30 02:47:06 +03:00
gdb . kill ( )
2019-05-21 17:54:23 +03:00
show_backup = self . show_pb ( backup_dir , ' node ' ) [ 0 ]
self . assertEqual ( show_backup [ ' status ' ] , " OK " )

    # @unittest.skip("skip")
    def test_pg_11_adjusted_wal_segment_size(self):
        """Run every backup mode, then delete, validate, merge and restore
        on a cluster initialized with a non-default 64MB WAL segment size"""
        if self.pg_config_version < self.version_to_num('11.0'):
            self.skipTest('You need PostgreSQL >= 11 for this test')

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=[
                '--data-checksums',
                '--wal-segsize=64'],
            pg_options={
                'min_wal_size': '128MB'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.pgbench_init(scale=5)

        # FULL STREAM backup
        self.backup_node(
            backup_dir, 'node', node, options=['--stream'])

        pgbench = node.pgbench(options=['-T', '5', '-c', '2'])
        pgbench.wait()

        # PAGE STREAM backup
        self.backup_node(
            backup_dir, 'node', node,
            backup_type='page', options=['--stream'])

        pgbench = node.pgbench(options=['-T', '5', '-c', '2'])
        pgbench.wait()

        # DELTA STREAM backup
        self.backup_node(
            backup_dir, 'node', node,
            backup_type='delta', options=['--stream'])

        pgbench = node.pgbench(options=['-T', '5', '-c', '2'])
        pgbench.wait()

        # FULL ARCHIVE backup
        self.backup_node(backup_dir, 'node', node)

        pgbench = node.pgbench(options=['-T', '5', '-c', '2'])
        pgbench.wait()

        # PAGE ARCHIVE backup
        self.backup_node(backup_dir, 'node', node, backup_type='page')

        pgbench = node.pgbench(options=['-T', '5', '-c', '2'])
        pgbench.wait()

        # DELTA ARCHIVE backup
        backup_id = self.backup_node(backup_dir, 'node', node, backup_type='delta')
        pgdata = self.pgdata_content(node.data_dir)

        # delete
        output = self.delete_pb(
            backup_dir, 'node',
            options=[
                '--expired',
                '--delete-wal',
                '--retention-redundancy=1'])

        # validate
        self.validate_pb(backup_dir)

        # merge
        self.merge_backup(backup_dir, 'node', backup_id=backup_id)

        # restore
        node.cleanup()
        self.restore_node(
            backup_dir, 'node', node, backup_id=backup_id)

        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
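
    # Usage note (sketch, assumption — not executed by the test above): one
    # could confirm that initdb honoured --wal-segsize=64 before exercising
    # the backups. PostgreSQL 11+ reports the value with a unit:
    #
    #     wal_seg = node.safe_psql(
    #         'postgres', 'SHOW wal_segment_size').decode('utf-8').rstrip()
    #     self.assertEqual(wal_seg, '64MB')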

    # @unittest.skip("skip")
    def test_sigint_handling(self):
        """A backup interrupted by SIGINT must end up with ERROR status"""
        self._check_gdb_flag_or_skip_test()

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        # FULL backup
        gdb = self.backup_node(
            backup_dir, 'node', node, gdb=True,
            options=['--stream', '--log-level-file=LOG'])

        gdb.set_breakpoint('backup_non_data_file')
        gdb.run_until_break()
        gdb.continue_execution_until_break(20)
        gdb.remove_all_breakpoints()

        gdb._execute('signal SIGINT')
        gdb.continue_execution_until_error()
        gdb.kill()

        backup_id = self.show_pb(backup_dir, 'node')[0]['id']

        self.assertEqual(
            'ERROR',
            self.show_pb(backup_dir, 'node', backup_id)['status'],
            'Backup STATUS should be "ERROR"')
# @unittest.skip("skip")
2019-04-19 19:32:02 +03:00
def test_sigterm_handling ( self ) :
2019-04-19 19:15:02 +03:00
""" """
2022-07-01 13:52:20 +03:00
self . _check_gdb_flag_or_skip_test ( )
2022-10-23 05:30:13 +03:00
backup_dir = os . path . join ( self . tmp_path , self . module_name , self . fname , ' backup ' )
2019-04-19 19:15:02 +03:00
node = self . make_simple_node (
2022-10-23 05:30:13 +03:00
base_dir = os . path . join ( self . module_name , self . fname , ' node ' ) ,
2019-04-19 19:15:02 +03:00
set_replication = True ,
initdb_params = [ ' --data-checksums ' ] )
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
node . slow_start ( )
# FULL backup
gdb = self . backup_node (
backup_dir , ' node ' , node , gdb = True ,
2019-12-23 18:23:29 +03:00
options = [ ' --stream ' , ' --log-level-file=LOG ' ] )
2019-04-19 19:15:02 +03:00
2020-02-21 22:19:52 +03:00
gdb . set_breakpoint ( ' backup_non_data_file ' )
2019-04-19 19:15:02 +03:00
gdb . run_until_break ( )
gdb . continue_execution_until_break ( 20 )
gdb . remove_all_breakpoints ( )
gdb . _execute ( ' signal SIGTERM ' )
2019-06-05 16:41:36 +03:00
gdb . continue_execution_until_error ( )
2019-04-19 19:15:02 +03:00
backup_id = self . show_pb ( backup_dir , ' node ' ) [ 0 ] [ ' id ' ]
self . assertEqual (
' ERROR ' ,
self . show_pb ( backup_dir , ' node ' , backup_id ) [ ' status ' ] ,
' Backup STATUS should be " ERROR " ' )

    # @unittest.skip("skip")
    def test_sigquit_handling(self):
        """A backup interrupted by SIGQUIT must end up with ERROR status"""
        self._check_gdb_flag_or_skip_test()

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        # FULL backup
        gdb = self.backup_node(
            backup_dir, 'node', node, gdb=True, options=['--stream'])

        gdb.set_breakpoint('backup_non_data_file')
        gdb.run_until_break()
        gdb.continue_execution_until_break(20)
        gdb.remove_all_breakpoints()

        gdb._execute('signal SIGQUIT')
        gdb.continue_execution_until_error()

        backup_id = self.show_pb(backup_dir, 'node')[0]['id']

        self.assertEqual(
            'ERROR',
            self.show_pb(backup_dir, 'node', backup_id)['status'],
            'Backup STATUS should be "ERROR"')
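
    # The three signal tests above share a single pattern. The helper below is
    # a sketch of a possible refactoring (hypothetical, not used by them): it
    # starts a FULL stream backup under gdb, breaks inside
    # backup_non_data_file, delivers the given signal and lets the process
    # die; every call it makes already appears in the tests above.
    def _run_interrupted_backup_sketch(self, backup_dir, node, signal_name):
        gdb = self.backup_node(
            backup_dir, 'node', node, gdb=True, options=['--stream'])
        gdb.set_breakpoint('backup_non_data_file')
        gdb.run_until_break()
        gdb.continue_execution_until_break(20)
        gdb.remove_all_breakpoints()
        gdb._execute('signal {0}'.format(signal_name))
        gdb.continue_execution_until_error()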

    # @unittest.skip("skip")
    def test_drop_table(self):
        """Create and drop a table from concurrent sessions, then take a FULL backup"""
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        connect_1 = node.connect("postgres")
        connect_1.execute(
            "create table t_heap as select i"
            " as id from generate_series(0,100) i")
        connect_1.commit()

        connect_2 = node.connect("postgres")
        connect_2.execute("SELECT * FROM t_heap")
        connect_2.commit()

        # DROP table
        connect_2.execute("DROP TABLE t_heap")
        connect_2.commit()

        # FULL backup
        self.backup_node(
            backup_dir, 'node', node, options=['--stream'])

    # @unittest.skip("skip")
    def test_basic_missing_file_permissions(self):
        """Backup must fail with 'Cannot open file' if a data file is unreadable"""
        if os.name == 'nt':
            self.skipTest('Skipped because it is POSIX only test')

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        relative_path = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('pg_class')").decode('utf-8').rstrip()

        full_path = os.path.join(node.data_dir, relative_path)

        # permission bits must be octal literals, not decimal
        os.chmod(full_path, 0o000)

        try:
            # FULL backup
            self.backup_node(
                backup_dir, 'node', node, options=['--stream'])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because of missing permissions"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Cannot open file',
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        os.chmod(full_path, 0o700)
# @unittest.skip("skip")
def test_basic_missing_dir_permissions ( self ) :
""" """
2019-08-06 19:21:52 +03:00
if os . name == ' nt ' :
2022-11-15 11:23:49 +03:00
self . skipTest ( ' Skipped because it is POSIX only test ' )
2019-08-06 19:21:52 +03:00
2022-10-23 05:30:13 +03:00
backup_dir = os . path . join ( self . tmp_path , self . module_name , self . fname , ' backup ' )
2019-05-28 22:37:12 +03:00
node = self . make_simple_node (
2022-10-23 05:30:13 +03:00
base_dir = os . path . join ( self . module_name , self . fname , ' node ' ) ,
2019-05-28 22:37:12 +03:00
set_replication = True ,
initdb_params = [ ' --data-checksums ' ] )
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
node . slow_start ( )
full_path = os . path . join ( node . data_dir , ' pg_twophase ' )
os . chmod ( full_path , 000 )
try :
# FULL backup
self . backup_node (
backup_dir , ' node ' , node , options = [ ' --stream ' ] )
# we should die here because exception is what we expect to happen
self . assertEqual (
1 , 0 ,
" Expecting Error because of missing permissions "
" \n Output: {0} \n CMD: {1} " . format (
repr ( self . output ) , self . cmd ) )
except ProbackupException as e :
self . assertIn (
' ERROR: Cannot open directory ' ,
e . message ,
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
2020-12-29 19:07:17 +03:00
os . rmdir ( full_path )
2019-05-28 22:37:12 +03:00
2019-06-24 20:08:03 +03:00
# @unittest.skip("skip")
def test_backup_with_least_privileges_role ( self ) :
""" """
2022-10-23 05:30:13 +03:00
backup_dir = os . path . join ( self . tmp_path , self . module_name , self . fname , ' backup ' )
2019-06-24 20:08:03 +03:00
node = self . make_simple_node (
2022-10-23 05:30:13 +03:00
base_dir = os . path . join ( self . module_name , self . fname , ' node ' ) ,
2019-06-24 20:08:03 +03:00
set_replication = True ,
2019-12-23 18:23:29 +03:00
ptrack_enable = self . ptrack ,
2019-06-24 20:08:03 +03:00
initdb_params = [ ' --data-checksums ' ] ,
2019-12-23 18:23:29 +03:00
pg_options = { ' archive_timeout ' : ' 30s ' } )
2019-06-24 20:08:03 +03:00
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
self . set_archiving ( backup_dir , ' node ' , node )
node . slow_start ( )
node . safe_psql (
' postgres ' ,
' CREATE DATABASE backupdb ' )
2021-06-18 12:12:37 +03:00
if self . ptrack :
2019-12-23 18:23:29 +03:00
node . safe_psql (
" backupdb " ,
2021-06-18 12:12:37 +03:00
" CREATE SCHEMA ptrack; "
" CREATE EXTENSION ptrack WITH SCHEMA ptrack " )
2019-12-23 18:23:29 +03:00
2019-07-15 19:24:31 +03:00
# PG 9.5
2019-07-12 18:01:28 +03:00
if self . get_version ( node ) < 90600 :
node . safe_psql (
' backupdb ' ,
" REVOKE ALL ON DATABASE backupdb from PUBLIC; "
" REVOKE ALL ON SCHEMA public from PUBLIC; "
" REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; "
" REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; "
" REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; "
" REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; "
" REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; "
" REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; "
" REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; "
" REVOKE ALL ON SCHEMA information_schema from PUBLIC; "
" REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; "
" REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; "
" REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; "
" CREATE ROLE backup WITH LOGIN REPLICATION; "
" GRANT CONNECT ON DATABASE backupdb to backup; "
" GRANT USAGE ON SCHEMA pg_catalog TO backup; "
" GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
2020-01-07 21:15:53 +03:00
" GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
2019-07-12 18:01:28 +03:00
" GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
2021-10-13 21:18:03 +03:00
" GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; "
2019-07-12 18:01:28 +03:00
" GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
2021-10-13 21:18:03 +03:00
" GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; "
2019-07-12 18:01:28 +03:00
" GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
2022-08-28 00:39:33 +03:00
" GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " )
2019-07-15 19:24:31 +03:00
# PG 9.6
elif self . get_version ( node ) > 90600 and self . get_version ( node ) < 100000 :
node . safe_psql (
' backupdb ' ,
" REVOKE ALL ON DATABASE backupdb from PUBLIC; "
" REVOKE ALL ON SCHEMA public from PUBLIC; "
" REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; "
" REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; "
" REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; "
" REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; "
" REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; "
" REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; "
" REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; "
" REVOKE ALL ON SCHEMA information_schema from PUBLIC; "
" REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; "
" REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; "
" REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; "
" CREATE ROLE backup WITH LOGIN REPLICATION; "
" GRANT CONNECT ON DATABASE backupdb to backup; "
" GRANT USAGE ON SCHEMA pg_catalog TO backup; "
2020-01-07 21:15:53 +03:00
" GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
2019-07-15 19:24:31 +03:00
" GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
" GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
2021-10-13 21:18:03 +03:00
" GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; "
2019-07-15 19:24:31 +03:00
" GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
2021-10-13 21:18:03 +03:00
" GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; "
2019-07-15 19:24:31 +03:00
" GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
2022-07-01 15:46:53 +03:00
" GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; "
)
# >= 10 && < 15
elif self . get_version ( node ) > = 100000 and self . get_version ( node ) < 150000 :
2019-07-12 18:01:28 +03:00
node . safe_psql (
' backupdb ' ,
" REVOKE ALL ON DATABASE backupdb from PUBLIC; "
" REVOKE ALL ON SCHEMA public from PUBLIC; "
" REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; "
" REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; "
" REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; "
" REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; "
" REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; "
" REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; "
" REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; "
" REVOKE ALL ON SCHEMA information_schema from PUBLIC; "
" REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; "
" REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; "
" REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; "
" CREATE ROLE backup WITH LOGIN REPLICATION; "
" GRANT CONNECT ON DATABASE backupdb to backup; "
" GRANT USAGE ON SCHEMA pg_catalog TO backup; "
2020-01-07 21:15:53 +03:00
" GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
2019-07-12 18:01:28 +03:00
" GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
2019-12-23 18:23:29 +03:00
" GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
2019-07-12 18:01:28 +03:00
" GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
2021-10-13 21:18:03 +03:00
" GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; "
2019-07-12 18:01:28 +03:00
" GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
2021-10-13 21:18:03 +03:00
" GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; "
2019-07-12 18:01:28 +03:00
" GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
2022-07-01 15:46:53 +03:00
" GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; "
)
# >= 15
else :
node . safe_psql (
' backupdb ' ,
" REVOKE ALL ON DATABASE backupdb from PUBLIC; "
" REVOKE ALL ON SCHEMA public from PUBLIC; "
" REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; "
" REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; "
" REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; "
" REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; "
" REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; "
" REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; "
" REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; "
" REVOKE ALL ON SCHEMA information_schema from PUBLIC; "
" REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; "
" REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; "
" REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; "
" CREATE ROLE backup WITH LOGIN REPLICATION; "
" GRANT CONNECT ON DATABASE backupdb to backup; "
" GRANT USAGE ON SCHEMA pg_catalog TO backup; "
" GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
" GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
" GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
" GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
" GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; "
)
2019-07-12 18:01:28 +03:00
if self . ptrack :
2021-06-18 12:12:37 +03:00
node . safe_psql (
" backupdb " ,
" GRANT USAGE ON SCHEMA ptrack TO backup " )
node . safe_psql (
" backupdb " ,
" GRANT EXECUTE ON FUNCTION ptrack.ptrack_get_pagemapset(pg_lsn) TO backup; "
2021-10-13 05:30:20 +03:00
" GRANT EXECUTE ON FUNCTION ptrack.ptrack_init_lsn() TO backup; " )
2019-12-23 18:23:29 +03:00
if ProbackupTest . enterprise :
node . safe_psql (
" backupdb " ,
2022-08-28 00:39:33 +03:00
" GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; "
" GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; " )
2019-06-24 20:08:03 +03:00
# FULL backup
self . backup_node (
backup_dir , ' node ' , node ,
datname = ' backupdb ' , options = [ ' --stream ' , ' -U ' , ' backup ' ] )
self . backup_node (
backup_dir , ' node ' , node ,
datname = ' backupdb ' , options = [ ' -U ' , ' backup ' ] )
# PAGE
self . backup_node (
backup_dir , ' node ' , node , backup_type = ' page ' ,
datname = ' backupdb ' , options = [ ' -U ' , ' backup ' ] )
self . backup_node (
backup_dir , ' node ' , node , backup_type = ' page ' , datname = ' backupdb ' ,
options = [ ' --stream ' , ' -U ' , ' backup ' ] )
# DELTA
self . backup_node (
backup_dir , ' node ' , node , backup_type = ' delta ' ,
datname = ' backupdb ' , options = [ ' -U ' , ' backup ' ] )
self . backup_node (
backup_dir , ' node ' , node , backup_type = ' delta ' ,
datname = ' backupdb ' , options = [ ' --stream ' , ' -U ' , ' backup ' ] )
# PTRACK
2019-07-12 18:01:28 +03:00
if self . ptrack :
self . backup_node (
backup_dir , ' node ' , node , backup_type = ' ptrack ' ,
datname = ' backupdb ' , options = [ ' -U ' , ' backup ' ] )
self . backup_node (
backup_dir , ' node ' , node , backup_type = ' ptrack ' ,
datname = ' backupdb ' , options = [ ' --stream ' , ' -U ' , ' backup ' ] )
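
    # The only differences between the per-version GRANT blocks above are the
    # names and signatures of the backup control functions. The helper below
    # is a sketch of that mapping (hypothetical, not used by the tests;
    # archive- and catalog-related grants are deliberately omitted):
    def _backup_control_grants_sketch(self, version_num):
        """Return the version-specific GRANT statements for the backup
        control functions, distilled from the branches above."""
        if version_num < 90600:      # PG 9.5
            funcs = ['pg_start_backup(text, boolean)',
                     'pg_stop_backup()']
        elif version_num < 100000:   # PG 9.6
            funcs = ['pg_start_backup(text, boolean, boolean)',
                     'pg_stop_backup(boolean)']
        elif version_num < 150000:   # PG 10..14
            funcs = ['pg_start_backup(text, boolean, boolean)',
                     'pg_stop_backup(boolean, boolean)']
        else:                        # PG 15+
            funcs = ['pg_backup_start(text, boolean)',
                     'pg_backup_stop(boolean)']
        return ['GRANT EXECUTE ON FUNCTION pg_catalog.{0} TO backup;'.format(f)
                for f in funcs]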

    # @unittest.skip("skip")
    def test_parent_choosing(self):
        """
        PAGE3 <- RUNNING (parent should be FULL)
        PAGE2 <- OK
        PAGE1 <- ERROR
        FULL
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        full_id = self.backup_node(backup_dir, 'node', node)

        # PAGE1
        page1_id = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # PAGE2
        page2_id = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # Change PAGE1 to ERROR
        self.change_backup_status(backup_dir, 'node', page1_id, 'ERROR')

        # PAGE3
        page3_id = self.backup_node(
            backup_dir, 'node', node,
            backup_type='page', options=['--log-level-file=LOG'])

        log_file_path = os.path.join(backup_dir, 'log', 'pg_probackup.log')
        with open(log_file_path) as f:
            log_file_content = f.read()

        self.assertIn(
            "WARNING: Backup {0} has invalid parent: {1}. "
            "Cannot be a parent".format(page2_id, page1_id),
            log_file_content)

        self.assertIn(
            "WARNING: Backup {0} has status: ERROR. "
            "Cannot be a parent".format(page1_id),
            log_file_content)

        self.assertIn(
            "Parent backup: {0}".format(full_id),
            log_file_content)

        self.assertEqual(
            self.show_pb(
                backup_dir, 'node', backup_id=page3_id)['parent-backup-id'],
            full_id)
# @unittest.skip("skip")
def test_parent_choosing_1 ( self ) :
"""
PAGE3 < - RUNNING ( parent should be FULL )
PAGE2 < - OK
PAGE1 < - ( missing )
FULL
"""
2022-10-23 05:30:13 +03:00
backup_dir = os . path . join ( self . tmp_path , self . module_name , self . fname , ' backup ' )
2019-06-27 19:43:03 +03:00
node = self . make_simple_node (
2022-10-23 05:30:13 +03:00
base_dir = os . path . join ( self . module_name , self . fname , ' node ' ) ,
2019-06-27 19:43:03 +03:00
set_replication = True ,
initdb_params = [ ' --data-checksums ' ] )
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
self . set_archiving ( backup_dir , ' node ' , node )
node . slow_start ( )
full_id = self . backup_node ( backup_dir , ' node ' , node )
# PAGE1
page1_id = self . backup_node (
backup_dir , ' node ' , node , backup_type = ' page ' )
# PAGE2
page2_id = self . backup_node (
backup_dir , ' node ' , node , backup_type = ' page ' )
# Delete PAGE1
shutil . rmtree (
os . path . join ( backup_dir , ' backups ' , ' node ' , page1_id ) )
# PAGE3
page3_id = self . backup_node (
backup_dir , ' node ' , node ,
backup_type = ' page ' , options = [ ' --log-level-file=LOG ' ] )
log_file_path = os . path . join ( backup_dir , ' log ' , ' pg_probackup.log ' )
with open ( log_file_path ) as f :
log_file_content = f . read ( )
self . assertIn (
" WARNING: Backup {0} has missing parent: {1} . "
" Cannot be a parent " . format ( page2_id , page1_id ) ,
log_file_content )
self . assertIn (
" Parent backup: {0} " . format ( full_id ) ,
log_file_content )
self . assertEqual (
self . show_pb (
backup_dir , ' node ' , backup_id = page3_id ) [ ' parent-backup-id ' ] ,
full_id )
# @unittest.skip("skip")
def test_parent_choosing_2 ( self ) :
"""
PAGE3 < - RUNNING ( backup should fail )
PAGE2 < - OK
PAGE1 < - OK
FULL < - ( missing )
"""
2022-10-23 05:30:13 +03:00
backup_dir = os . path . join ( self . tmp_path , self . module_name , self . fname , ' backup ' )
2019-06-27 19:43:03 +03:00
node = self . make_simple_node (
2022-10-23 05:30:13 +03:00
base_dir = os . path . join ( self . module_name , self . fname , ' node ' ) ,
2019-06-27 19:43:03 +03:00
set_replication = True ,
initdb_params = [ ' --data-checksums ' ] )
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
self . set_archiving ( backup_dir , ' node ' , node )
node . slow_start ( )
full_id = self . backup_node ( backup_dir , ' node ' , node )
# PAGE1
page1_id = self . backup_node (
backup_dir , ' node ' , node , backup_type = ' page ' )
# PAGE2
page2_id = self . backup_node (
backup_dir , ' node ' , node , backup_type = ' page ' )
# Delete FULL
shutil . rmtree (
os . path . join ( backup_dir , ' backups ' , ' node ' , full_id ) )
# PAGE3
try :
self . backup_node (
backup_dir , ' node ' , node ,
backup_type = ' page ' , options = [ ' --log-level-file=LOG ' ] )
# we should die here because exception is what we expect to happen
self . assertEqual (
1 , 0 ,
" Expecting Error because FULL backup is missing "
" \n Output: {0} \n CMD: {1} " . format (
repr ( self . output ) , self . cmd ) )
except ProbackupException as e :
2020-04-03 18:08:53 +03:00
self . assertTrue (
2021-04-22 17:36:04 +03:00
' WARNING: Valid full backup on current timeline 1 is not found ' in e . message and
2020-04-03 18:08:53 +03:00
' ERROR: Create new full backup before an incremental one ' in e . message ,
2019-06-27 19:43:03 +03:00
' \n Unexpected Error Message: {0} \n CMD: {1} ' . format (
repr ( e . message ) , self . cmd ) )
self . assertEqual (
self . show_pb (
backup_dir , ' node ' ) [ 2 ] [ ' status ' ] ,
' ERROR ' )
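
    # The three parent_choosing tests above pin down the selection rule: the
    # parent of a new incremental backup is the newest backup whose whole
    # ancestry down to FULL is OK; if no such chain exists, a new FULL is
    # required. A plain-Python sketch (hypothetical data shape: dict of
    # id -> {'status', 'parent'}, FULL entries having parent None, and ids
    # assumed to sort oldest-to-newest):
    @staticmethod
    def _choose_parent_sketch(backups):
        def chain_ok(backup_id):
            while backup_id is not None:
                meta = backups.get(backup_id)
                if meta is None or meta['status'] != 'OK':
                    return False
                backup_id = meta['parent']
            return True
        candidates = [bid for bid in backups if chain_ok(bid)]
        return max(candidates) if candidates else None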

    # @unittest.skip("skip")
    def test_backup_with_less_privileges_role(self):
        """
        Check that the role permissions given in the documentation are
        sufficient:
        https://github.com/postgrespro/pg_probackup/blob/master/Documentation.md#configuring-the-database-cluster
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'],
            pg_options={
                'archive_timeout': '30s',
                'archive_mode': 'always',
                'checkpoint_timeout': '60s',
                'wal_level': 'logical'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_config(backup_dir, 'node', options=['--archive-timeout=60s'])
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            'postgres',
            'CREATE DATABASE backupdb')

        if self.ptrack:
            node.safe_psql(
                'backupdb',
                'CREATE EXTENSION ptrack')

        # PG 9.5
        if self.get_version(node) < 90600:
            node.safe_psql(
                'backupdb',
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;")
        # PG 9.6
        elif self.get_version(node) >= 90600 and self.get_version(node) < 100000:
            node.safe_psql(
                'backupdb',
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;")
        # >= 10 && < 15
        elif self.get_version(node) >= 100000 and self.get_version(node) < 150000:
            node.safe_psql(
                'backupdb',
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;")
        # >= 15
        else:
            node.safe_psql(
                'backupdb',
                "BEGIN; "
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; "
                "COMMIT;")

        # enable STREAM backup
        node.safe_psql(
            'backupdb',
            'ALTER ROLE backup WITH REPLICATION;')

        # FULL backup
        self.backup_node(
            backup_dir, 'node', node,
            datname='backupdb', options=['--stream', '-U', 'backup'])
        self.backup_node(
            backup_dir, 'node', node,
            datname='backupdb', options=['-U', 'backup'])

        # PAGE
        self.backup_node(
            backup_dir, 'node', node, backup_type='page',
            datname='backupdb', options=['-U', 'backup'])
        self.backup_node(
            backup_dir, 'node', node, backup_type='page', datname='backupdb',
            options=['--stream', '-U', 'backup'])

        # DELTA
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta',
            datname='backupdb', options=['-U', 'backup'])
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta',
            datname='backupdb', options=['--stream', '-U', 'backup'])

        # PTRACK
        if self.ptrack:
            self.backup_node(
                backup_dir, 'node', node, backup_type='ptrack',
                datname='backupdb', options=['-U', 'backup'])
            self.backup_node(
                backup_dir, 'node', node, backup_type='ptrack',
                datname='backupdb', options=['--stream', '-U', 'backup'])

        if self.get_version(node) < 90600:
            return

        # Restore as replica
        replica = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'replica'))
        replica.cleanup()
        self.restore_node(backup_dir, 'node', replica)
        self.set_replica(node, replica)
        self.add_instance(backup_dir, 'replica', replica)
        self.set_config(
            backup_dir, 'replica',
            options=['--archive-timeout=120s', '--log-level-console=LOG'])
        self.set_archiving(backup_dir, 'replica', replica, replica=True)
        self.set_auto_conf(replica, {'hot_standby': 'on'})

        # freeze bgwriter to get rid of RUNNING XACTS records
        # bgwriter_pid = node.auxiliary_pids[ProcessType.BackgroundWriter][0]
        # gdb_checkpointer = self.gdb_attach(bgwriter_pid)

        copy_tree(
            os.path.join(backup_dir, 'wal', 'node'),
            os.path.join(backup_dir, 'wal', 'replica'))

        replica.slow_start(replica=True)

        # self.switch_wal_segment(node)
        # self.switch_wal_segment(node)

        # FULL archive backup from replica
        self.backup_node(
            backup_dir, 'replica', replica,
            datname='backupdb', options=['-U', 'backup'])

        # stream full backup from replica
        self.backup_node(
            backup_dir, 'replica', replica,
            datname='backupdb', options=['--stream', '-U', 'backup'])

        # self.switch_wal_segment(node)

        # PAGE backup from replica
        self.switch_wal_segment(node)
        self.backup_node(
            backup_dir, 'replica', replica, backup_type='page',
            datname='backupdb', options=['-U', 'backup', '--archive-timeout=30s'])

        self.backup_node(
            backup_dir, 'replica', replica, backup_type='page',
            datname='backupdb', options=['--stream', '-U', 'backup'])

        # DELTA backup from replica
        self.switch_wal_segment(node)
        self.backup_node(
            backup_dir, 'replica', replica, backup_type='delta',
            datname='backupdb', options=['-U', 'backup'])
        self.backup_node(
            backup_dir, 'replica', replica, backup_type='delta',
            datname='backupdb', options=['--stream', '-U', 'backup'])

        # PTRACK backup from replica
        if self.ptrack:
            self.switch_wal_segment(node)
            self.backup_node(
                backup_dir, 'replica', replica, backup_type='ptrack',
                datname='backupdb', options=['-U', 'backup'])
            self.backup_node(
                backup_dir, 'replica', replica, backup_type='ptrack',
                datname='backupdb', options=['--stream', '-U', 'backup'])
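
    # Sketch (assumption — not part of the original test): before taking an
    # archive backup from the replica it can help to wait until WAL replay
    # has caught up with the primary's flush position, instead of relying on
    # switch_wal_segment() plus timeouts. Uses PG >= 10 function names and
    # only helpers already imported in this module.
    def _wait_replica_catchup_sketch(self, master, replica, timeout=60):
        master_lsn = master.safe_psql(
            'postgres',
            'SELECT pg_current_wal_flush_lsn()').decode('utf-8').rstrip()
        for _ in range(timeout):
            caught_up = replica.safe_psql(
                'postgres',
                "SELECT pg_wal_lsn_diff("
                "pg_last_wal_replay_lsn(), '{0}') >= 0".format(
                    master_lsn)).decode('utf-8').rstrip()
            if caught_up == 't':
                return
            sleep(1)
        self.fail('replica did not replay up to {0}'.format(master_lsn))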

    @unittest.skip("skip")
    def test_issue_132(self):
        """
        https://github.com/postgrespro/pg_probackup/issues/132
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        with node.connect("postgres") as conn:
            for i in range(50000):
                conn.execute(
                    "CREATE TABLE t_{0} as select 1".format(i))
                conn.commit()

        self.backup_node(
            backup_dir, 'node', node, options=['--stream'])

        pgdata = self.pgdata_content(node.data_dir)

        node.cleanup()
        self.restore_node(backup_dir, 'node', node)

        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    @unittest.skip("skip")
    def test_issue_132_1(self):
        """
        https://github.com/postgrespro/pg_probackup/issues/132
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        # TODO: check version of old binary, it should be 2.1.4, 2.1.5 or 2.2.1

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        with node.connect("postgres") as conn:
            for i in range(30000):
                conn.execute(
                    "CREATE TABLE t_{0} as select 1".format(i))
                conn.commit()

        full_id = self.backup_node(
            backup_dir, 'node', node, options=['--stream'], old_binary=True)

        delta_id = self.backup_node(
            backup_dir, 'node', node, backup_type='delta',
            options=['--stream'], old_binary=True)

        node.cleanup()

        # make sure that new binary can detect corruption
        try:
            self.validate_pb(backup_dir, 'node', backup_id=full_id)
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because FULL backup is CORRUPT"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'WARNING: Backup {0} is a victim of metadata corruption'.format(full_id),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        try:
            self.validate_pb(backup_dir, 'node', backup_id=delta_id)
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because FULL backup is CORRUPT"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'WARNING: Backup {0} is a victim of metadata corruption'.format(full_id),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            'CORRUPT', self.show_pb(backup_dir, 'node', full_id)['status'],
            'Backup STATUS should be "CORRUPT"')

        self.assertEqual(
            'ORPHAN', self.show_pb(backup_dir, 'node', delta_id)['status'],
            'Backup STATUS should be "ORPHAN"')

        # check that '--no-validate' does not allow to restore ORPHAN backup
        # try:
        #     self.restore_node(
        #         backup_dir, 'node', node, backup_id=delta_id,
        #         options=['--no-validate'])
        #     # we should die here because exception is what we expect to happen
        #     self.assertEqual(
        #         1, 0,
        #         "Expecting Error because FULL backup is CORRUPT"
        #         "\n Output: {0} \n CMD: {1}".format(
        #             repr(self.output), self.cmd))
        # except ProbackupException as e:
        #     self.assertIn(
        #         'Insert data',
        #         e.message,
        #         '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
        #             repr(e.message), self.cmd))

        node.cleanup()

        output = self.restore_node(
            backup_dir, 'node', node, backup_id=full_id, options=['--force'])

        self.assertIn(
            'WARNING: Backup {0} has status: CORRUPT'.format(full_id),
            output)

        self.assertIn(
            'WARNING: Backup {0} is corrupt.'.format(full_id),
            output)

        self.assertIn(
            'WARNING: Backup {0} is not valid, restore is forced'.format(full_id),
            output)

        self.assertIn(
            'INFO: Restore of backup {0} completed.'.format(full_id),
            output)

        node.cleanup()

        output = self.restore_node(
            backup_dir, 'node', node, backup_id=delta_id, options=['--force'])

        self.assertIn(
            'WARNING: Backup {0} is orphan.'.format(delta_id),
            output)

        self.assertIn(
            'WARNING: Backup {0} is not valid, restore is forced'.format(full_id),
            output)

        self.assertIn(
            'WARNING: Backup {0} is not valid, restore is forced'.format(delta_id),
            output)

        self.assertIn(
            'INFO: Restore of backup {0} completed.'.format(delta_id),
            output)

    def test_note_sanity(self):
        """
        test that adding a note to a backup works as expected
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # FULL backup
        backup_id = self.backup_node(
            backup_dir, 'node', node,
            options=['--stream', '--log-level-file=LOG', '--note=test_note'])

        show_backups = self.show_pb(backup_dir, 'node')

        print(self.show_pb(backup_dir, as_text=True, as_json=True))

        self.assertEqual(show_backups[0]['note'], "test_note")

        self.set_backup(backup_dir, 'node', backup_id, options=['--note=none'])

        backup_meta = self.show_pb(backup_dir, 'node', backup_id)

        self.assertNotIn(
            'note',
            backup_meta)

    # @unittest.skip("skip")
    def test_parent_backup_made_by_newer_version(self):
        """incremental backup with parent made by newer version"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        backup_id = self.backup_node(backup_dir, 'node', node)

        control_file = os.path.join(
            backup_dir, "backups", "node", backup_id,
            "backup.control")

        version = self.probackup_version
        fake_new_version = str(int(version.split('.')[0]) + 1) + '.0.0'

        with open(control_file, 'r') as f:
            data = f.read()

        data = data.replace(version, fake_new_version)

        with open(control_file, 'w') as f:
            f.write(data)

        try:
            self.backup_node(backup_dir, 'node', node, backup_type="page")
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because incremental backup should not be possible "
                "if parent made by newer version.\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                "pg_probackup do not guarantee to be forward compatible. "
                "Please upgrade pg_probackup binary.",
                e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            self.show_pb(backup_dir, 'node')[1]['status'], "ERROR")
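
    # The control-file edit above could be factored into a reusable helper.
    # A sketch (hypothetical, not used by the tests; hard-codes the 'node'
    # instance name for brevity):
    def _spoof_backup_program_version_sketch(self, backup_dir, backup_id, new_version):
        """Rewrite the program version recorded in a backup's backup.control."""
        control_file = os.path.join(
            backup_dir, 'backups', 'node', backup_id, 'backup.control')
        with open(control_file, 'r') as f:
            data = f.read()
        data = data.replace(self.probackup_version, new_version)
        with open(control_file, 'w') as f:
            f.write(data)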

    # @unittest.skip("skip")
    def test_issue_289(self):
        """
        https://github.com/postgrespro/pg_probackup/issues/289
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type='page', options=['--archive-timeout=10s'])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because full backup is missing"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertNotIn(
                "INFO: Wait for WAL segment",
                e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

            self.assertIn(
                "ERROR: Create new full backup before an incremental one",
                e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            self.show_pb(backup_dir, 'node')[0]['status'], "ERROR")

    # @unittest.skip("skip")
    def test_issue_290(self):
        """
        https://github.com/postgrespro/pg_probackup/issues/290
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)

        os.rmdir(
            os.path.join(backup_dir, "wal", "node"))

        node.slow_start()

        try:
            self.backup_node(
                backup_dir, 'node', node,
                options=['--archive-timeout=10s'])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because full backup is missing"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertNotIn(
                "INFO: Wait for WAL segment",
                e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

            self.assertIn(
                "WAL archive directory is not accessible",
                e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            self.show_pb(backup_dir, 'node')[0]['status'], "ERROR")

    @unittest.skip("skip")
    def test_issue_203(self):
        """
        https://github.com/postgrespro/pg_probackup/issues/203
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        with node.connect("postgres") as conn:
            for i in range(1000000):
                conn.execute(
                    "CREATE TABLE t_{0} as select 1".format(i))
                conn.commit()

        full_id = self.backup_node(
            backup_dir, 'node', node, options=['--stream', '-j2'])

        pgdata = self.pgdata_content(node.data_dir)

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        self.restore_node(backup_dir, 'node',
                          node_restored, data_dir=node_restored.data_dir)

        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    # @unittest.skip("skip")
    def test_issue_231(self):
        """
        https://github.com/postgrespro/pg_probackup/issues/231
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        datadir = os.path.join(node.data_dir, '123')

        pb1 = self.backup_node(backup_dir, 'node', node, data_dir='{0}'.format(datadir))
        pb2 = self.backup_node(backup_dir, 'node', node, options=['--stream'])

        self.assertNotEqual(pb1, pb2)

    def test_incr_backup_filenode_map(self):
        """
        https://github.com/postgrespro/pg_probackup/issues/320
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node1 = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node1'),
            initdb_params=['--data-checksums'])
        node1.cleanup()

        node.pgbench_init(scale=5)

        # FULL backup
        backup_id = self.backup_node(backup_dir, 'node', node)

        pgbench = node.pgbench(
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
            options=['-T', '10', '-c', '1'])

        backup_id = self.backup_node(backup_dir, 'node', node, backup_type='delta')

        node.safe_psql(
            'postgres',
            'reindex index pg_type_oid_index')

        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type='delta')

        # restore and check that the node comes up
        node.cleanup()
        self.restore_node(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            'postgres',
            'select 1')
2021-03-22 21:26:32 +03:00

    # @unittest.skip("skip")
    def test_missing_wal_segment(self):
        """ """
        self._check_gdb_flag_or_skip_test()

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')

        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'],
            pg_options={'archive_timeout': '30s'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.pgbench_init(scale=10)

        node.safe_psql(
            'postgres',
            'CREATE DATABASE backupdb')
        # get segments in pg_wal, sort them and remove all but the latest
        if node.major_version >= 10:
            pg_wal_dir = os.path.join(node.data_dir, 'pg_wal')
        else:
            pg_wal_dir = os.path.join(node.data_dir, 'pg_xlog')
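        # (PostgreSQL 10 renamed the WAL directory from pg_xlog to pg_wal,
        # hence the version check above.)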

        # Full backup in streaming mode
        gdb = self.backup_node(
            backup_dir, 'node', node, datname='backupdb',
            options=['--stream', '--log-level-file=INFO'], gdb=True)

        # break at streaming start
        gdb.set_breakpoint('start_WAL_streaming')
        gdb.run_until_break()

        # generate some more data
        node.pgbench_init(scale=3)

        # remove redundant WAL segments in pg_wal:
        # keep the two newest segments on disk, delete the rest
        files = os.listdir(pg_wal_dir)
        files.sort(reverse=True)
        del files[:2]
        for filename in files:
            os.remove(os.path.join(pg_wal_dir, filename))

        gdb.continue_execution_until_exit()

        self.assertIn(
            'unexpected termination of replication stream: ERROR: requested WAL segment',
            gdb.output)

        self.assertIn(
            'has already been removed',
            gdb.output)

        self.assertIn(
            'ERROR: Interrupted during waiting for WAL streaming',
            gdb.output)

        self.assertIn(
            'WARNING: backup in progress, stop backup',
            gdb.output)

        # TODO: check the same for PAGE backup

    # @unittest.skip("skip")
    def test_missing_replication_permission(self):
        """ """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')

        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        # self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # FULL backup
        self.backup_node(backup_dir, 'node', node, options=['--stream'])

        # Create replica
        replica = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'replica'))
        replica.cleanup()
        self.restore_node(backup_dir, 'node', replica)

        # Settings for Replica
        self.set_replica(node, replica)
        replica.slow_start(replica=True)

        node.safe_psql(
            'postgres',
            'CREATE DATABASE backupdb')
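
        # Create a minimal 'backup' role with just enough catalog access
        # for pg_probackup, but deliberately WITHOUT the REPLICATION
        # attribute: the stream backup below must fail when it tries to
        # start a walsender. The GRANT set differs per server version
        # because the backup control functions changed signatures over
        # time (and were renamed to pg_backup_start/pg_backup_stop in 15).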
        # PG 9.5
        if self.get_version(node) < 90600:
            node.safe_psql(
                'backupdb',
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; "  # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;")
        # PG 9.6
        elif self.get_version(node) >= 90600 and self.get_version(node) < 100000:
            node.safe_psql(
                'backupdb',
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; "  # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;")
        # >= 10 && < 15
        elif self.get_version(node) >= 100000 and self.get_version(node) < 150000:
            node.safe_psql(
                'backupdb',
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; "  # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;")
        # >= 15
        else:
            node.safe_psql(
                'backupdb',
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; "  # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;")

        if ProbackupTest.enterprise:
            node.safe_psql(
                "backupdb",
                "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;")

        sleep(2)
        replica.promote()

        # Delta backup
        try:
            self.backup_node(
                backup_dir, 'node', replica, backup_type='delta',
                data_dir=replica.data_dir, datname='backupdb',
                options=['--stream', '-U', 'backup'])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because the backup role lacks the "
                "REPLICATION attribute"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            # 9.5: ERROR: must be superuser or replication role to run a backup
            # >=9.6: FATAL: must be superuser or replication role to start walsender
            self.assertRegex(
                e.message,
                "ERROR: must be superuser or replication role to run a backup"
                "|FATAL: must be superuser or replication role to start walsender",
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

    # @unittest.skip("skip")
    def test_missing_replication_permission_1(self):
        """ """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')

        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # FULL backup
        self.backup_node(backup_dir, 'node', node, options=['--stream'])

        # Create replica
        replica = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'replica'))
        replica.cleanup()
        self.restore_node(backup_dir, 'node', replica)

        # Settings for Replica
        self.set_replica(node, replica)
        replica.slow_start(replica=True)

        node.safe_psql(
            'postgres',
            'CREATE DATABASE backupdb')
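
        # Same minimal non-replication 'backup' role as in the previous
        # test, but here archiving is enabled and a PAGE backup is taken:
        # the missing permission should only degrade the run to a WARNING
        # (no streaming connection), not fail it outright.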
        # PG 9.5
        if self.get_version(node) < 90600:
            node.safe_psql(
                'backupdb',
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; "  # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;")
        # PG 9.6
        elif self.get_version(node) >= 90600 and self.get_version(node) < 100000:
            node.safe_psql(
                'backupdb',
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; "  # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;")
        # >= 10 && < 15
        elif self.get_version(node) >= 100000 and self.get_version(node) < 150000:
            node.safe_psql(
                'backupdb',
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; "  # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;")
        # >= 15
        else:
            node.safe_psql(
                'backupdb',
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; "  # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;")

        if ProbackupTest.enterprise:
            node.safe_psql(
                "backupdb",
                "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;")

        replica.promote()

        # PAGE
        output = self.backup_node(
            backup_dir, 'node', replica, backup_type='page',
            data_dir=replica.data_dir, datname='backupdb',
            options=['-U', 'backup'], return_id=False)

        self.assertIn(
            'WARNING: Valid full backup on current timeline 2 is not found, trying to look up on previous timelines',
            output)

        # Messages before 14
        # 'WARNING: could not connect to database backupdb: FATAL: must be superuser or replication role to start walsender'
        # Messages for >= 14
        # 'WARNING: could not connect to database backupdb: connection to server on socket "/tmp/.s.PGSQL.30983" failed: FATAL: must be superuser or replication role to start walsender'
        # 'WARNING: could not connect to database backupdb: connection to server at "localhost" (127.0.0.1), port 29732 failed: FATAL: must be superuser or replication role to start walsender'
        self.assertRegex(
            output,
            r'WARNING: could not connect to database backupdb: '
            r'(connection to server (on socket "/tmp/.s.PGSQL.\d+"'
            r'|at "localhost" \(127.0.0.1\), port \d+) failed: )?'
            'FATAL: must be superuser or replication role to start walsender')

    # @unittest.skip("skip")
    def test_basic_backup_default_transaction_read_only(self):
        """ """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')

        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'default_transaction_read_only': 'on'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()
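
        # Sanity check: with default_transaction_read_only = on, ordinary
        # DDL fails, yet all backup modes below must still succeed, since
        # taking a backup does not require a read-write transaction.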
        try:
            node.safe_psql(
                'postgres',
                'create temp table t1()')
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because CREATE TABLE is not allowed "
                "in a read-only transaction"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except QueryException as e:
            self.assertIn(
                "cannot execute CREATE TABLE in a read-only transaction",
                e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

        # FULL backup
        self.backup_node(
            backup_dir, 'node', node,
            options=['--stream'])

        # DELTA backup
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta', options=['--stream'])

        # PAGE backup
        self.backup_node(backup_dir, 'node', node, backup_type='page')

    # @unittest.skip("skip")
    def test_backup_atexit(self):
        """ """
        self._check_gdb_flag_or_skip_test()

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')

        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.pgbench_init(scale=5)

        # Full backup in streaming mode
        gdb = self.backup_node(
            backup_dir, 'node', node,
            options=['--stream', '--log-level-file=VERBOSE'], gdb=True)

        # break at the point where data files are being copied
        gdb.set_breakpoint('backup_data_file')
        gdb.run_until_break()

        gdb.remove_all_breakpoints()
        gdb._execute('signal SIGINT')
        sleep(1)
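
        # SIGINT should fire pg_probackup's atexit hook, which stops the
        # backup on the server side and marks the catalog entry as ERROR;
        # the assertions below check both the status and the log trail.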
        self.assertEqual(
            self.show_pb(
                backup_dir, 'node')[0]['status'], 'ERROR')

        with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:
            log_content = f.read()

        self.assertIn(
            'WARNING: backup in progress, stop backup',
            log_content)

        if self.get_version(node) < 150000:
            self.assertIn(
                'FROM pg_catalog.pg_stop_backup',
                log_content)
        else:
            self.assertIn(
                'FROM pg_catalog.pg_backup_stop',
                log_content)

        self.assertIn(
            'setting its status to ERROR',
            log_content)

    # @unittest.skip("skip")
    def test_pg_stop_backup_missing_permissions(self):
        """ """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')

        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.pgbench_init(scale=5)

        self.simple_bootstrap(node, 'backup')
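
        # Revoke EXECUTE on the backup-stop function from the 'backup'
        # role. The function signature differs across server versions,
        # and PostgreSQL 15 renamed pg_stop_backup() to pg_backup_stop(),
        # hence the branches below.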
        if self.get_version(node) < 90600:
            node.safe_psql(
                'postgres',
                'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() FROM backup')
        elif self.get_version(node) >= 90600 and self.get_version(node) < 100000:
            node.safe_psql(
                'postgres',
                'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) FROM backup')
        elif self.get_version(node) < 150000:
            node.safe_psql(
                'postgres',
                'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) FROM backup')
        else:
            node.safe_psql(
                'postgres',
                'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) FROM backup')

        # Full backup in streaming mode
        try:
            self.backup_node(
                backup_dir, 'node', node,
                options=['--stream', '-U', 'backup'])
            # we should die here because exception is what we expect to happen
            if self.get_version(node) < 150000:
                self.assertEqual(
                    1, 0,
                    "Expecting Error because of missing permissions on pg_stop_backup"
                    "\n Output: {0} \n CMD: {1}".format(
                        repr(self.output), self.cmd))
            else:
                self.assertEqual(
                    1, 0,
                    "Expecting Error because of missing permissions on pg_backup_stop"
                    "\n Output: {0} \n CMD: {1}".format(
                        repr(self.output), self.cmd))
        except ProbackupException as e:
            if self.get_version(node) < 150000:
                self.assertIn(
                    "ERROR: permission denied for function pg_stop_backup",
                    e.message,
                    "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                        repr(e.message), self.cmd))
            else:
                self.assertIn(
                    "ERROR: permission denied for function pg_backup_stop",
                    e.message,
                    "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                        repr(e.message), self.cmd))

            self.assertIn(
                "query was: SELECT pg_catalog.txid_snapshot_xmax",
                e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

    # @unittest.skip("skip")
    def test_start_time(self):
        """Test that --start-time sets the backup_id and that the backup can be restored by it"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()
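
        # The backup ID is the start timestamp encoded in base36, so a
        # backup taken with --start-time=T can be addressed afterwards as
        # base36enc(T). A minimal sketch of the mapping relied on below:
        #
        #   startTime = int(time())            # unix seconds
        #   backup_id = base36enc(startTime)   # the ID 'show' would print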
        # FULL backup
        startTime = int(time())
        self.backup_node(
            backup_dir, 'node', node, backup_type='full',
            options=['--stream', '--start-time={0}'.format(startTime)])
        # restore FULL backup by backup_id calculated from start-time
        self.restore_node(
            backup_dir, 'node',
            data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_full'),
            backup_id=base36enc(startTime))

        # FULL backup with incorrect start time
        try:
            startTime = str(int(time() - 100000))
            self.backup_node(
                backup_dir, 'node', node, backup_type='full',
                options=['--stream', '--start-time={0}'.format(startTime)])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                'Expecting Error because start time for new backup must be newer'
                '\n Output: {0} \n CMD: {1}'.format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertRegex(
                e.message,
                r"ERROR: Can't assign backup_id from requested start_time \(\w*\), this time must be later that backup \w*\n",
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

        # DELTA backup
        startTime = int(time())
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta',
            options=['--stream', '--start-time={0}'.format(startTime)])
        # restore DELTA backup by backup_id calculated from start-time
        self.restore_node(
            backup_dir, 'node',
            data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_delta'),
            backup_id=base36enc(startTime))

        # PAGE backup
        startTime = int(time())
        self.backup_node(
            backup_dir, 'node', node, backup_type='page',
            options=['--stream', '--start-time={0}'.format(startTime)])
        # restore PAGE backup by backup_id calculated from start-time
        self.restore_node(
            backup_dir, 'node',
            data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_page'),
            backup_id=base36enc(startTime))

        # PTRACK backup
        if self.ptrack:
            node.safe_psql(
                'postgres',
                'create extension ptrack')

            startTime = int(time())
            self.backup_node(
                backup_dir, 'node', node, backup_type='ptrack',
                options=['--stream', '--start-time={0}'.format(startTime)])
            # restore PTRACK backup by backup_id calculated from start-time
            self.restore_node(
                backup_dir, 'node',
                data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_ptrack'),
                backup_id=base36enc(startTime))

    # @unittest.skip("skip")
    def test_start_time_few_nodes(self):
        """Test that backup_ids can be synchronized across instances via --start-time"""
        node1 = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node1'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'])

        backup_dir1 = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup1')
        self.init_pb(backup_dir1)
        self.add_instance(backup_dir1, 'node1', node1)
        self.set_archiving(backup_dir1, 'node1', node1)
        node1.slow_start()

        node2 = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node2'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'])

        backup_dir2 = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup2')
        self.init_pb(backup_dir2)
        self.add_instance(backup_dir2, 'node2', node2)
        self.set_archiving(backup_dir2, 'node2', node2)
        node2.slow_start()
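
        # For each backup type below, both instances are backed up with
        # the same --start-time value, so they must end up with identical
        # backup IDs (the ID is derived from that timestamp).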
        # FULL backup
        startTime = str(int(time()))
        self.backup_node(
            backup_dir1, 'node1', node1, backup_type='full',
            options=['--stream', '--start-time={0}'.format(startTime)])
        self.backup_node(
            backup_dir2, 'node2', node2, backup_type='full',
            options=['--stream', '--start-time={0}'.format(startTime)])
        show_backup1 = self.show_pb(backup_dir1, 'node1')[0]
        show_backup2 = self.show_pb(backup_dir2, 'node2')[0]
        self.assertEqual(show_backup1['id'], show_backup2['id'])

        # DELTA backup
        startTime = str(int(time()))
        self.backup_node(
            backup_dir1, 'node1', node1, backup_type='delta',
            options=['--stream', '--start-time={0}'.format(startTime)])
        self.backup_node(
            backup_dir2, 'node2', node2, backup_type='delta',
            options=['--stream', '--start-time={0}'.format(startTime)])
        show_backup1 = self.show_pb(backup_dir1, 'node1')[1]
        show_backup2 = self.show_pb(backup_dir2, 'node2')[1]
        self.assertEqual(show_backup1['id'], show_backup2['id'])

        # PAGE backup
        startTime = str(int(time()))
        self.backup_node(
            backup_dir1, 'node1', node1, backup_type='page',
            options=['--stream', '--start-time={0}'.format(startTime)])
        self.backup_node(
            backup_dir2, 'node2', node2, backup_type='page',
            options=['--stream', '--start-time={0}'.format(startTime)])
        show_backup1 = self.show_pb(backup_dir1, 'node1')[2]
        show_backup2 = self.show_pb(backup_dir2, 'node2')[2]
        self.assertEqual(show_backup1['id'], show_backup2['id'])

        # PTRACK backup
        if self.ptrack:
            node1.safe_psql(
                'postgres',
                'create extension ptrack')
            node2.safe_psql(
                'postgres',
                'create extension ptrack')

            startTime = str(int(time()))
            self.backup_node(
                backup_dir1, 'node1', node1, backup_type='ptrack',
                options=['--stream', '--start-time={0}'.format(startTime)])
            self.backup_node(
                backup_dir2, 'node2', node2, backup_type='ptrack',
                options=['--stream', '--start-time={0}'.format(startTime)])
            show_backup1 = self.show_pb(backup_dir1, 'node1')[3]
            show_backup2 = self.show_pb(backup_dir2, 'node2')[3]
            self.assertEqual(show_backup1['id'], show_backup2['id'])