import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
from datetime import datetime, timedelta
import subprocess
from testgres import QueryException
import shutil
import sys
import time

# Name of this test module; used to build per-test working directories.
module_name = 'checkdb'
class CheckdbTest(ProbackupTest, unittest.TestCase):

    # @unittest.skip("skip")
    def test_checkdb_amcheck_only_sanity(self):
        """Sanity checks for 'checkdb --amcheck': option validation,
        console and file logging, and failure reporting when an index
        is dropped while amcheck is running."""
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "create table t_heap as select i"
            " as id from generate_series(0,100) i")

        node.safe_psql(
            "postgres",
            "create index on t_heap(id)")

        # amcheck (or its fork amcheck_next) must be installed for --amcheck
        try:
            node.safe_psql(
                "postgres",
                "create extension amcheck")
        except QueryException:
            node.safe_psql(
                "postgres",
                "create extension amcheck_next")

        log_file_path = os.path.join(
            backup_dir, 'log', 'pg_probackup.log')

        # simple sanity: --skip-block-validation alone is rejected
        try:
            self.checkdb_node(
                options=['--skip-block-validation'])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because --amcheck option is missing\n"
                " Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                "ERROR: Option '--skip-block-validation' must be "
                "used with '--amcheck' option",
                e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

        # simple sanity: amcheck-only run succeeds
        output = self.checkdb_node(
            options=[
                '--amcheck',
                '--skip-block-validation',
                '-d', 'postgres', '-p', str(node.port)])

        self.assertIn(
            'INFO: checkdb --amcheck finished successfully',
            output)
        self.assertIn(
            'All checked indexes are valid',
            output)

        # logging to file sanity: file logging requires --log-directory
        try:
            self.checkdb_node(
                options=[
                    '--amcheck',
                    '--skip-block-validation',
                    '--log-level-file=verbose',
                    '-d', 'postgres', '-p', str(node.port)])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because log_directory missing\n"
                " Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                "ERROR: Cannot save checkdb logs to a file. "
                "You must specify --log-directory option when "
                "running checkdb with --log-level-file option enabled",
                e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

        # If backup_dir provided, then instance name must be
        # provided too
        try:
            self.checkdb_node(
                backup_dir,
                options=[
                    '--amcheck',
                    '--skip-block-validation',
                    '--log-level-file=verbose',
                    '-d', 'postgres', '-p', str(node.port)])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because log_directory missing\n"
                " Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                "ERROR: required parameter not specified: --instance",
                e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

        # checkdb can use default or set in config values,
        # if backup_dir and instance name are provided
        self.checkdb_node(
            backup_dir,
            'node',
            options=[
                '--amcheck',
                '--skip-block-validation',
                '--log-level-file=verbose',
                '-d', 'postgres', '-p', str(node.port)])

        # check that file present and full of messages
        os.path.isfile(log_file_path)
        with open(log_file_path) as f:
            log_file_content = f.read()
            self.assertIn(
                'INFO: checkdb --amcheck finished successfully',
                log_file_content)
            self.assertIn(
                'VERBOSE: (query)',
                log_file_content)
        os.unlink(log_file_path)

        # log-level-file and log-directory are provided
        self.checkdb_node(
            backup_dir,
            'node',
            options=[
                '--amcheck',
                '--skip-block-validation',
                '--log-level-file=verbose',
                '--log-directory={0}'.format(
                    os.path.join(backup_dir, 'log')),
                '-d', 'postgres', '-p', str(node.port)])

        # check that file present and full of messages
        os.path.isfile(log_file_path)
        with open(log_file_path) as f:
            log_file_content = f.read()
            self.assertIn(
                'INFO: checkdb --amcheck finished successfully',
                log_file_content)
            self.assertIn(
                'VERBOSE: (query)',
                log_file_content)
        os.unlink(log_file_path)

        # run under gdb so the target table can be dropped mid-check
        gdb = self.checkdb_node(
            gdb=True,
            options=[
                '--amcheck',
                '--skip-block-validation',
                '--log-level-file=verbose',
                '--log-directory={0}'.format(
                    os.path.join(backup_dir, 'log')),
                '-d', 'postgres', '-p', str(node.port)])

        gdb.set_breakpoint('amcheck_one_index')
        gdb.run_until_break()

        node.safe_psql(
            "postgres",
            "drop table t_heap")

        gdb.remove_all_breakpoints()

        gdb.continue_execution_until_exit()

        # check that message about missing index is present
        with open(log_file_path) as f:
            log_file_content = f.read()
            self.assertIn(
                'ERROR: checkdb --amcheck finished with failure',
                log_file_content)
            self.assertIn(
                "WARNING: Thread [1]. Amcheck failed in database 'postgres' "
                "for index: 'public.t_heap_id_idx':",
                log_file_content)
            self.assertIn(
                'ERROR: could not open relation with OID',
                log_file_content)

        # Clean after yourself
        gdb.kill()
        node.stop()
        self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
2019-05-28 12:41:03 +03:00
def test_basic_checkdb_amcheck_only_sanity ( self ) :
2019-04-19 01:52:41 +03:00
""" """
fname = self . id ( ) . split ( ' . ' ) [ 3 ]
backup_dir = os . path . join ( self . tmp_path , module_name , fname , ' backup ' )
node = self . make_simple_node (
base_dir = " {0} / {1} /node " . format ( module_name , fname ) ,
2019-04-23 10:06:27 +03:00
set_replication = True ,
initdb_params = [ ' --data-checksums ' ] )
2019-04-19 01:52:41 +03:00
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
node . slow_start ( )
2019-04-19 11:12:19 +03:00
# create two databases
2019-04-19 10:59:52 +03:00
node . safe_psql ( " postgres " , " create database db1 " )
2019-04-22 20:52:00 +03:00
try :
2019-07-12 18:01:28 +03:00
node . safe_psql (
2019-04-22 20:52:00 +03:00
" db1 " ,
" create extension amcheck " )
except QueryException as e :
node . safe_psql (
" db1 " ,
" create extension amcheck_next " )
2019-04-19 01:52:41 +03:00
2019-04-19 10:59:52 +03:00
node . safe_psql ( " postgres " , " create database db2 " )
2019-04-22 20:52:00 +03:00
try :
node . safe_psql (
" db2 " ,
" create extension amcheck " )
except QueryException as e :
node . safe_psql (
" db2 " ,
" create extension amcheck_next " )
2019-04-19 01:52:41 +03:00
2019-04-19 11:12:19 +03:00
# init pgbench in two databases and corrupt both indexes
2019-04-19 10:59:52 +03:00
node . pgbench_init ( scale = 5 , dbname = ' db1 ' )
node . pgbench_init ( scale = 5 , dbname = ' db2 ' )
2019-04-19 01:52:41 +03:00
node . safe_psql (
" db2 " ,
" alter index pgbench_accounts_pkey rename to some_index " )
index_path_1 = os . path . join (
node . data_dir ,
node . safe_psql (
" db1 " ,
2020-10-30 02:47:06 +03:00
" select pg_relation_filepath( ' pgbench_accounts_pkey ' ) " ) . decode ( ' utf-8 ' ) . rstrip ( ) )
2019-04-19 01:52:41 +03:00
index_path_2 = os . path . join (
node . data_dir ,
node . safe_psql (
" db2 " ,
2020-10-30 02:47:06 +03:00
" select pg_relation_filepath( ' some_index ' ) " ) . decode ( ' utf-8 ' ) . rstrip ( ) )
2019-04-19 01:52:41 +03:00
try :
self . checkdb_node (
options = [
' --amcheck ' ,
' --skip-block-validation ' ,
' -d ' , ' postgres ' , ' -p ' , str ( node . port ) ] )
# we should die here because exception is what we expect to happen
self . assertEqual (
1 , 0 ,
" Expecting Error because some db was not amchecked "
" Output: {0} \n CMD: {1} " . format (
repr ( self . output ) , self . cmd ) )
except ProbackupException as e :
self . assertIn (
" ERROR: Some databases were not amchecked " ,
e . message ,
" \n Unexpected Error Message: {0} \n CMD: {1} " . format (
repr ( e . message ) , self . cmd ) )
node . stop ( )
# Let`s do index corruption
with open ( index_path_1 , " rb+ " , 0 ) as f :
f . seek ( 42000 )
f . write ( b " blablahblahs " )
f . flush ( )
f . close
with open ( index_path_2 , " rb+ " , 0 ) as f :
f . seek ( 42000 )
f . write ( b " blablahblahs " )
f . flush ( )
f . close
node . slow_start ( )
log_file_path = os . path . join (
backup_dir , ' log ' , ' pg_probackup.log ' )
try :
self . checkdb_node (
options = [
' --amcheck ' ,
' --skip-block-validation ' ,
' --log-level-file=verbose ' ,
' --log-directory= {0} ' . format (
os . path . join ( backup_dir , ' log ' ) ) ,
' -d ' , ' postgres ' , ' -p ' , str ( node . port ) ] )
# we should die here because exception is what we expect to happen
self . assertEqual (
1 , 0 ,
" Expecting Error because some db was not amchecked "
" Output: {0} \n CMD: {1} " . format (
repr ( self . output ) , self . cmd ) )
except ProbackupException as e :
self . assertIn (
2019-05-31 18:15:43 +03:00
" ERROR: checkdb --amcheck finished with failure " ,
2019-04-19 01:52:41 +03:00
e . message ,
" \n Unexpected Error Message: {0} \n CMD: {1} " . format (
repr ( e . message ) , self . cmd ) )
# corruption of both indexes in db1 and db2 must be detected
# also the that amcheck is not installed in 'postgres'
2019-07-23 20:10:58 +03:00
# should be logged
2019-04-19 01:52:41 +03:00
with open ( log_file_path ) as f :
log_file_content = f . read ( )
self . assertIn (
2019-05-29 12:09:53 +03:00
" WARNING: Thread [1]. Amcheck failed in database ' db1 ' "
" for index: ' public.pgbench_accounts_pkey ' : " ,
2019-04-19 01:52:41 +03:00
log_file_content )
self . assertIn (
2019-05-29 12:09:53 +03:00
" WARNING: Thread [1]. Amcheck failed in database ' db2 ' "
" for index: ' public.some_index ' : " ,
2019-04-19 01:52:41 +03:00
log_file_content )
self . assertIn (
2019-05-31 18:15:43 +03:00
" ERROR: checkdb --amcheck finished with failure " ,
2019-04-19 01:52:41 +03:00
log_file_content )
# Clean after yourself
2022-02-17 01:24:08 +03:00
node . stop ( )
2021-02-18 05:13:11 +00:00
self . del_test_dir ( module_name , fname )
2019-04-19 01:52:41 +03:00
# @unittest.skip("skip")
def test_checkdb_block_validation_sanity ( self ) :
2019-03-31 11:43:50 +03:00
""" make node, corrupt some pages, check that checkdb failed """
fname = self . id ( ) . split ( ' . ' ) [ 3 ]
node = self . make_simple_node (
base_dir = os . path . join ( module_name , fname , ' node ' ) ,
set_replication = True ,
initdb_params = [ ' --data-checksums ' ] )
backup_dir = os . path . join ( self . tmp_path , module_name , fname , ' backup ' )
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
node . slow_start ( )
node . safe_psql (
" postgres " ,
" create table t_heap as select 1 as id, md5(i::text) as text, "
" md5(repeat(i::text,10))::tsvector as tsvector "
" from generate_series(0,1000) i " )
node . safe_psql (
" postgres " ,
" CHECKPOINT; " )
heap_path = node . safe_psql (
" postgres " ,
2020-10-30 02:47:06 +03:00
" select pg_relation_filepath( ' t_heap ' ) " ) . decode ( ' utf-8 ' ) . rstrip ( )
2019-03-31 11:43:50 +03:00
2019-04-19 01:52:41 +03:00
# sanity
try :
self . checkdb_node ( )
# we should die here because exception is what we expect to happen
self . assertEqual (
1 , 0 ,
" Expecting Error because pgdata must be specified \n "
" Output: {0} \n CMD: {1} " . format (
repr ( self . output ) , self . cmd ) )
except ProbackupException as e :
self . assertIn (
" ERROR: required parameter not specified: PGDATA (-D, --pgdata) " ,
e . message ,
" \n Unexpected Error Message: {0} \n CMD: {1} " . format (
repr ( e . message ) , self . cmd ) )
self . checkdb_node (
data_dir = node . data_dir ,
options = [ ' -d ' , ' postgres ' , ' -p ' , str ( node . port ) ] )
self . checkdb_node (
backup_dir , ' node ' ,
options = [ ' -d ' , ' postgres ' , ' -p ' , str ( node . port ) ] )
heap_full_path = os . path . join ( node . data_dir , heap_path )
with open ( heap_full_path , " rb+ " , 0 ) as f :
2019-03-31 11:43:50 +03:00
f . seek ( 9000 )
f . write ( b " bla " )
f . flush ( )
f . close
2019-04-19 01:52:41 +03:00
with open ( heap_full_path , " rb+ " , 0 ) as f :
2019-03-31 11:43:50 +03:00
f . seek ( 42000 )
f . write ( b " bla " )
f . flush ( )
f . close
2019-04-19 01:52:41 +03:00
try :
self . checkdb_node (
backup_dir , ' node ' ,
options = [ ' -d ' , ' postgres ' , ' -p ' , str ( node . port ) ] )
# we should die here because exception is what we expect to happen
self . assertEqual (
1 , 0 ,
" Expecting Error because of data corruption \n "
" Output: {0} \n CMD: {1} " . format (
repr ( self . output ) , self . cmd ) )
except ProbackupException as e :
self . assertIn (
" ERROR: Checkdb failed " ,
e . message ,
" \n Unexpected Error Message: {0} \n CMD: {1} " . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
2019-07-01 16:24:57 +03:00
' WARNING: Corruption detected in file " {0} " , block 1 ' . format (
2019-04-30 01:49:14 +03:00
os . path . normpath ( heap_full_path ) ) ,
2019-04-19 01:52:41 +03:00
e . message )
self . assertIn (
2019-07-01 16:24:57 +03:00
' WARNING: Corruption detected in file " {0} " , block 5 ' . format (
2019-04-30 01:49:14 +03:00
os . path . normpath ( heap_full_path ) ) ,
2019-04-19 01:52:41 +03:00
e . message )
2019-03-31 11:43:50 +03:00
# Clean after yourself
2022-02-17 01:24:08 +03:00
node . stop ( )
self . del_test_dir ( module_name , fname )
def test_checkdb_checkunique ( self ) :
""" Test checkunique parameter of amcheck.bt_index_check function """
fname = self . id ( ) . split ( ' . ' ) [ 3 ]
backup_dir = os . path . join ( self . tmp_path , module_name , fname , ' backup ' )
node = self . make_simple_node (
base_dir = os . path . join ( module_name , fname , ' node ' ) ,
initdb_params = [ ' --data-checksums ' ] )
node . slow_start ( )
try :
node . safe_psql (
" postgres " ,
" create extension amcheck " )
except QueryException as e :
node . safe_psql (
" postgres " ,
" create extension amcheck_next " )
# Part of https://commitfest.postgresql.org/32/2976/ patch test
node . safe_psql (
" postgres " ,
" CREATE TABLE bttest_unique(a varchar(50), b varchar(1500), c bytea, d varchar(50)); "
" ALTER TABLE bttest_unique SET (autovacuum_enabled = false); "
" CREATE UNIQUE INDEX bttest_unique_idx ON bttest_unique(a,b); "
" UPDATE pg_catalog.pg_index SET indisunique = false "
" WHERE indrelid = (SELECT oid FROM pg_catalog.pg_class WHERE relname = ' bttest_unique ' ); "
" INSERT INTO bttest_unique "
" SELECT i::text::varchar, "
" array_to_string(array( "
" SELECT substr( ' ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 ' , ((random()*(36-1)+1)::integer), 1) "
" FROM generate_series(1,1300)), ' ' )::varchar, "
" i::text::bytea, i::text::varchar "
" FROM generate_series(0,1) AS i, generate_series(0,30) AS x; "
" UPDATE pg_catalog.pg_index SET indisunique = true "
" WHERE indrelid = (SELECT oid FROM pg_catalog.pg_class WHERE relname = ' bttest_unique ' ); "
" DELETE FROM bttest_unique WHERE ctid::text= ' (0,2) ' ; "
" DELETE FROM bttest_unique WHERE ctid::text= ' (4,2) ' ; "
" DELETE FROM bttest_unique WHERE ctid::text= ' (4,3) ' ; "
" DELETE FROM bttest_unique WHERE ctid::text= ' (9,3) ' ; " )
# run without checkunique option (error will not detected)
output = self . checkdb_node (
options = [
' --amcheck ' ,
' --skip-block-validation ' ,
' -d ' , ' postgres ' , ' -p ' , str ( node . port ) ] )
self . assertIn (
' INFO: checkdb --amcheck finished successfully ' ,
output )
self . assertIn (
' All checked indexes are valid ' ,
output )
# run with checkunique option
try :
self . checkdb_node (
options = [
' --amcheck ' ,
' --skip-block-validation ' ,
' --checkunique ' ,
' -d ' , ' postgres ' , ' -p ' , str ( node . port ) ] )
if ( ProbackupTest . enterprise and
( self . get_version ( node ) > = 111300 and self . get_version ( node ) < 120000
or self . get_version ( node ) > = 120800 and self . get_version ( node ) < 130000
or self . get_version ( node ) > = 130400 ) ) :
# we should die here because exception is what we expect to happen
self . assertEqual (
1 , 0 ,
" Expecting Error because of index corruption \n "
" Output: {0} \n CMD: {1} " . format (
repr ( self . output ) , self . cmd ) )
else :
self . assertRegex (
self . output ,
r " WARNING: Extension ' amcheck(|_next) ' version [ \ d.]* in schema ' public ' do not support ' checkunique ' parameter " )
except ProbackupException as e :
self . assertIn (
" ERROR: checkdb --amcheck finished with failure. Not all checked indexes are valid. All databases were amchecked. " ,
e . message ,
" \n Unexpected Error Message: {0} \n CMD: {1} " . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
" Amcheck failed in database ' postgres ' for index: ' public.bttest_unique_idx ' : ERROR: index \" bttest_unique_idx \" is corrupted. There are tuples violating UNIQUE constraint " ,
e . message )
# Clean after yourself
node . stop ( )
2019-03-31 11:43:50 +03:00
self . del_test_dir ( module_name , fname )
2019-05-23 20:28:46 +03:00
# @unittest.skip("skip")
def test_checkdb_sigint_handling ( self ) :
""" """
2021-08-04 01:59:49 +07:00
if not self . gdb :
self . skipTest (
" Specify PGPROBACKUP_GDB and build without "
" optimizations for run this test "
)
2019-05-23 20:28:46 +03:00
fname = self . id ( ) . split ( ' . ' ) [ 3 ]
backup_dir = os . path . join ( self . tmp_path , module_name , fname , ' backup ' )
node = self . make_simple_node (
base_dir = os . path . join ( module_name , fname , ' node ' ) ,
set_replication = True ,
initdb_params = [ ' --data-checksums ' ] )
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
node . slow_start ( )
try :
node . safe_psql (
" postgres " ,
" create extension amcheck " )
except QueryException as e :
node . safe_psql (
" postgres " ,
" create extension amcheck_next " )
# FULL backup
gdb = self . checkdb_node (
backup_dir , ' node ' , gdb = True ,
options = [
2020-10-30 02:47:06 +03:00
' -d ' , ' postgres ' , ' -j ' , ' 2 ' ,
2019-05-23 20:28:46 +03:00
' --skip-block-validation ' ,
2020-10-30 02:47:06 +03:00
' --progress ' ,
2019-05-23 20:28:46 +03:00
' --amcheck ' , ' -p ' , str ( node . port ) ] )
gdb . set_breakpoint ( ' amcheck_one_index ' )
gdb . run_until_break ( )
2020-10-30 02:47:06 +03:00
gdb . continue_execution_until_break ( 20 )
2019-05-23 20:28:46 +03:00
gdb . remove_all_breakpoints ( )
gdb . _execute ( ' signal SIGINT ' )
2019-07-07 17:31:42 +03:00
gdb . continue_execution_until_error ( )
2019-05-23 20:28:46 +03:00
with open ( node . pg_log_file , ' r ' ) as f :
output = f . read ( )
self . assertNotIn ( ' could not receive data from client ' , output )
self . assertNotIn ( ' could not send data to client ' , output )
self . assertNotIn ( ' connection to client lost ' , output )
# Clean after yourself
2020-12-01 12:36:36 +03:00
gdb . kill ( )
2022-02-17 01:24:08 +03:00
node . stop ( )
2019-05-23 20:28:46 +03:00
self . del_test_dir ( module_name , fname )
2020-02-22 18:17:41 +03:00
# @unittest.skip("skip")
def test_checkdb_with_least_privileges ( self ) :
""" """
fname = self . id ( ) . split ( ' . ' ) [ 3 ]
backup_dir = os . path . join ( self . tmp_path , module_name , fname , ' backup ' )
node = self . make_simple_node (
base_dir = os . path . join ( module_name , fname , ' node ' ) ,
initdb_params = [ ' --data-checksums ' ] )
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
node . slow_start ( )
node . safe_psql (
' postgres ' ,
' CREATE DATABASE backupdb ' )
try :
node . safe_psql (
" backupdb " ,
" create extension amcheck " )
except QueryException as e :
node . safe_psql (
" backupdb " ,
" create extension amcheck_next " )
node . safe_psql (
' backupdb ' ,
" REVOKE ALL ON DATABASE backupdb from PUBLIC; "
" REVOKE ALL ON SCHEMA public from PUBLIC; "
" REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; "
" REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; "
" REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; "
" REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; "
" REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; "
" REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; "
" REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; "
" REVOKE ALL ON SCHEMA information_schema from PUBLIC; "
" REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; "
" REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; "
" REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " )
# PG 9.5
if self . get_version ( node ) < 90600 :
node . safe_psql (
' backupdb ' ,
' CREATE ROLE backup WITH LOGIN; '
' GRANT CONNECT ON DATABASE backupdb to backup; '
' GRANT USAGE ON SCHEMA pg_catalog TO backup; '
' GRANT USAGE ON SCHEMA public TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; '
2021-10-13 21:18:03 +03:00
' GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; '
2022-02-17 01:24:08 +03:00
' GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; '
2020-02-22 18:17:41 +03:00
' GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.charne( " char " , " char " ) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; '
2022-02-17 01:24:08 +03:00
' GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; '
2021-10-13 21:18:03 +03:00
' GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup; ' # amcheck-next function
2020-02-22 18:17:41 +03:00
)
# PG 9.6
elif self . get_version ( node ) > 90600 and self . get_version ( node ) < 100000 :
node . safe_psql (
' backupdb ' ,
' CREATE ROLE backup WITH LOGIN; '
' GRANT CONNECT ON DATABASE backupdb to backup; '
' GRANT USAGE ON SCHEMA pg_catalog TO backup; '
' GRANT USAGE ON SCHEMA public TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; '
2021-10-13 21:18:03 +03:00
' GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; '
2022-02-17 01:24:08 +03:00
' GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; '
2020-02-22 18:17:41 +03:00
' GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.charne( " char " , " char " ) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; '
2022-02-17 01:24:08 +03:00
' GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; '
2020-12-01 12:36:36 +03:00
# 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; '
2020-02-22 18:17:41 +03:00
' GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup; '
)
2021-10-13 21:18:03 +03:00
# PG 10
elif self . get_version ( node ) > 100000 and self . get_version ( node ) < 110000 :
node . safe_psql (
' backupdb ' ,
' CREATE ROLE backup WITH LOGIN; '
' GRANT CONNECT ON DATABASE backupdb to backup; '
' GRANT USAGE ON SCHEMA pg_catalog TO backup; '
' GRANT USAGE ON SCHEMA public TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; '
2022-02-17 01:24:08 +03:00
' GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; '
2021-10-13 21:18:03 +03:00
' GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.charne( " char " , " char " ) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; '
2022-02-17 01:24:08 +03:00
' GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; '
2022-05-31 12:49:20 +05:00
' GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; '
' GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; '
2021-10-13 21:18:03 +03:00
)
2021-10-18 05:51:12 +03:00
if ProbackupTest . enterprise :
# amcheck-1.1
node . safe_psql (
' backupdb ' ,
' GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup ' )
else :
# amcheck-1.0
node . safe_psql (
' backupdb ' ,
' GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup ' )
2022-02-17 01:24:08 +03:00
# >= 11 < 14
elif self . get_version ( node ) > 110000 and self . get_version ( node ) < 140000 :
node . safe_psql (
' backupdb ' ,
' CREATE ROLE backup WITH LOGIN; '
' GRANT CONNECT ON DATABASE backupdb to backup; '
' GRANT USAGE ON SCHEMA pg_catalog TO backup; '
' GRANT USAGE ON SCHEMA public TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.charne( " char " , " char " ) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; '
' GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; '
' GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup; '
2022-05-31 12:49:20 +05:00
' GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; '
2022-02-17 01:24:08 +03:00
)
# checkunique parameter
if ProbackupTest . enterprise :
if ( self . get_version ( node ) > = 111300 and self . get_version ( node ) < 120000
or self . get_version ( node ) > = 120800 and self . get_version ( node ) < 130000
or self . get_version ( node ) > = 130400 ) :
node . safe_psql (
" backupdb " ,
" GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool, bool) TO backup " )
# >= 14
2020-02-22 18:17:41 +03:00
else :
node . safe_psql (
' backupdb ' ,
' CREATE ROLE backup WITH LOGIN; '
' GRANT CONNECT ON DATABASE backupdb to backup; '
' GRANT USAGE ON SCHEMA pg_catalog TO backup; '
' GRANT USAGE ON SCHEMA public TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; '
' GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; '
2021-10-13 21:18:03 +03:00
' GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; '
2022-02-17 01:24:08 +03:00
' GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; '
2020-02-22 18:17:41 +03:00
' GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.charne( " char " , " char " ) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; '
2022-02-17 01:24:08 +03:00
' GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; '
' GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anycompatiblearray, anycompatible) TO backup; '
2020-02-22 18:17:41 +03:00
' GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; '
' GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup; '
2022-05-31 12:49:20 +05:00
' GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; '
2020-02-22 18:17:41 +03:00
)
2022-02-17 01:24:08 +03:00
# checkunique parameter
if ProbackupTest . enterprise :
node . safe_psql (
" backupdb " ,
" GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool, bool) TO backup " )
2020-02-22 18:17:41 +03:00
# checkdb
try :
self . checkdb_node (
backup_dir , ' node ' ,
options = [
' --amcheck ' , ' -U ' , ' backup ' ,
' -d ' , ' backupdb ' , ' -p ' , str ( node . port ) ] )
# we should die here because exception is what we expect to happen
self . assertEqual (
1 , 0 ,
" Expecting Error because permissions are missing \n "
" Output: {0} \n CMD: {1} " . format (
repr ( self . output ) , self . cmd ) )
except ProbackupException as e :
self . assertIn (
" INFO: Amcheck succeeded for database ' backupdb ' " ,
e . message ,
" \n Unexpected Error Message: {0} \n CMD: {1} " . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
" WARNING: Extension ' amcheck ' or ' amcheck_next ' are "
" not installed in database postgres " ,
e . message ,
" \n Unexpected Error Message: {0} \n CMD: {1} " . format (
repr ( e . message ) , self . cmd ) )
self . assertIn (
" ERROR: Some databases were not amchecked " ,
e . message ,
" \n Unexpected Error Message: {0} \n CMD: {1} " . format (
repr ( e . message ) , self . cmd ) )
2020-12-01 12:36:36 +03:00
# Clean after yourself
2022-02-17 01:24:08 +03:00
node . stop ( )
2020-12-01 12:36:36 +03:00
self . del_test_dir ( module_name , fname )