import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
from datetime import datetime, timedelta
import subprocess
from testgres import ClusterException
import shutil, sys, time


module_name = 'ptrack'


class PtrackBackupTest(ProbackupTest, unittest.TestCase):
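    """Backup, restore and data-consistency tests for PTRACK incremental backups."""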

    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_ptrack_enable(self):
        """take ptrack backup without ptrack support enabled, should result in error"""
        self.maxDiff = None
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s'}
            )
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.start()

        # PTRACK BACKUP
        try:
            self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream"])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because ptrack is disabled.\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Ptrack is disabled\n', e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_ptrack_disable(self):
        """Take full backup, disable ptrack, restart postgresql, enable ptrack, restart postgresql, take ptrack backup which should fail"""
        self.maxDiff = None
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s', 'ptrack_enable': 'on'}
            )
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.start()

        # FULL BACKUP
        # print('START')
        # print(node.safe_psql('postgres', "select pg_ptrack_control_lsn()"))
        self.backup_node(backup_dir, 'node', node, options=['--stream'])
        # print('AFTER FULL')
        # print(node.safe_psql('postgres', "select pg_ptrack_control_lsn()"))

        # DISABLE PTRACK
        node.safe_psql('postgres', "alter system set ptrack_enable to off")
        node.restart()
        # print('DISABLED')
        # print(node.safe_psql('postgres', "select pg_ptrack_control_lsn()"))

        # ENABLE PTRACK
        node.safe_psql('postgres', "alter system set ptrack_enable to on")
        node.restart()
        # print('ENABLED')
        # print(node.safe_psql('postgres', "select pg_ptrack_control_lsn()"))

        # PTRACK BACKUP
        try:
            self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream"])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because ptrack_enable was set to OFF at some point after previous backup.\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: LSN from ptrack_control', e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_ptrack_stream(self):
        """make node, make full and ptrack stream backups, restore them and check data correctness"""
        self.maxDiff = None
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s', 'ptrack_enable': 'on', 'autovacuum': 'off'}
            )
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.start()

        # FULL BACKUP
        node.safe_psql("postgres", "create sequence t_seq")
        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id, nextval('t_seq') as t_seq, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
        full_result = node.safe_psql("postgres", "SELECT * FROM t_heap")
        full_backup_id = self.backup_node(backup_dir, 'node', node, options=['--stream'])

        # PTRACK BACKUP
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, nextval('t_seq') as t_seq, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(100,200) i")
        ptrack_result = node.safe_psql("postgres", "SELECT * FROM t_heap")
        ptrack_backup_id = self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream'])
        pgdata = self.pgdata_content(node.data_dir)

        # Drop Node
        node.cleanup()

        # Restore and check full backup
        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(full_backup_id),
            self.restore_node(backup_dir, 'node', node, backup_id=full_backup_id, options=["-j", "4"]),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd))
        node.start()
        while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
            time.sleep(1)
        full_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap")
        self.assertEqual(full_result, full_result_new)
        node.cleanup()

        # Restore and check ptrack backup
        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(ptrack_backup_id),
            self.restore_node(backup_dir, 'node', node, backup_id=ptrack_backup_id, options=["-j", "4"]),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd))
        pgdata_restored = self.pgdata_content(node.data_dir)
        node.start()
        while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
            time.sleep(1)
        ptrack_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap")
        self.assertEqual(ptrack_result, ptrack_result_new)

        if self.paranoia:
            self.compare_pgdata(pgdata, pgdata_restored)

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_ptrack_archive(self):
        """make archive node, make full and ptrack backups, check data correctness in restored instance"""
        self.maxDiff = None
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s', 'ptrack_enable': 'on', 'autovacuum': 'off'}
            )
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.start()

        # FULL BACKUP
        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
        full_result = node.safe_psql("postgres", "SELECT * FROM t_heap")
        full_backup_id = self.backup_node(backup_dir, 'node', node)
        full_target_time = self.show_pb(backup_dir, 'node', full_backup_id)['recovery-time']

        # PTRACK BACKUP
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(100,200) i")
        ptrack_result = node.safe_psql("postgres", "SELECT * FROM t_heap")
        ptrack_backup_id = self.backup_node(backup_dir, 'node', node, backup_type='ptrack')
        ptrack_target_time = self.show_pb(backup_dir, 'node', ptrack_backup_id)['recovery-time']
        pgdata = self.pgdata_content(node.data_dir)

        # Drop Node
        node.cleanup()

        # Check full backup
        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(full_backup_id),
            self.restore_node(backup_dir, 'node', node, backup_id=full_backup_id, options=["-j", "4", "--time={0}".format(full_target_time)]),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd))
        node.start()
        while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
            time.sleep(1)
        full_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap")
        self.assertEqual(full_result, full_result_new)
        node.cleanup()

        # Check ptrack backup
        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(ptrack_backup_id),
            self.restore_node(backup_dir, 'node', node, backup_id=ptrack_backup_id, options=["-j", "4", "--time={0}".format(ptrack_target_time)]),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd))
        pgdata_restored = self.pgdata_content(node.data_dir)
        node.start()
        while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
            time.sleep(1)
        ptrack_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap")
        self.assertEqual(ptrack_result, ptrack_result_new)

        if self.paranoia:
            self.compare_pgdata(pgdata, pgdata_restored)

        node.cleanup()

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_ptrack_pgpro417(self):
        """Make node, take full backup, take ptrack backup, delete ptrack backup. Try to take ptrack backup, which should fail"""
        self.maxDiff = None
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s', 'ptrack_enable': 'on', 'autovacuum': 'off'}
            )
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.start()

        # FULL BACKUP
        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
        node.safe_psql("postgres", "SELECT * FROM t_heap")
        backup_id = self.backup_node(backup_dir, 'node', node, backup_type='full', options=["--stream"])
        start_lsn_full = self.show_pb(backup_dir, 'node', backup_id)['start-lsn']

        # PTRACK BACKUP
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(100,200) i")
        node.safe_psql("postgres", "SELECT * FROM t_heap")
        backup_id = self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream"])
        start_lsn_ptrack = self.show_pb(backup_dir, 'node', backup_id)['start-lsn']
        self.delete_pb(backup_dir, 'node', backup_id)

        # SECOND PTRACK BACKUP
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(200,300) i")
        try:
            self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream"])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because of LSN mismatch from ptrack_control and previous backup start_lsn.\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                'ERROR: LSN from ptrack_control' in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_page_pgpro417(self):
        """Make archive node, take full backup, take page backup, delete page backup. Try to take ptrack backup, which should fail"""
        self.maxDiff = None
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s', 'ptrack_enable': 'on', 'autovacuum': 'off'}
            )
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.start()

        # FULL BACKUP
        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
        node.safe_psql("postgres", "SELECT * FROM t_heap")
        self.backup_node(backup_dir, 'node', node)

        # PAGE BACKUP
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(100,200) i")
        node.safe_psql("postgres", "SELECT * FROM t_heap")
        backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
        self.delete_pb(backup_dir, 'node', backup_id)
        # sys.exit(1)

        # PTRACK BACKUP
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(200,300) i")
        try:
            self.backup_node(backup_dir, 'node', node, backup_type='ptrack')
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because of LSN mismatch from ptrack_control and previous backup start_lsn.\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                'ERROR: LSN from ptrack_control' in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_full_pgpro417(self):
        """Make node, take two full backups, delete second full backup. Try to take ptrack backup, which should fail"""
        self.maxDiff = None
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s', 'ptrack_enable': 'on', 'autovacuum': 'off'}
            )
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.start()

        # FULL BACKUP
        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
        node.safe_psql("postgres", "SELECT * FROM t_heap")
        self.backup_node(backup_dir, 'node', node, options=["--stream"])

        # SECOND FULL BACKUP
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(100,200) i")
        node.safe_psql("postgres", "SELECT * FROM t_heap")
        backup_id = self.backup_node(backup_dir, 'node', node, options=["--stream"])
        self.delete_pb(backup_dir, 'node', backup_id)

        # PTRACK BACKUP
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(200,300) i")
        try:
            self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream"])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because of LSN mismatch from ptrack_control and previous backup start_lsn.\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                'ERROR: LSN from ptrack_control' in e.message
                and 'Create new full backup before an incremental one' in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_create_db(self):
        """Make node, take full backup, create database db1, take ptrack backup, restore database and check its presence"""
        self.maxDiff = None
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_size': '10GB', 'max_wal_senders': '2', 'checkpoint_timeout': '5min', 'ptrack_enable': 'on', 'autovacuum': 'off'}
            )
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.start()

        # FULL BACKUP
        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
        node.safe_psql("postgres", "SELECT * FROM t_heap")
        self.backup_node(backup_dir, 'node', node, options=["--stream", "--log-level-file=verbose"])
        # sys.exit(1)

        # CREATE DATABASE DB1
        node.safe_psql("postgres", "create database db1")
        node.safe_psql(
            "db1",
            "create table t_heap as select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(0,100) i")

        # PTRACK BACKUP
        backup_id = self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream", "--log-level-file=verbose"])
        pgdata = self.pgdata_content(node.data_dir)

        # RESTORE
        node_restored = self.make_simple_node(base_dir="{0}/{1}/node_restored".format(module_name, fname))
        node_restored.cleanup()

        # COMPARE PHYSICAL CONTENT
        self.restore_node(backup_dir, 'node', node_restored, backup_id=backup_id, options=["-j", "4"])
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        if self.paranoia:
            self.compare_pgdata(pgdata, pgdata_restored)

        # START RESTORED NODE
        node_restored.append_conf("postgresql.auto.conf", "port = {0}".format(node_restored.port))
        node_restored.start()

        # DROP DATABASE DB1
        node.safe_psql(
            "postgres", "drop database db1")

        # SECOND PTRACK BACKUP
        backup_id = self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream"])
        pgdata = self.pgdata_content(node.data_dir)

        # RESTORE SECOND PTRACK BACKUP
        node_restored.cleanup()
        self.restore_node(backup_dir, 'node', node_restored, backup_id=backup_id, options=["-j", "4"])

        # START RESTORED NODE
        node_restored.append_conf("postgresql.auto.conf", "port = {0}".format(node_restored.port))
        node_restored.start()

        # COMPARE PHYSICAL CONTENT
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        if self.paranoia:
            self.compare_pgdata(pgdata, pgdata_restored)

        try:
            node_restored.safe_psql('db1', 'select 1')
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because we are connecting to deleted database.\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ClusterException as e:
            self.assertTrue(
                'FATAL:  database "db1" does not exist' in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))

        # Clean after yourself
        self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
2017-10-03 15:27:03 +02:00
def test_alter_table_set_tablespace_ptrack ( self ) :
""" Make node, create tablespace with table, take full backup, alter tablespace location, take ptrack backup, restore database. """
self . maxDiff = None
fname = self . id ( ) . split ( ' . ' ) [ 3 ]
backup_dir = os . path . join ( self . tmp_path , module_name , fname , ' backup ' )
node = self . make_simple_node ( base_dir = " {0} / {1} /node " . format ( module_name , fname ) ,
set_replication = True ,
initdb_params = [ ' --data-checksums ' ] ,
pg_options = { ' wal_level ' : ' replica ' , ' max_wal_senders ' : ' 2 ' , ' checkpoint_timeout ' : ' 30s ' , ' ptrack_enable ' : ' on ' , ' autovacuum ' : ' off ' }
)
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
node . start ( )
# FULL BACKUP
self . create_tblspace_in_node ( node , ' somedata ' )
node . safe_psql (
" postgres " ,
" create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(0,100) i " )
node . safe_psql ( " postgres " , " SELECT * FROM t_heap " )
self . backup_node ( backup_dir , ' node ' , node , options = [ " --stream " ] )
# ALTER TABLESPACE
self . create_tblspace_in_node ( node , ' somedata_new ' )
node . safe_psql (
" postgres " , " alter table t_heap set tablespace somedata_new " )
2017-11-09 11:45:04 +02:00
# sys.exit(1)
2017-10-03 15:27:03 +02:00
# PTRACK BACKUP
result = node . safe_psql ( " postgres " , " select * from t_heap " )
2017-12-13 10:15:42 +02:00
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' ptrack ' , options = [ " --stream " , " --log-level-file=verbose " ] )
2017-10-09 14:40:33 +02:00
pgdata = self . pgdata_content ( node . data_dir )
2017-11-09 11:45:04 +02:00
#node.stop()
#node.cleanup()
2017-10-03 15:27:03 +02:00
# RESTORE
node_restored = self . make_simple_node ( base_dir = " {0} / {1} /node_restored " . format ( module_name , fname ) )
node_restored . cleanup ( )
self . restore_node ( backup_dir , ' node ' , node_restored , options = [ " -j " , " 4 " ,
" -T " , " {0} = {1} " . format ( self . get_tblspace_path ( node , ' somedata ' ) , self . get_tblspace_path ( node_restored , ' somedata ' ) ) ,
" -T " , " {0} = {1} " . format ( self . get_tblspace_path ( node , ' somedata_new ' ) , self . get_tblspace_path ( node_restored , ' somedata_new ' ) )
] )
# GET RESTORED PGDATA AND COMPARE
2017-10-11 17:08:56 +02:00
pgdata_restored = self . pgdata_content ( node_restored . data_dir )
2017-10-03 15:27:03 +02:00
# START RESTORED NODE
2017-10-09 14:32:48 +02:00
node_restored . append_conf ( ' postgresql.auto.conf ' , ' port = {0} ' . format ( node_restored . port ) )
2017-10-03 15:27:03 +02:00
node_restored . start ( )
2017-10-09 14:32:48 +02:00
time . sleep ( 5 )
while node_restored . safe_psql ( " postgres " , " select pg_is_in_recovery() " ) == ' t \n ' :
time . sleep ( 1 )
result_new = node_restored . safe_psql ( " postgres " , " select * from t_heap " )
2017-10-03 15:27:03 +02:00
self . assertEqual ( result , result_new , ' lost some data after restore ' )
2017-10-11 17:08:56 +02:00
if self . paranoia :
self . compare_pgdata ( pgdata , pgdata_restored )
2017-10-03 15:27:03 +02:00
# Clean after yourself
self . del_test_dir ( module_name , fname )

    # @unittest.skip("skip")
    def test_alter_database_set_tablespace_ptrack(self):
        """Make node, create tablespace with database, take full backup, alter tablespace location, take ptrack backup, restore database."""
        self.maxDiff = None
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s', 'ptrack_enable': 'on', 'autovacuum': 'off'}
            )
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.start()

        # FULL BACKUP
        self.backup_node(backup_dir, 'node', node, options=["--stream"])

        # CREATE TABLESPACE
        self.create_tblspace_in_node(node, 'somedata')

        # ALTER DATABASE
        node.safe_psql(
            "template1",
            "alter database postgres set tablespace somedata")
        # sys.exit(1)

        # PTRACK BACKUP
        self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--stream", '--log-level-file=verbose'])
        pgdata = self.pgdata_content(node.data_dir)
        node.stop()

        # RESTORE
        node_restored = self.make_simple_node(base_dir="{0}/{1}/node_restored".format(module_name, fname))
        node_restored.cleanup()
        self.restore_node(backup_dir, 'node', node_restored, options=[
            "-j", "4",
            "-T", "{0}={1}".format(self.get_tblspace_path(node, 'somedata'), self.get_tblspace_path(node_restored, 'somedata'))])

        # GET PHYSICAL CONTENT
        pgdata_restored = self.pgdata_content(node_restored.data_dir)

        # COMPARE PHYSICAL CONTENT
        if self.paranoia:
            self.compare_pgdata(pgdata, pgdata_restored)

        # START RESTORED NODE
        node_restored.start()

        # Clean after yourself
        self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
2017-09-28 09:32:06 +02:00
def test_drop_tablespace ( self ) :
""" Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup """
self . maxDiff = None
fname = self . id ( ) . split ( ' . ' ) [ 3 ]
backup_dir = os . path . join ( self . tmp_path , module_name , fname , ' backup ' )
node = self . make_simple_node ( base_dir = " {0} / {1} /node " . format ( module_name , fname ) ,
set_replication = True ,
initdb_params = [ ' --data-checksums ' ] ,
2017-10-03 15:27:03 +02:00
pg_options = { ' wal_level ' : ' replica ' , ' max_wal_senders ' : ' 2 ' , ' checkpoint_timeout ' : ' 30s ' , ' ptrack_enable ' : ' on ' , ' autovacuum ' : ' off ' }
2017-09-28 09:32:06 +02:00
)
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
node . start ( )
self . create_tblspace_in_node ( node , ' somedata ' )
# CREATE TABLE
2017-08-31 22:33:54 +02:00
node . safe_psql (
2017-09-28 09:32:06 +02:00
" postgres " ,
" create table t_heap as select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(0,100) i " )
result = node . safe_psql ( " postgres " , " select * from t_heap " )
# FULL BACKUP
self . backup_node ( backup_dir , ' node ' , node , options = [ " --stream " ] )
2017-08-31 22:33:54 +02:00
2017-09-28 09:32:06 +02:00
# Move table to tablespace 'somedata'
node . safe_psql (
" postgres " , " alter table t_heap set tablespace somedata " )
# PTRACK BACKUP
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' ptrack ' , options = [ " --stream " ] )
# Move table back to default tablespace
node . safe_psql (
" postgres " , " alter table t_heap set tablespace pg_default " )
2017-08-31 22:33:54 +02:00
# SECOND PTRACK BACKUP
2017-09-28 09:32:06 +02:00
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' ptrack ' , options = [ " --stream " ] )
# DROP TABLESPACE 'somedata'
2017-08-31 22:33:54 +02:00
node . safe_psql (
2017-09-28 09:32:06 +02:00
" postgres " , " drop tablespace somedata " )
# THIRD PTRACK BACKUP
2017-08-31 22:33:54 +02:00
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' ptrack ' , options = [ " --stream " ] )
2017-09-28 09:32:06 +02:00
tblspace = self . get_tblspace_path ( node , ' somedata ' )
node . cleanup ( )
shutil . rmtree ( tblspace , ignore_errors = True )
self . restore_node ( backup_dir , ' node ' , node , options = [ " -j " , " 4 " ] )
node . start ( )
tblspc_exist = node . safe_psql ( " postgres " , " select exists(select 1 from pg_tablespace where spcname = ' somedata ' ) " )
if tblspc_exist . rstrip ( ) == ' t ' :
self . assertEqual ( 1 , 0 , " Expecting Error because tablespace ' somedata ' should not be present " )
result_new = node . safe_psql ( " postgres " , " select * from t_heap " )
self . assertEqual ( result , result_new )
# Clean after yourself
self . del_test_dir ( module_name , fname )
2017-10-05 11:06:01 +02:00
# @unittest.skip("skip")
2017-10-09 14:32:48 +02:00
def test_ptrack_alter_tablespace ( self ) :
2017-09-28 09:32:06 +02:00
""" Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup """
self . maxDiff = None
fname = self . id ( ) . split ( ' . ' ) [ 3 ]
backup_dir = os . path . join ( self . tmp_path , module_name , fname , ' backup ' )
node = self . make_simple_node ( base_dir = " {0} / {1} /node " . format ( module_name , fname ) ,
set_replication = True ,
initdb_params = [ ' --data-checksums ' ] ,
2017-11-09 11:45:04 +02:00
pg_options = { ' wal_level ' : ' replica ' , ' max_wal_senders ' : ' 2 ' ,
' checkpoint_timeout ' : ' 30s ' , ' ptrack_enable ' : ' on ' ,
' autovacuum ' : ' off ' }
2017-09-28 09:32:06 +02:00
)
self . init_pb ( backup_dir )
self . add_instance ( backup_dir , ' node ' , node )
node . start ( )
self . create_tblspace_in_node ( node , ' somedata ' )
2017-11-09 11:45:04 +02:00
tblspc_path = self . get_tblspace_path ( node , ' somedata ' )
2017-09-28 09:32:06 +02:00
# CREATE TABLE
node . safe_psql (
" postgres " ,
" create table t_heap as select i as id, md5(i::text) as text, md5(i::text)::tsvector as tsvector from generate_series(0,100) i " )
result = node . safe_psql ( " postgres " , " select * from t_heap " )
# FULL BACKUP
self . backup_node ( backup_dir , ' node ' , node , options = [ " --stream " ] )
# Move table to separate tablespace
node . safe_psql (
" postgres " , " alter table t_heap set tablespace somedata " )
2017-11-09 11:45:04 +02:00
# GET LOGICAL CONTENT FROM NODE
result = node . safe_psql ( " postgres " , " select * from t_heap " )
2017-09-28 09:32:06 +02:00
# FIRTS PTRACK BACKUP
2017-12-13 10:15:42 +02:00
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' ptrack ' , options = [ " --stream " , " --log-level-file=verbose " ] )
2017-11-09 11:45:04 +02:00
# GET PHYSICAL CONTENT FROM NODE
2017-10-09 14:32:48 +02:00
pgdata = self . pgdata_content ( node . data_dir )
2017-09-28 09:32:06 +02:00
2017-11-09 11:45:04 +02:00
# Restore ptrack backup
2017-09-28 09:32:06 +02:00
restored_node = self . make_simple_node ( base_dir = " {0} / {1} /restored_node " . format ( module_name , fname ) )
restored_node . cleanup ( )
tblspc_path_new = self . get_tblspace_path ( restored_node , ' somedata_restored ' )
self . restore_node ( backup_dir , ' node ' , restored_node , options = [
" -j " , " 4 " , " -T " , " {0} = {1} " . format ( tblspc_path , tblspc_path_new ) ] )
2017-10-03 15:27:03 +02:00
2017-11-09 11:45:04 +02:00
# GET PHYSICAL CONTENT FROM RESTORED NODE
2017-10-11 17:08:56 +02:00
pgdata_restored = self . pgdata_content ( restored_node . data_dir )
2017-10-09 14:32:48 +02:00
# START RESTORED NODE
2017-09-28 09:32:06 +02:00
restored_node . append_conf ( " postgresql.auto.conf " , " port = {0} " . format ( restored_node . port ) )
restored_node . start ( )
2017-10-09 14:32:48 +02:00
while restored_node . safe_psql ( " postgres " , " select pg_is_in_recovery() " ) == ' t \n ' :
time . sleep ( 1 )
2017-09-28 09:32:06 +02:00
2017-11-09 11:45:04 +02:00
# COMPARE LOGICAL CONTENT
2017-09-28 09:32:06 +02:00
result_new = restored_node . safe_psql ( " postgres " , " select * from t_heap " )
self . assertEqual ( result , result_new )
2017-10-11 17:08:56 +02:00
# COMPARE PHYSICAL CONTENT
if self . paranoia :
self . compare_pgdata ( pgdata , pgdata_restored )
2017-09-28 09:32:06 +02:00
restored_node . cleanup ( )
shutil . rmtree ( tblspc_path_new , ignore_errors = True )
# Move table to default tablespace
node . safe_psql (
" postgres " , " alter table t_heap set tablespace pg_default " )
# SECOND PTRACK BACKUP
2017-12-13 10:15:42 +02:00
self . backup_node ( backup_dir , ' node ' , node , backup_type = ' ptrack ' , options = [ " --stream " , " --log-level-file=verbose " ] )
2017-10-09 14:32:48 +02:00
pgdata = self . pgdata_content ( node . data_dir )
2017-09-28 09:32:06 +02:00
# Restore second ptrack backup and check table consistency
self . restore_node ( backup_dir , ' node ' , restored_node , options = [
" -j " , " 4 " , " -T " , " {0} = {1} " . format ( tblspc_path , tblspc_path_new ) ] )
2017-10-09 14:32:48 +02:00
2017-11-09 11:45:04 +02:00
# GET PHYSICAL CONTENT FROM RESTORED NODE
2017-10-11 17:08:56 +02:00
pgdata_restored = self . pgdata_content ( restored_node . data_dir )
2017-10-09 14:32:48 +02:00
# START RESTORED NODE
2017-11-09 11:45:04 +02:00
restored_node . append_conf ( " postgresql.auto.conf " , " port = {0} " . format ( restored_node . port ) )
2017-10-09 14:32:48 +02:00
restored_node . start ( )
while restored_node . safe_psql ( " postgres " , " select pg_is_in_recovery() " ) == ' t \n ' :
time . sleep ( 1 )
2017-09-28 09:32:06 +02:00
result_new = restored_node . safe_psql ( " postgres " , " select * from t_heap " )
self . assertEqual ( result , result_new )
2017-10-11 17:08:56 +02:00
if self . paranoia :
# COMPARE PHYSICAL CONTENT
self . compare_pgdata ( pgdata , pgdata_restored )
2017-08-31 22:33:54 +02:00
# Clean after yourself
self . del_test_dir ( module_name , fname )
2017-10-03 15:27:03 +02:00

    # @unittest.skip("skip")
    def test_ptrack_multiple_segments(self):
        """Make node, initialize pgbench so that pgbench_accounts spans multiple segments, take full backup, run pgbench, check ptrack sanity, take ptrack backup, restore it and check data correctness"""
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2',
                        'ptrack_enable': 'on', 'fsync': 'off', 'shared_buffers': '1GB',
                        'maintenance_work_mem': '1GB', 'autovacuum': 'off', 'full_page_writes': 'off'}
            )
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.start()

        self.create_tblspace_in_node(node, 'somedata')

        # CREATE TABLE
        node.pgbench_init(scale=100, options=['--tablespace=somedata'])
        # FULL BACKUP
        self.backup_node(backup_dir, 'node', node)

        # PTRACK STUFF
        idx_ptrack = {'type': 'heap'}
        idx_ptrack['path'] = self.get_fork_path(node, 'pgbench_accounts')
        idx_ptrack['old_size'] = self.get_fork_size(node, 'pgbench_accounts')
        idx_ptrack['old_pages'] = self.get_md5_per_page_for_fork(
            idx_ptrack['path'], idx_ptrack['old_size'])

        pgbench = node.pgbench(options=['-T', '150', '-c', '2', '--no-vacuum'])
        pgbench.wait()
        node.safe_psql("postgres", "checkpoint")

        idx_ptrack['new_size'] = self.get_fork_size(node, 'pgbench_accounts')
        idx_ptrack['new_pages'] = self.get_md5_per_page_for_fork(idx_ptrack['path'], idx_ptrack['new_size'])
        idx_ptrack['ptrack'] = self.get_ptrack_bits_per_page_for_fork(node, idx_ptrack['path'])
        self.check_ptrack_sanity(idx_ptrack)

        # GET LOGICAL CONTENT FROM NODE
        result = node.safe_psql("postgres", "select * from pgbench_accounts")
        # FIRST PTRACK BACKUP
        self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=["--log-level-file=verbose"])
        # GET PHYSICAL CONTENT FROM NODE
        pgdata = self.pgdata_content(node.data_dir)

        # RESTORE NODE
        restored_node = self.make_simple_node(base_dir="{0}/{1}/restored_node".format(module_name, fname))
        restored_node.cleanup()
        tblspc_path = self.get_tblspace_path(node, 'somedata')
        tblspc_path_new = self.get_tblspace_path(restored_node, 'somedata_restored')

        self.restore_node(backup_dir, 'node', restored_node, options=[
            "-j", "4", "-T", "{0}={1}".format(tblspc_path, tblspc_path_new)])

        # GET PHYSICAL CONTENT FROM NODE_RESTORED
        pgdata_restored = self.pgdata_content(restored_node.data_dir)

        # START RESTORED NODE
        restored_node.append_conf("postgresql.auto.conf", "port = {0}".format(restored_node.port))
        restored_node.start()
        while restored_node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
            time.sleep(1)

        result_new = restored_node.safe_psql("postgres", "select * from pgbench_accounts")

        # COMPARE RESTORED FILES
        self.assertEqual(result, result_new, 'data is lost')

        if self.paranoia:
            self.compare_pgdata(pgdata, pgdata_restored)

        # Clean after yourself
        self.del_test_dir(module_name, fname)