pg_probackup (mirror of https://github.com/postgrespro/pg_probackup.git)

Merge branch 'master' into stable
@@ -14,7 +14,6 @@
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <unistd.h>
-#include <pthread.h>
 
 #include "catalog/pg_control.h"
 #include "utils/logger.h"
@@ -13,7 +13,9 @@ class FalsePositive(ProbackupTest, unittest.TestCase):
     # @unittest.skip("skip")
     @unittest.expectedFailure
     def test_validate_wal_lost_segment(self):
-        """Loose segment located between backups. ExpectedFailure. This is BUG """
+        """
+        Loose segment located between backups. ExpectedFailure. This is BUG
+        """
         fname = self.id().split('.')[3]
         node = self.make_simple_node(
             base_dir="{0}/{1}/node".format(module_name, fname),
@@ -31,14 +33,7 @@ class FalsePositive(ProbackupTest, unittest.TestCase):
         self.backup_node(backup_dir, 'node', node)
 
         # make some wals
-        node.pgbench_init(scale=2)
-        pgbench = node.pgbench(
-            stdout=subprocess.PIPE,
-            stderr=subprocess.STDOUT,
-            options=["-c", "4", "-T", "10"]
-        )
-        pgbench.wait()
-        pgbench.stdout.close()
+        node.pgbench_init(scale=5)
 
         # delete last wal segment
         wals_dir = os.path.join(backup_dir, "wal", 'node')
@@ -614,7 +614,8 @@ class ProbackupTest(object):
 
         return self.run_pb(cmd_list + options, async, gdb)
 
-    def merge_backup(self, backup_dir, instance, backup_id):
+    def merge_backup(
+            self, backup_dir, instance, backup_id, async=False, gdb=False):
         cmd_list = [
             "merge",
             "-B", backup_dir,
@@ -622,7 +623,7 @@ class ProbackupTest(object):
             "-i", backup_id
         ]
 
-        return self.run_pb(cmd_list)
+        return self.run_pb(cmd_list, async, gdb)
 
     def restore_node(
             self, backup_dir, instance, node=False,
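For context, a minimal sketch of how a test can drive the extended helper, assuming (as the new merge test in this commit does) that passing gdb=True makes run_pb return a GDB session object with set_breakpoint() and run_until_break(); the interrupt_merge name is hypothetical and not part of the diff. Note that async is a reserved keyword from Python 3.7 onward, so this helper signature only parses on older interpreters.

# Hypothetical usage sketch of the extended merge_backup() helper.
def interrupt_merge(test, backup_dir, backup_id):
    # gdb=True is forwarded to run_pb(), which is assumed to return
    # a GDB wrapper instead of the command output.
    gdb = test.merge_backup(backup_dir, 'node', backup_id, gdb=True)
    gdb.set_breakpoint('move_file')   # pause inside the merge file loop
    gdb.run_until_break()
    gdb._execute('signal SIGKILL')    # simulate a crash mid-merge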
@@ -458,7 +458,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
     def test_merge_delta_delete(self):
         """
         Make node, create tablespace with table, take full backup,
-        alter tablespace location, take delta backup, restore database.
+        alter tablespace location, take delta backup, merge full and delta,
+        restore database.
         """
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@@ -542,3 +543,74 @@ class MergeTest(ProbackupTest, unittest.TestCase):
 
         # Clean after yourself
         self.del_test_dir(module_name, fname)
+
+    # @unittest.skip("skip")
+    def test_continue_failed_merge(self):
+        """
+        Check that failed MERGE can be continued
+        """
+        fname = self.id().split('.')[3]
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        node = self.make_simple_node(
+            base_dir="{0}/{1}/node".format(module_name, fname),
+            set_replication=True, initdb_params=['--data-checksums'],
+            pg_options={
+                'wal_level': 'replica'
+            }
+        )
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.start()
+
+        # FULL backup
+        self.backup_node(backup_dir, 'node', node)
+
+        node.safe_psql(
+            "postgres",
+            "create table t_heap as select i as id,"
+            " md5(i::text) as text, md5(i::text)::tsvector as tsvector"
+            " from generate_series(0,1000) i"
+        )
+
+        # DELTA BACKUP
+        self.backup_node(
+            backup_dir, 'node', node, backup_type='delta'
+        )
+
+        node.safe_psql(
+            "postgres",
+            "delete from t_heap"
+        )
+
+        node.safe_psql(
+            "postgres",
+            "vacuum t_heap"
+        )
+
+        # DELTA BACKUP
+        self.backup_node(
+            backup_dir, 'node', node, backup_type='delta'
+        )
+
+        if self.paranoia:
+            pgdata = self.pgdata_content(node.data_dir)
+
+        backup_id = self.show_pb(backup_dir, "node")[2]["id"]
+
+        gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True)
+
+        gdb.set_breakpoint('move_file')
+        gdb.run_until_break()
+
+        if gdb.continue_execution_until_break(20) != 'breakpoint-hit':
+            print('Failed to hit breakpoint')
+            exit(1)
+
+        gdb._execute('signal SIGKILL')
+
+        print(self.show_pb(backup_dir, as_text=True, as_json=False))
+
+        # Try to continue failed MERGE
+        self.merge_backup(backup_dir, "node", backup_id)
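The hunk above ends right at the retry, so the assertions that follow are not visible in this diff. As a sketch only, under the assumption that a successfully continued merge collapses the chain into a single usable full backup (the 'backup-mode' key name is an assumption, not taken from this diff), a follow-up check might look like:

# Hypothetical post-merge assertions; not part of the diff above.
backups = self.show_pb(backup_dir, "node")
self.assertEqual('OK', backups[0]['status'])         # continued merge finished cleanly
self.assertEqual('FULL', backups[0]['backup-mode'])  # delta chain collapsed into a full backup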
tests/page.py
@@ -166,11 +166,6 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
             '\n Unexpected Error Message: {0}\n'
             ' CMD: {1}'.format(repr(self.output), self.cmd))
 
-        # GET RESTORED PGDATA AND COMPARE
-        if self.paranoia:
-            pgdata_restored = self.pgdata_content(node.data_dir)
-            self.compare_pgdata(pgdata, pgdata_restored)
-
         node.slow_start()
         full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
         self.assertEqual(full_result, full_result_new)
@@ -184,6 +179,12 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
                 backup_id=page_backup_id, options=["-j", "4"]),
             '\n Unexpected Error Message: {0}\n'
             ' CMD: {1}'.format(repr(self.output), self.cmd))
+
+        # GET RESTORED PGDATA AND COMPARE
+        if self.paranoia:
+            pgdata_restored = self.pgdata_content(node.data_dir)
+            self.compare_pgdata(pgdata, pgdata_restored)
+
         node.slow_start()
         page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
         self.assertEqual(page_result, page_result_new)
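The block moved by the two hunks above implements the usual paranoia pattern: snapshot the data directory before it is destroyed, restore the backup, snapshot again, and compare. A compact sketch of that pattern, assuming pgdata_content(), compare_pgdata() and restore_node() behave as in the helpers shown earlier:

# Illustration of the paranoia check pattern (hypothetical standalone form).
if self.paranoia:
    pgdata = self.pgdata_content(node.data_dir)       # snapshot before cleanup/restore
node.cleanup()
self.restore_node(backup_dir, 'node', node)
if self.paranoia:
    pgdata_restored = self.pgdata_content(node.data_dir)
    self.compare_pgdata(pgdata, pgdata_restored)      # compare the two snapshots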
@@ -659,3 +660,92 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
         # Clean after yourself
         node.cleanup()
         self.del_test_dir(module_name, fname)
+
+    # @unittest.skip("skip")
+    def test_page_backup_with_lost_wal_segment(self):
+        """
+        make node with archiving
+        make archive backup, then generate some wals with pgbench,
+        delete latest archived wal segment
+        run page backup, expecting error because of missing wal segment
+        make sure that backup status is 'ERROR'
+        """
+        fname = self.id().split('.')[3]
+        node = self.make_simple_node(
+            base_dir="{0}/{1}/node".format(module_name, fname),
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica'}
+        )
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.start()
+
+        self.backup_node(backup_dir, 'node', node)
+
+        # make some wals
+        node.pgbench_init(scale=3)
+
+        # delete last wal segment
+        wals_dir = os.path.join(backup_dir, 'wal', 'node')
+        wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(
+            wals_dir, f)) and not f.endswith('.backup')]
+        wals = map(str, wals)
+        file = os.path.join(wals_dir, max(wals))
+        os.remove(file)
+        if self.archive_compress:
+            file = file[:-3]
+
+        # Single-thread PAGE backup
+        try:
+            self.backup_node(
+                backup_dir, 'node', node,
+                backup_type='page')
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because of wal segment disappearance.\n "
+                "Output: {0} \n CMD: {1}".format(
+                    self.output, self.cmd))
+        except ProbackupException as e:
+            self.assertTrue(
+                'INFO: Wait for LSN' in e.message and
+                'in archived WAL segment' in e.message and
+                'WARNING: could not read WAL record at' in e.message and
+                'ERROR: WAL segment "{0}" is absent\n'.format(
+                    file) in e.message,
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                    repr(e.message), self.cmd))
+
+        self.assertEqual(
+            'ERROR',
+            self.show_pb(backup_dir, 'node')[1]['status'],
+            'Backup {0} should have STATUS "ERROR"')
+
+        # Multi-thread PAGE backup
+        try:
+            self.backup_node(
+                backup_dir, 'node', node,
+                backup_type='page', options=["-j", "4"])
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because of wal segment disappearance.\n "
+                "Output: {0} \n CMD: {1}".format(
+                    self.output, self.cmd))
+        except ProbackupException as e:
+            self.assertTrue(
+                'INFO: Wait for LSN' in e.message and
+                'in archived WAL segment' in e.message and
+                'WARNING: could not read WAL record at' in e.message and
+                'ERROR: WAL segment "{0}" is absent\n'.format(
+                    file) in e.message,
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                    repr(e.message), self.cmd))
+
+        self.assertEqual(
+            'ERROR',
+            self.show_pb(backup_dir, 'node')[2]['status'],
+            'Backup {0} should have STATUS "ERROR"')
+
+        # Clean after yourself
+        self.del_test_dir(module_name, fname)
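The "delete the newest archived segment" step above also appears in the false-positive and validate tests touched by this commit. A minimal standalone sketch of that step, factored out as a hypothetical helper (pure stdlib, no pg_probackup helpers assumed):

import os

def remove_latest_wal_segment(wals_dir, archive_compress=False):
    # List archived segments, ignoring .backup history files.
    wals = [f for f in os.listdir(wals_dir)
            if os.path.isfile(os.path.join(wals_dir, f))
            and not f.endswith('.backup')]
    # Segment names sort lexicographically, so max() picks the newest one.
    target = os.path.join(wals_dir, max(wals))
    os.remove(target)
    # With archive compression on, error messages name the segment without
    # its three-character compression suffix (e.g. a trailing ".gz").
    return target[:-3] if archive_compress else target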
@@ -1236,78 +1236,12 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
         # Clean after yourself
         self.del_test_dir(module_name, fname)
 
-    # @unittest.skip("skip")
-    def test_validate_wal_lost_segment_2(self):
-        """
-        make node with archiving
-        make archive backup
-        delete from archive wal segment which DO NOT belong to this backup
-        run validate, expecting error because of missing wal segment
-        make sure that backup status is 'ERROR'
-        """
-        fname = self.id().split('.')[3]
-        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
-            initdb_params=['--data-checksums'],
-            pg_options={'wal_level': 'replica'}
-            )
-        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
-        self.init_pb(backup_dir)
-        self.add_instance(backup_dir, 'node', node)
-        self.set_archiving(backup_dir, 'node', node)
-        node.start()
-
-        self.backup_node(backup_dir, 'node', node)
-
-        # make some wals
-        node.pgbench_init(scale=2)
-        pgbench = node.pgbench(
-            stdout=subprocess.PIPE,
-            stderr=subprocess.STDOUT,
-            options=["-c", "4", "-T", "10"]
-        )
-        pgbench.wait()
-        pgbench.stdout.close()
-
-        # delete last wal segment
-        wals_dir = os.path.join(backup_dir, 'wal', 'node')
-        wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(
-            wals_dir, f)) and not f.endswith('.backup')]
-        wals = map(str, wals)
-        file = os.path.join(wals_dir, max(wals))
-        os.remove(file)
-        if self.archive_compress:
-            file = file[:-3]
-
-        # Try to restore
-        try:
-            backup_id = self.backup_node(
-                backup_dir, 'node', node, backup_type='page')
-            self.assertEqual(
-                1, 0,
-                "Expecting Error because of wal segment disappearance.\n "
-                "Output: {0} \n CMD: {1}".format(
-                    self.output, self.cmd))
-        except ProbackupException as e:
-            self.assertTrue(
-                'INFO: Wait for LSN' in e.message and
-                'in archived WAL segment' in e.message and
-                'WARNING: could not read WAL record at' in e.message and
-                'ERROR: WAL segment "{0}" is absent\n'.format(
-                    file) in e.message,
-                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
-                    repr(e.message), self.cmd))
-
-        self.assertEqual(
-            'ERROR',
-            self.show_pb(backup_dir, 'node')[1]['status'],
-            'Backup {0} should have STATUS "ERROR"')
-
-        # Clean after yourself
-        self.del_test_dir(module_name, fname)
-
     # @unittest.skip("skip")
     def test_pgpro702_688(self):
-        """make node without archiving, make stream backup, get Recovery Time, validate to Recovery Time"""
+        """
+        make node without archiving, make stream backup,
+        get Recovery Time, validate to Recovery Time
+        """
         fname = self.id().split('.')[3]
         node = self.make_simple_node(
             base_dir="{0}/{1}/node".format(module_name, fname),
@@ -1346,7 +1280,10 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
 
     # @unittest.skip("skip")
     def test_pgpro688(self):
-        """make node with archiving, make backup, get Recovery Time, validate to Recovery Time. Waiting PGPRO-688. RESOLVED"""
+        """
+        make node with archiving, make backup, get Recovery Time,
+        validate to Recovery Time. Waiting PGPRO-688. RESOLVED
+        """
         fname = self.id().split('.')[3]
         node = self.make_simple_node(
             base_dir="{0}/{1}/node".format(module_name, fname),
@@ -1361,9 +1298,11 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
         node.start()
 
         backup_id = self.backup_node(backup_dir, 'node', node)
-        recovery_time = self.show_pb(backup_dir, 'node', backup_id)['recovery-time']
+        recovery_time = self.show_pb(
+            backup_dir, 'node', backup_id)['recovery-time']
 
-        self.validate_pb(backup_dir, 'node', options=["--time={0}".format(recovery_time)])
+        self.validate_pb(
+            backup_dir, 'node', options=["--time={0}".format(recovery_time)])
 
         # Clean after yourself
         self.del_test_dir(module_name, fname)
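For reference, a rough sketch of the command the validate_pb() helper assembles for the call above, modelled on the cmd_list pattern of merge_backup() shown earlier; the exact flag spelling is an assumption, not taken from this diff.

# Hypothetical reconstruction of the underlying pg_probackup invocation.
cmd_list = [
    "validate",
    "-B", backup_dir,
    "--instance", "node",
    "--time={0}".format(recovery_time),
]
return self.run_pb(cmd_list)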