import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
import subprocess
import sys
from time import sleep
from datetime import datetime, timedelta
import hashlib


module_name = 'restore'


class RestoreTest(ProbackupTest, unittest.TestCase):
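
    # Most tests below share one scaffolding sequence: create a fresh data
    # directory, create a backup catalog, register the instance in it,
    # enable WAL archiving, and start the cluster. A condensed sketch of
    # that sequence, using the ProbackupTest helpers seen throughout this
    # module:
    #
    #   backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
    #   self.init_pb(backup_dir)                      # create the catalog
    #   self.add_instance(backup_dir, 'node', node)   # register the instance
    #   self.set_archiving(backup_dir, 'node', node)  # wire archive_command
    #   node.slow_start()                             # start, wait until ready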

    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_restore_full_to_latest(self):
        """recovery to latest from full backup"""
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica'}
            )
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.pgbench_init(scale=2)
        pgbench = node.pgbench(
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        pgbench.wait()
        pgbench.stdout.close()
        before = node.execute("postgres", "SELECT * FROM pgbench_branches")
        backup_id = self.backup_node(backup_dir, 'node', node)

        node.stop()
        node.cleanup()

        # 1 - Test recovery from latest
        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                options=["-j", "4", "--recovery-target-action=promote"]),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

        # 2 - Test that recovery.conf was created
        recovery_conf = os.path.join(node.data_dir, "recovery.conf")
        self.assertEqual(os.path.isfile(recovery_conf), True)

        node.slow_start()

        after = node.execute("postgres", "SELECT * FROM pgbench_branches")
        self.assertEqual(before, after)

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_restore_full_page_to_latest(self):
        """recovery to latest from full + page backups"""
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica'}
            )
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.pgbench_init(scale=2)

        self.backup_node(backup_dir, 'node', node)

        pgbench = node.pgbench(
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        pgbench.wait()
        pgbench.stdout.close()

        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type="page")

        before = node.execute("postgres", "SELECT * FROM pgbench_branches")

        node.stop()
        node.cleanup()

        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                options=["-j", "4", "--recovery-target-action=promote"]),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

        node.slow_start()

        after = node.execute("postgres", "SELECT * FROM pgbench_branches")
        self.assertEqual(before, after)

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_restore_to_specific_timeline(self):
        """recovery to target timeline"""
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica'}
            )
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.pgbench_init(scale=2)

        before = node.execute("postgres", "SELECT * FROM pgbench_branches")

        backup_id = self.backup_node(backup_dir, 'node', node)

        target_tli = int(
            node.get_control_data()["Latest checkpoint's TimeLineID"])
        node.stop()
        node.cleanup()

        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                options=["-j", "4", "--recovery-target-action=promote"]),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

        node.slow_start()
        pgbench = node.pgbench(
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
            options=['-T', '10', '-c', '2', '--no-vacuum'])
        pgbench.wait()
        pgbench.stdout.close()

        self.backup_node(backup_dir, 'node', node)

        node.stop()
        node.cleanup()

        # The correct backup must be chosen for restore
        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                options=[
                    "-j", "4", "--timeline={0}".format(target_tli),
                    "--recovery-target-action=promote"]
            ),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

        recovery_target_timeline = self.get_recovery_conf(
            node)["recovery_target_timeline"]
        self.assertEqual(int(recovery_target_timeline), target_tli)

        node.slow_start()
        after = node.execute("postgres", "SELECT * FROM pgbench_branches")
        self.assertEqual(before, after)

        # Clean after yourself
        self.del_test_dir(module_name, fname)
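
    # Note on the test above: promoting a restored node switches it to a new
    # timeline, so after the second backup the catalog holds backups taken
    # on different timelines. The --timeline option restricts the restore to
    # the history of the requested timeline, which is why the first
    # backup_id is the one expected to be restored again.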

    # @unittest.skip("skip")
    def test_restore_to_time(self):
        """recovery to target time"""
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica'}
            )
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.append_conf("postgresql.auto.conf", "TimeZone = Europe/Moscow")
        node.slow_start()

        node.pgbench_init(scale=2)
        before = node.execute("postgres", "SELECT * FROM pgbench_branches")

        backup_id = self.backup_node(backup_dir, 'node', node)

        target_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        pgbench = node.pgbench(
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        pgbench.wait()
        pgbench.stdout.close()

        node.stop()
        node.cleanup()

        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                options=[
                    "-j", "4", '--time={0}'.format(target_time),
                    "--recovery-target-action=promote"
                    ]
                ),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

        node.slow_start()
        after = node.execute("postgres", "SELECT * FROM pgbench_branches")
        self.assertEqual(before, after)

        # Clean after yourself
        self.del_test_dir(module_name, fname)
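
    # Note on --time above: the target timestamp is passed without a UTC
    # offset, so the server parses it according to its own TimeZone setting,
    # pinned to Europe/Moscow via postgresql.auto.conf, rather than the
    # timezone of the machine that runs the tests.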

    # @unittest.skip("skip")
    def test_restore_to_xid_inclusive(self):
        """recovery to target xid"""
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica'}
            )
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.pgbench_init(scale=2)
        with node.connect("postgres") as con:
            con.execute("CREATE TABLE tbl0005 (a text)")
            con.commit()

        backup_id = self.backup_node(backup_dir, 'node', node)

        pgbench = node.pgbench(
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        pgbench.wait()
        pgbench.stdout.close()

        before = node.safe_psql("postgres", "SELECT * FROM pgbench_branches")
        with node.connect("postgres") as con:
            res = con.execute(
                "INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)")
            con.commit()
            target_xid = res[0][0]

        pgbench = node.pgbench(
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        pgbench.wait()
        pgbench.stdout.close()

        node.stop()
        node.cleanup()

        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                options=[
                    "-j", "4", '--xid={0}'.format(target_xid),
                    "--recovery-target-action=promote"]
            ),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

        node.slow_start()
        after = node.safe_psql("postgres", "SELECT * FROM pgbench_branches")
        self.assertEqual(before, after)
        self.assertEqual(
            len(node.execute("postgres", "SELECT * FROM tbl0005")), 1)

        # Clean after yourself
        self.del_test_dir(module_name, fname)
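
    # Note on the xid capture above: the xmin of a freshly inserted row is
    # the id of the transaction that inserted it, so "RETURNING (xmin)" is a
    # compact way to obtain the current transaction id for use as the --xid
    # recovery target. With the default inclusive behaviour that transaction
    # is replayed, hence exactly one row survives in tbl0005; the next test
    # passes --inclusive=false and expects zero rows.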

    # @unittest.skip("skip")
    def test_restore_to_xid_not_inclusive(self):
        """recovery with target inclusive false"""
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'],
            pg_options={
                'wal_level': 'replica',
                'ptrack_enable': 'on',
                'max_wal_senders': '2'}
            )
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.pgbench_init(scale=2)
        with node.connect("postgres") as con:
            con.execute("CREATE TABLE tbl0005 (a text)")
            con.commit()

        backup_id = self.backup_node(backup_dir, 'node', node)

        pgbench = node.pgbench(
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        pgbench.wait()
        pgbench.stdout.close()

        before = node.execute("postgres", "SELECT * FROM pgbench_branches")
        with node.connect("postgres") as con:
            result = con.execute(
                "INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)")
            con.commit()
            target_xid = result[0][0]

        pgbench = node.pgbench(
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        pgbench.wait()
        pgbench.stdout.close()

        node.stop()
        node.cleanup()

        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                options=[
                    "-j", "4",
                    '--xid={0}'.format(target_xid),
                    "--inclusive=false",
                    "--recovery-target-action=promote"]),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

        node.slow_start()
        after = node.execute("postgres", "SELECT * FROM pgbench_branches")
        self.assertEqual(before, after)
        self.assertEqual(
            len(node.execute("postgres", "SELECT * FROM tbl0005")), 0)

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_restore_to_lsn_inclusive(self):
        """recovery to target lsn"""
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica'}
            )

        if self.get_version(node) < self.version_to_num('10.0'):
            self.del_test_dir(module_name, fname)
            return

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.pgbench_init(scale=2)
        with node.connect("postgres") as con:
            con.execute("CREATE TABLE tbl0005 (a int)")
            con.commit()

        backup_id = self.backup_node(backup_dir, 'node', node)

        pgbench = node.pgbench(
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        pgbench.wait()
        pgbench.stdout.close()

        before = node.safe_psql("postgres", "SELECT * FROM pgbench_branches")
        with node.connect("postgres") as con:
            con.execute("INSERT INTO tbl0005 VALUES (1)")
            con.commit()
            res = con.execute("SELECT pg_current_wal_lsn()")
            con.commit()
            con.execute("INSERT INTO tbl0005 VALUES (2)")
            con.commit()
            xlogid, xrecoff = res[0][0].split('/')
            xrecoff = hex(int(xrecoff, 16) + 1)[2:]
            target_lsn = "{0}/{1}".format(xlogid, xrecoff)

        pgbench = node.pgbench(
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        pgbench.wait()
        pgbench.stdout.close()

        node.stop()
        node.cleanup()

        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                options=[
                    "-j", "4", '--lsn={0}'.format(target_lsn),
                    "--recovery-target-action=promote"]
            ),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

        node.slow_start()

        after = node.safe_psql("postgres", "SELECT * FROM pgbench_branches")
        self.assertEqual(before, after)
        self.assertEqual(
            len(node.execute("postgres", "SELECT * FROM tbl0005")), 2)

        # Clean after yourself
        self.del_test_dir(module_name, fname)
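
    # Note on the LSN arithmetic above: PostgreSQL prints an LSN as two hex
    # numbers separated by a slash, "xlogid/xrecoff". The test bumps xrecoff
    # by one so that the recovery target lands just past the position
    # captured by pg_current_wal_lsn(). A minimal sketch of the same
    # computation (bump_lsn is a hypothetical name, not part of the tests):
    #
    #   def bump_lsn(lsn, delta=1):
    #       xlogid, xrecoff = lsn.split('/')
    #       return '{0}/{1}'.format(xlogid, hex(int(xrecoff, 16) + delta)[2:])
    #
    #   bump_lsn('0/3000060')  # -> '0/3000061'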

    # @unittest.skip("skip")
    def test_restore_to_lsn_not_inclusive(self):
        """recovery to target lsn, not inclusive"""
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica'}
            )

        if self.get_version(node) < self.version_to_num('10.0'):
            self.del_test_dir(module_name, fname)
            return

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.pgbench_init(scale=2)
        with node.connect("postgres") as con:
            con.execute("CREATE TABLE tbl0005 (a int)")
            con.commit()

        backup_id = self.backup_node(backup_dir, 'node', node)

        pgbench = node.pgbench(
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        pgbench.wait()
        pgbench.stdout.close()

        before = node.safe_psql("postgres", "SELECT * FROM pgbench_branches")
        with node.connect("postgres") as con:
            con.execute("INSERT INTO tbl0005 VALUES (1)")
            con.commit()
            res = con.execute("SELECT pg_current_wal_lsn()")
            con.commit()
            con.execute("INSERT INTO tbl0005 VALUES (2)")
            con.commit()
            xlogid, xrecoff = res[0][0].split('/')
            xrecoff = hex(int(xrecoff, 16) + 1)[2:]
            target_lsn = "{0}/{1}".format(xlogid, xrecoff)

        pgbench = node.pgbench(
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        pgbench.wait()
        pgbench.stdout.close()

        node.stop()
        node.cleanup()

        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                options=[
                    "--inclusive=false",
                    "-j", "4", '--lsn={0}'.format(target_lsn),
                    "--recovery-target-action=promote"]
            ),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

        node.slow_start()

        after = node.safe_psql("postgres", "SELECT * FROM pgbench_branches")
        self.assertEqual(before, after)
        self.assertEqual(
            len(node.execute("postgres", "SELECT * FROM tbl0005")), 1)

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_restore_full_ptrack_archive(self):
        """recovery to latest from archive full+ptrack backups"""
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'}
            )
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.pgbench_init(scale=2)

        self.backup_node(backup_dir, 'node', node)

        pgbench = node.pgbench(
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        pgbench.wait()
        pgbench.stdout.close()

        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type="ptrack")

        before = node.execute("postgres", "SELECT * FROM pgbench_branches")

        node.stop()
        node.cleanup()

        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                options=[
                    "-j", "4", "--recovery-target-action=promote"]),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

        node.slow_start()
        after = node.execute("postgres", "SELECT * FROM pgbench_branches")
        self.assertEqual(before, after)

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_restore_ptrack(self):
        """recovery to latest from archive full+ptrack+ptrack backups"""
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'}
            )
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.pgbench_init(scale=2)

        self.backup_node(backup_dir, 'node', node)

        pgbench = node.pgbench(
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        pgbench.wait()
        pgbench.stdout.close()

        self.backup_node(backup_dir, 'node', node, backup_type="ptrack")

        pgbench = node.pgbench(
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        pgbench.wait()
        pgbench.stdout.close()

        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type="ptrack")

        before = node.execute("postgres", "SELECT * FROM pgbench_branches")

        node.stop()
        node.cleanup()

        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                options=[
                    "-j", "4", "--recovery-target-action=promote"]),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

        node.slow_start()
        after = node.execute("postgres", "SELECT * FROM pgbench_branches")
        self.assertEqual(before, after)

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_restore_full_ptrack_stream(self):
        """recovery in stream mode to latest from full + ptrack backups"""
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'wal_level': 'replica',
                'ptrack_enable': 'on',
                'max_wal_senders': '2'}
            )
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.pgbench_init(scale=2)

        self.backup_node(backup_dir, 'node', node, options=["--stream"])

        pgbench = node.pgbench(
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        pgbench.wait()
        pgbench.stdout.close()

        backup_id = self.backup_node(
            backup_dir, 'node', node,
            backup_type="ptrack", options=["--stream"])

        before = node.execute("postgres", "SELECT * FROM pgbench_branches")

        node.stop()
        node.cleanup()

        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                options=["-j", "4", "--recovery-target-action=promote"]),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

        node.slow_start()
        after = node.execute("postgres", "SELECT * FROM pgbench_branches")
        self.assertEqual(before, after)

        # Clean after yourself
        self.del_test_dir(module_name, fname)
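
    # Note on --stream above: a stream backup pulls the WAL it needs over a
    # replication connection and stores it inside the backup itself instead
    # of depending on archive_command, which is why the node is created with
    # set_replication=True and max_wal_senders raised.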

    # @unittest.skip("skip")
    def test_restore_full_ptrack_under_load(self):
        """
        recovery to latest from full + ptrack backups
        with load applied while the ptrack backup is taken
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'wal_level': 'replica',
                'ptrack_enable': 'on',
                'max_wal_senders': '2'}
            )
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.pgbench_init(scale=2)

        self.backup_node(backup_dir, 'node', node)

        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "4", "-T", "8"]
            )

        backup_id = self.backup_node(
            backup_dir, 'node', node,
            backup_type="ptrack", options=["--stream"])

        pgbench.wait()
        pgbench.stdout.close()

        bbalance = node.execute(
            "postgres", "SELECT sum(bbalance) FROM pgbench_branches")
        delta = node.execute(
            "postgres", "SELECT sum(delta) FROM pgbench_history")

        self.assertEqual(bbalance, delta)
        node.stop()
        node.cleanup()

        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                options=["-j", "4", "--recovery-target-action=promote"]),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

        node.slow_start()
        bbalance = node.execute(
            "postgres", "SELECT sum(bbalance) FROM pgbench_branches")
        delta = node.execute(
            "postgres", "SELECT sum(delta) FROM pgbench_history")
        self.assertEqual(bbalance, delta)

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_restore_full_under_load_ptrack(self):
        """
        recovery to latest from full + ptrack backups
        with load applied while the full backup is taken
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'wal_level': 'replica',
                'ptrack_enable': 'on',
                'max_wal_senders': '2'}
            )
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # wal_segment_size = self.guc_wal_segment_size(node)
        node.pgbench_init(scale=2)

        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "4", "-T", "8"]
            )

        self.backup_node(backup_dir, 'node', node)

        pgbench.wait()
        pgbench.stdout.close()

        backup_id = self.backup_node(
            backup_dir, 'node', node,
            backup_type="ptrack", options=["--stream"])

        bbalance = node.execute(
            "postgres", "SELECT sum(bbalance) FROM pgbench_branches")
        delta = node.execute(
            "postgres", "SELECT sum(delta) FROM pgbench_history")

        self.assertEqual(bbalance, delta)

        node.stop()
        node.cleanup()
        # self.wrong_wal_clean(node, wal_segment_size)

        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                options=["-j", "4", "--recovery-target-action=promote"]),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))
        node.slow_start()
        bbalance = node.execute(
            "postgres", "SELECT sum(bbalance) FROM pgbench_branches")
        delta = node.execute(
            "postgres", "SELECT sum(delta) FROM pgbench_history")
        self.assertEqual(bbalance, delta)

        # Clean after yourself
        self.del_test_dir(module_name, fname)
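
    # Note on the two under-load tests above: pgbench maintains the
    # invariant that sum(bbalance) over pgbench_branches equals sum(delta)
    # over pgbench_history, so comparing the two sums before and after the
    # restore is a cheap consistency check that a backup taken under
    # concurrent load still yields a coherent database.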

    # @unittest.skip("skip")
    def test_restore_with_tablespace_mapping_1(self):
        """recovery using tablespace-mapping option"""
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'],
            pg_options={
                'wal_level': 'replica',
                'ptrack_enable': 'on',
                'max_wal_senders': '2'}
            )
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # Create tablespace
        tblspc_path = os.path.join(node.base_dir, "tblspc")
        os.makedirs(tblspc_path)
        with node.connect("postgres") as con:
            con.connection.autocommit = True
            con.execute("CREATE TABLESPACE tblspc LOCATION '%s'" % tblspc_path)
            con.connection.autocommit = False
            con.execute("CREATE TABLE test (id int) TABLESPACE tblspc")
            con.execute("INSERT INTO test VALUES (1)")
            con.commit()

        backup_id = self.backup_node(backup_dir, 'node', node)
        self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")

        # 1 - Try to restore to existing directory
        node.stop()
        try:
            self.restore_node(backup_dir, 'node', node)
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because restore destination is not empty.\n "
                "Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: restore destination is not empty: "{0}"'.format(
                    node.data_dir),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        # 2 - Try to restore to existing tablespace directory
        tblspc_path_tmp = os.path.join(node.base_dir, "tblspc_tmp")
        os.rename(tblspc_path, tblspc_path_tmp)
        node.cleanup()
        os.rename(tblspc_path_tmp, tblspc_path)
        try:
            self.restore_node(backup_dir, 'node', node)
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because restore tablespace destination is "
                "not empty.\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: restore tablespace destination is not empty:',
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        # 3 - Restore using tablespace-mapping
        tblspc_path_new = os.path.join(node.base_dir, "tblspc_new")
        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                options=[
                    "-T", "%s=%s" % (tblspc_path, tblspc_path_new),
                    "--recovery-target-action=promote"]
            ),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

        node.slow_start()

        result = node.execute("postgres", "SELECT id FROM test")
        self.assertEqual(result[0][0], 1)

        # 4 - Restore using tablespace-mapping using page backup
        self.backup_node(backup_dir, 'node', node)
        with node.connect("postgres") as con:
            con.execute("INSERT INTO test VALUES (2)")
            con.commit()
        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type="page")

        show_pb = self.show_pb(backup_dir, 'node')
        self.assertEqual(show_pb[1]['status'], "OK")
        self.assertEqual(show_pb[2]['status'], "OK")

        node.stop()
        node.cleanup()
        tblspc_path_page = os.path.join(node.base_dir, "tblspc_page")

        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                options=[
                    "-T", "%s=%s" % (tblspc_path_new, tblspc_path_page),
                    "--recovery-target-action=promote"]),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

        node.slow_start()
        result = node.execute("postgres", "SELECT id FROM test OFFSET 1")
        self.assertEqual(result[0][0], 2)

        # Clean after yourself
        self.del_test_dir(module_name, fname)
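
    # Note on -T above: the option takes OLDDIR=NEWDIR and relocates the
    # tablespace stored under OLDDIR in the backup to NEWDIR at restore
    # time. In step 4 the mapping source is tblspc_path_new rather than
    # tblspc_path, because the page backup was taken from the node restored
    # in step 3, whose tablespace already lives in tblspc_path_new.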

    # @unittest.skip("skip")
    def test_restore_with_tablespace_mapping_2(self):
        """recovery using tablespace-mapping option and page backup"""
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica'}
            )
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # Full backup
        self.backup_node(backup_dir, 'node', node)
        self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")

        # Create tablespace
        tblspc_path = os.path.join(node.base_dir, "tblspc")
        os.makedirs(tblspc_path)
        with node.connect("postgres") as con:
            con.connection.autocommit = True
            con.execute("CREATE TABLESPACE tblspc LOCATION '%s'" % tblspc_path)
            con.connection.autocommit = False
            con.execute(
                "CREATE TABLE tbl AS SELECT * "
                "FROM generate_series(0,3) AS integer")
            con.commit()

        # First page backup
        self.backup_node(backup_dir, 'node', node, backup_type="page")
        self.assertEqual(self.show_pb(backup_dir, 'node')[1]['status'], "OK")
        self.assertEqual(
            self.show_pb(backup_dir, 'node')[1]['backup-mode'], "PAGE")

        # Create tablespace table
        with node.connect("postgres") as con:
            con.connection.autocommit = True
            con.execute("CHECKPOINT")
            con.connection.autocommit = False
            con.execute("CREATE TABLE tbl1 (a int) TABLESPACE tblspc")
            con.execute(
                "INSERT INTO tbl1 SELECT * "
                "FROM generate_series(0,3) AS integer")
            con.commit()

        # Second page backup
        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type="page")
        self.assertEqual(self.show_pb(backup_dir, 'node')[2]['status'], "OK")
        self.assertEqual(
            self.show_pb(backup_dir, 'node')[2]['backup-mode'], "PAGE")

        node.stop()
        node.cleanup()

        tblspc_path_new = os.path.join(node.base_dir, "tblspc_new")

        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                options=[
                    "-T", "%s=%s" % (tblspc_path, tblspc_path_new),
                    "--recovery-target-action=promote"]),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))
        node.slow_start()

        count = node.execute("postgres", "SELECT count(*) FROM tbl")
        self.assertEqual(count[0][0], 4)
        count = node.execute("postgres", "SELECT count(*) FROM tbl1")
        self.assertEqual(count[0][0], 4)

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_archive_node_backup_stream_restore_to_recovery_time(self):
        """
        make node with archiving, make stream backup,
        make PITR to Recovery Time
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
            )
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        backup_id = self.backup_node(
            backup_dir, 'node', node, options=["--stream"])
        node.safe_psql("postgres", "create table t_heap(a int)")
        node.safe_psql("postgres", "select pg_switch_xlog()")
        node.stop()
        node.cleanup()

        recovery_time = self.show_pb(
            backup_dir, 'node', backup_id)['recovery-time']

        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                options=[
                    "-j", "4", '--time={0}'.format(recovery_time),
                    "--recovery-target-action=promote"
                    ]
                ),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

        node.slow_start()

        result = node.psql("postgres", 'select * from t_heap')
        self.assertTrue('does not exist' in result[2].decode("utf-8"))

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # Same scenario as the previous test, but without forcing a WAL segment
    # switch before the node is stopped.
    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_archive_node_backup_stream_restore_to_recovery_time_2(self):
        """
        make node with archiving, make stream backup,
        make PITR to Recovery Time
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
            )
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        backup_id = self.backup_node(
            backup_dir, 'node', node, options=["--stream"])
        node.safe_psql("postgres", "create table t_heap(a int)")
        node.stop()
        node.cleanup()

        recovery_time = self.show_pb(
            backup_dir, 'node', backup_id)['recovery-time']

        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                options=[
                    "-j", "4", '--time={0}'.format(recovery_time),
                    "--recovery-target-action=promote"
                    ]
                ),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

        node.slow_start()
        result = node.psql("postgres", 'select * from t_heap')
        self.assertTrue('does not exist' in result[2].decode("utf-8"))

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_archive_node_backup_stream_pitr(self):
        """
        make node with archiving, make stream backup,
        create table t_heap, make pitr to Recovery Time,
        check that t_heap does not exist
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
            )
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        backup_id = self.backup_node(
            backup_dir, 'node', node, options=["--stream"])
        node.safe_psql("postgres", "create table t_heap(a int)")
        node.cleanup()

        recovery_time = self.show_pb(
            backup_dir, 'node', backup_id)['recovery-time']

        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                options=[
                    "-j", "4", '--time={0}'.format(recovery_time),
                    "--recovery-target-action=promote"
                    ]
                ),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

        node.slow_start()

        result = node.psql("postgres", 'select * from t_heap')
        self.assertEqual(True, 'does not exist' in result[2].decode("utf-8"))

        # Clean after yourself
        self.del_test_dir(module_name, fname)
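
    # Note on 'recovery-time' above: it is the timestamp pg_probackup
    # records for the point at which the backup became consistent, so
    # restoring with --time set to exactly that value replays nothing
    # committed afterwards; t_heap, created after the backup, must
    # therefore be absent.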

    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_archive_node_backup_archive_pitr_2(self):
        """
        make node with archiving, make archive backup,
        create table t_heap, make pitr to Recovery Time,
        check that t_heap does not exist
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica'}
            )
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        backup_id = self.backup_node(backup_dir, 'node', node)
        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        node.safe_psql("postgres", "create table t_heap(a int)")
        node.stop()

        node_restored = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node_restored'))
        node_restored.cleanup()

        recovery_time = self.show_pb(
            backup_dir, 'node', backup_id)['recovery-time']

        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(backup_id),
            self.restore_node(
                backup_dir, 'node', node_restored,
                options=[
                    "-j", "4", '--time={0}'.format(recovery_time),
                    "--recovery-target-action=promote"]
                ),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        node_restored.append_conf(
            "postgresql.auto.conf", "port = {0}".format(node_restored.port))

        node_restored.slow_start()

        result = node_restored.psql("postgres", 'select * from t_heap')
        self.assertTrue('does not exist' in result[2].decode("utf-8"))

        # Clean after yourself
        self.del_test_dir(module_name, fname)
    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_archive_restore_to_restore_point(self):
        """
        make node with archiving, make archive backup,
        create table t_heap, create a named restore point,
        create table t_heap_1, restore to the restore point,
        check that t_heap_1 does not exist and t_heap is intact
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica'}
        )

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        self.backup_node(backup_dir, 'node', node)

        node.safe_psql(
            "postgres",
            "create table t_heap as select generate_series(0,10000)")
        result = node.safe_psql(
            "postgres",
            "select * from t_heap")

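        # pg_create_restore_point() writes a named marker into the WAL;
        # recovery can later stop exactly at it via --recovery-target-name.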
        node.safe_psql(
            "postgres", "select pg_create_restore_point('savepoint')")
        node.safe_psql(
            "postgres",
            "create table t_heap_1 as select generate_series(0,10000)")
        node.cleanup()

        self.restore_node(
            backup_dir, 'node', node,
            options=[
                "--recovery-target-name=savepoint",
                "--recovery-target-action=promote"])

        node.slow_start()

        result_new = node.safe_psql("postgres", "select * from t_heap")
        res = node.psql("postgres", "select * from t_heap_1")
        self.assertEqual(
            res[0], 1,
            "Table t_heap_1 should not exist in restored instance")

        self.assertEqual(result, result_new)

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_zags_block_corrupt(self):
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica'}
        )

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        self.backup_node(backup_dir, 'node', node)

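        # The workload below churns a btree index (inserts, deletes,
        # scans) so that index blocks get modified and logged after
        # the backup was taken.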
        with node.connect("postgres") as conn:
            conn.execute(
                "create table tbl(i int)")
            conn.commit()
            conn.execute(
                "create index idx ON tbl (i)")
            conn.commit()
            conn.execute(
                "insert into tbl select i from generate_series(0,400) as i")
            conn.commit()
            conn.execute(
                "select pg_relation_size('idx')")
            conn.commit()
            conn.execute(
                "delete from tbl where i < 100")
            conn.commit()
            conn.execute(
                "explain analyze select i from tbl order by i")
            conn.commit()
            conn.execute(
                "select i from tbl order by i")
            conn.commit()
            conn.execute(
                "create extension pageinspect")
            conn.commit()
            print(conn.execute(
                "select * from bt_page_stats('idx',1)"))
            conn.commit()
            conn.execute(
                "insert into tbl select i from generate_series(0,100) as i")
            conn.commit()
            conn.execute(
                "insert into tbl select i from generate_series(0,100) as i")
            conn.commit()
            conn.execute(
                "insert into tbl select i from generate_series(0,100) as i")
            conn.commit()
            conn.execute(
                "insert into tbl select i from generate_series(0,100) as i")

        node_restored = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node_restored'),
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica'}
        )

        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored)

        node_restored.append_conf(
            "postgresql.auto.conf", "archive_mode = 'off'")
        node_restored.append_conf(
            "postgresql.auto.conf", "hot_standby = 'on'")
        node_restored.append_conf(
            "postgresql.auto.conf", "port = {0}".format(node_restored.port))

        node_restored.slow_start()

    @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_zags_block_corrupt_1(self):
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'],
            pg_options={
                'wal_level': 'replica',
                'autovacuum': 'off',
                'full_page_writes': 'on'}
        )

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        self.backup_node(backup_dir, 'node', node)

        node.safe_psql('postgres', 'create table tbl(i int)')

        node.safe_psql('postgres', 'create index idx ON tbl (i)')

        node.safe_psql(
            'postgres',
            'insert into tbl select i from generate_series(0,100000) as i')

        print(node.safe_psql(
            'postgres',
            "select pg_relation_size('idx')"))

        node.safe_psql(
            'postgres',
            'delete from tbl where i%2 = 0')

        node.safe_psql(
            'postgres',
            'explain analyze select i from tbl order by i')

        node.safe_psql(
            'postgres',
            'select i from tbl order by i')

        node.safe_psql(
            'postgres',
            'create extension pageinspect')

        print(node.safe_psql(
            'postgres',
            "select * from bt_page_stats('idx',1)"))

        node.safe_psql(
            'postgres',
            'checkpoint')

        node.safe_psql(
            'postgres',
            'insert into tbl select i from generate_series(0,100) as i')

        node.safe_psql(
            'postgres',
            'insert into tbl select i from generate_series(0,100) as i')

        node.safe_psql(
            'postgres',
            'insert into tbl select i from generate_series(0,100) as i')

        node.safe_psql(
            'postgres',
            'insert into tbl select i from generate_series(0,100) as i')

        self.switch_wal_segment(node)

        node_restored = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node_restored'),
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica'}
        )

        pgdata = self.pgdata_content(node.data_dir)

        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored)

        node_restored.append_conf(
            "postgresql.auto.conf", "archive_mode = 'off'")
        node_restored.append_conf(
            "postgresql.auto.conf", "hot_standby = 'on'")
        node_restored.append_conf(
            "postgresql.auto.conf", "port = {0}".format(node_restored.port))

        node_restored.slow_start()

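        # Poll the server log until recovery has finished and the node
        # has selected a new timeline; only then is the data directory
        # in a state worth comparing.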
        while True:
            with open(node_restored.pg_log_file, 'r') as f:
                if 'selected new timeline ID' in f.read():
                    break

        with open(node_restored.pg_log_file, 'r') as f:
            print(f.read())

        pgdata_restored = self.pgdata_content(node_restored.data_dir)

        self.compare_pgdata(pgdata, pgdata_restored)

        # pg_xlogdump_path = self.get_bin_path('pg_xlogdump')

        # pg_xlogdump = self.run_binary(
        #     [
        #         pg_xlogdump_path, '-b',
        #         os.path.join(backup_dir, 'wal', 'node', '000000010000000000000003'),
        #         ' | ', 'grep', 'Btree', ''
        #     ], async=False)

        # The check below is commented out along with the pg_xlogdump
        # call above; referencing pg_xlogdump here would otherwise raise
        # a NameError.
        # if pg_xlogdump.returncode:
        #     self.assertFalse(
        #         True,
        #         'Failed to start pg_xlogdump: {0}'.format(
        #             pg_xlogdump.communicate()[1]))

    # @unittest.skip("skip")
    def test_restore_chain(self):
        """
        make node, take full backup, take several
        ERROR delta backups, take valid delta backup,
        restore must be successful
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'wal_level': 'replica',
                'max_wal_senders': '2'})

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # Take FULL
        self.backup_node(
            backup_dir, 'node', node)

        # Take DELTA
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta')

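        # --archive-timeout=0s makes the backup give up waiting for the
        # required WAL segment immediately, which reliably leaves the
        # backup in ERROR state.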
        # Take ERROR DELTA
        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type='delta', options=['--archive-timeout=0s'])
        except ProbackupException:
            pass

        # Take ERROR DELTA
        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type='delta', options=['--archive-timeout=0s'])
        except ProbackupException:
            pass

        # Take DELTA
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta')

        # Take ERROR DELTA
        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type='delta', options=['--archive-timeout=0s'])
        except ProbackupException:
            pass

        self.assertEqual(
            'OK',
            self.show_pb(backup_dir, 'node')[0]['status'],
            'Backup STATUS should be "OK"')

        self.assertEqual(
            'OK',
            self.show_pb(backup_dir, 'node')[1]['status'],
            'Backup STATUS should be "OK"')

        self.assertEqual(
            'ERROR',
            self.show_pb(backup_dir, 'node')[2]['status'],
            'Backup STATUS should be "ERROR"')

        self.assertEqual(
            'ERROR',
            self.show_pb(backup_dir, 'node')[3]['status'],
            'Backup STATUS should be "ERROR"')

        self.assertEqual(
            'OK',
            self.show_pb(backup_dir, 'node')[4]['status'],
            'Backup STATUS should be "OK"')

        self.assertEqual(
            'ERROR',
            self.show_pb(backup_dir, 'node')[5]['status'],
            'Backup STATUS should be "ERROR"')

        node.cleanup()

        self.restore_node(backup_dir, 'node', node)

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_restore_chain_with_corrupted_backup(self):
        """
        more complex test_restore_chain(): corrupt one backup
        in the middle of the chain, restore from its descendant
        must fail with an ORPHAN error
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'wal_level': 'replica',
                'max_wal_senders': '2'})

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # Take FULL
        self.backup_node(
            backup_dir, 'node', node)

        # Take PAGE
        self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # Take ERROR PAGE
        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type='page', options=['--archive-timeout=0s'])
        except ProbackupException:
            pass

        # Take 1 DELTA
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta')

        # Take ERROR DELTA
        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type='delta', options=['--archive-timeout=0s'])
        except ProbackupException:
            pass

        # Take 2 DELTA
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta')

        # Take ERROR DELTA
        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type='delta', options=['--archive-timeout=0s'])
        except ProbackupException:
            pass

        # Take 3 DELTA
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta')

        # 4 DELTA, to be corrupted below
        corrupt_id = self.backup_node(
            backup_dir, 'node', node, backup_type='delta')

        # 5 DELTA, will become ORPHAN
        restore_target_id = self.backup_node(
            backup_dir, 'node', node, backup_type='delta')

        # 6 DELTA, will become ORPHAN
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta')

        # NEXT FULL BACKUP
        self.backup_node(
            backup_dir, 'node', node, backup_type='full')

        # Next DELTA
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta')

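        # Moving pg_control out of the backup makes that backup CORRUPT;
        # pg_probackup then marks every increment depending on it ORPHAN,
        # so restoring from restore_target_id must fail.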
        # corrupt the 4 DELTA backup (corrupt_id)
        file = os.path.join(
            backup_dir, 'backups', 'node',
            corrupt_id, 'database', 'global', 'pg_control')

        file_new = os.path.join(backup_dir, 'pg_control')
        os.rename(file, file_new)

        # RESTORE BACKUP
        node.cleanup()

        try:
            self.restore_node(
                backup_dir, 'node', node, backup_id=restore_target_id)
            self.assertEqual(
                1, 0,
                "Expecting Error because restore backup is corrupted.\n "
                "Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Backup {0} is orphan'.format(restore_target_id),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            'OK',
            self.show_pb(backup_dir, 'node')[0]['status'],
            'Backup STATUS should be "OK"')

        self.assertEqual(
            'OK',
            self.show_pb(backup_dir, 'node')[1]['status'],
            'Backup STATUS should be "OK"')

        self.assertEqual(
            'ERROR',
            self.show_pb(backup_dir, 'node')[2]['status'],
            'Backup STATUS should be "ERROR"')

        self.assertEqual(
            'OK',
            self.show_pb(backup_dir, 'node')[3]['status'],
            'Backup STATUS should be "OK"')

        self.assertEqual(
            'ERROR',
            self.show_pb(backup_dir, 'node')[4]['status'],
            'Backup STATUS should be "ERROR"')

        self.assertEqual(
            'OK',
            self.show_pb(backup_dir, 'node')[5]['status'],
            'Backup STATUS should be "OK"')

        self.assertEqual(
            'ERROR',
            self.show_pb(backup_dir, 'node')[6]['status'],
            'Backup STATUS should be "ERROR"')

        self.assertEqual(
            'OK',
            self.show_pb(backup_dir, 'node')[7]['status'],
            'Backup STATUS should be "OK"')

        # corruption victim
        self.assertEqual(
            'CORRUPT',
            self.show_pb(backup_dir, 'node')[8]['status'],
            'Backup STATUS should be "CORRUPT"')

        # orphaned child
        self.assertEqual(
            'ORPHAN',
            self.show_pb(backup_dir, 'node')[9]['status'],
            'Backup STATUS should be "ORPHAN"')

        # orphaned child
        self.assertEqual(
            'ORPHAN',
            self.show_pb(backup_dir, 'node')[10]['status'],
            'Backup STATUS should be "ORPHAN"')

        # next FULL
        self.assertEqual(
            'OK',
            self.show_pb(backup_dir, 'node')[11]['status'],
            'Backup STATUS should be "OK"')

        # next DELTA
        self.assertEqual(
            'OK',
            self.show_pb(backup_dir, 'node')[12]['status'],
            'Backup STATUS should be "OK"')

        node.cleanup()

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_restore_backup_from_future(self):
        """restore a backup whose start-time lies in the future"""
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'wal_level': 'replica',
                'max_wal_senders': '2'})

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # Take FULL
        self.backup_node(backup_dir, 'node', node)

        node.pgbench_init(scale=3)
        # pgbench = node.pgbench(options=['-T', '20', '-c', '2'])
        # pgbench.wait()

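        # Simulate a backup "from the future": push start-time in
        # backup.control three days ahead, then rename the backup
        # directory to the id pg_probackup now derives from it.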
        # Take PAGE from future
        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        with open(
                os.path.join(
                    backup_dir, 'backups', 'node',
                    backup_id, "backup.control"), "a") as conf:
            conf.write("start-time='{:%Y-%m-%d %H:%M:%S}'\n".format(
                datetime.now() + timedelta(days=3)))

        # rename directory
        new_id = self.show_pb(backup_dir, 'node')[1]['id']

        os.rename(
            os.path.join(backup_dir, 'backups', 'node', backup_id),
            os.path.join(backup_dir, 'backups', 'node', new_id))

        pgbench = node.pgbench(options=['-T', '3', '-c', '2', '--no-vacuum'])
        pgbench.wait()

        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type='page')
        pgdata = self.pgdata_content(node.data_dir)

        node.cleanup()
        self.restore_node(backup_dir, 'node', node, backup_id=backup_id)

        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_restore_target_immediate_stream(self):
        """
        immediate recovery target for STREAM backup:
        recovery.conf must not be created
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        # Take FULL
        self.backup_node(
            backup_dir, 'node', node, options=['--stream'])

        # Take delta
        backup_id = self.backup_node(
            backup_dir, 'node', node,
            backup_type='delta', options=['--stream'])

        pgdata = self.pgdata_content(node.data_dir)

        recovery_conf = os.path.join(node.data_dir, 'recovery.conf')

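        # --immediate and --recovery-target=immediate request the same
        # target: stop recovery as soon as a consistent state is reached.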
        # restore delta backup
        node.cleanup()
        self.restore_node(
            backup_dir, 'node', node, options=['--immediate'])

        # For stream backup with immediate recovery target there is no
        # need to create recovery.conf. Is it wise?
        self.assertFalse(
            os.path.isfile(recovery_conf))

        # restore delta backup
        node.cleanup()
        self.restore_node(
            backup_dir, 'node', node, options=['--recovery-target=immediate'])

        # For stream backup with immediate recovery target there is no
        # need to create recovery.conf. Is it wise?
        self.assertFalse(
            os.path.isfile(recovery_conf))

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_restore_target_immediate_archive(self):
        """
        immediate recovery target for ARCHIVE backup:
        recovery.conf must be created
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # Take FULL
        self.backup_node(
            backup_dir, 'node', node)

        # Take delta
        backup_id = self.backup_node(
            backup_dir, 'node', node,
            backup_type='delta')

        pgdata = self.pgdata_content(node.data_dir)

        recovery_conf = os.path.join(node.data_dir, 'recovery.conf')

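        # An archive backup replays WAL from the archive during recovery,
        # so restore must write a recovery.conf carrying the target.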
        # restore delta backup
        node.cleanup()
        self.restore_node(
            backup_dir, 'node', node, options=['--immediate'])

        # For archive backup with immediate recovery target
        # recovery.conf is mandatory
        with open(recovery_conf, 'r') as f:
            self.assertIn("recovery_target = 'immediate'", f.read())

        # restore delta backup
        node.cleanup()
        self.restore_node(
            backup_dir, 'node', node, options=['--recovery-target=immediate'])

        # For archive backup with immediate recovery target
        # recovery.conf is mandatory
        with open(recovery_conf, 'r') as f:
            self.assertIn("recovery_target = 'immediate'", f.read())

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_restore_target_latest_archive(self):
        """
        recovery target 'latest' must be the default:
        restore with and without --recovery-target=latest
        must produce identical recovery.conf
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # Take FULL
        self.backup_node(
            backup_dir, 'node', node)

        recovery_conf = os.path.join(node.data_dir, 'recovery.conf')

        # restore with default target
        node.cleanup()
        self.restore_node(
            backup_dir, 'node', node)

        with open(recovery_conf, 'r') as f:
            print(f.read())

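        # If 'latest' is indeed the default, the recovery.conf written
        # with no target and with an explicit --recovery-target=latest
        # must be byte-identical; compare them via md5.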
        with open(recovery_conf, 'rb') as f:
            hash_1 = hashlib.md5(f.read()).hexdigest()

        # restore with explicit target
        node.cleanup()
        self.restore_node(
            backup_dir, 'node', node, options=['--recovery-target=latest'])

        with open(recovery_conf, 'r') as f:
            print(f.read())

        with open(recovery_conf, 'rb') as f:
            hash_2 = hashlib.md5(f.read()).hexdigest()

        self.assertEqual(hash_1, hash_2)

        # Clean after yourself
        self.del_test_dir(module_name, fname)