1
0
mirror of https://github.com/postgrespro/pg_probackup.git synced 2025-02-08 14:28:36 +02:00

Start work on restore tests.

This commit is contained in:
stalkerg 2016-12-13 20:45:50 +03:00
parent 3c8242966a
commit 856e110eaf
3 changed files with 358 additions and 2 deletions

View File

@ -1,6 +1,6 @@
import unittest
from . import init_test, option_test, show_test, backup_test, delete_test
from . import init_test, option_test, show_test, backup_test, delete_test, restore_test
def load_tests(loader, tests, pattern):
@ -10,5 +10,6 @@ def load_tests(loader, tests, pattern):
suite.addTests(loader.loadTestsFromModule(show_test))
suite.addTests(loader.loadTestsFromModule(backup_test))
suite.addTests(loader.loadTestsFromModule(delete_test))
suite.addTests(loader.loadTestsFromModule(restore_test))
return suite

View File

@ -35,6 +35,31 @@ class ShowBackup(object):
class ProbackupTest(object):
def __init__(self, *args, **kwargs):
super(ProbackupTest, self).__init__(*args, **kwargs)
self.test_env = os.environ.copy()
envs_list = [
"LANGUAGE",
"LC_ALL",
"PGCONNECT_TIMEOUT",
"PGDATA",
"PGDATABASE",
"PGHOSTADDR",
"PGREQUIRESSL",
"PGSERVICE",
"PGSSLMODE",
"PGUSER",
"PGPORT",
"PGHOST"
]
for e in envs_list:
try:
del self.test_env[e]
except:
pass
self.test_env["LC_MESSAGES"] = "C"
self.test_env["LC_TIME"] = "C"
self.dir_path = path.dirname(os.path.realpath(__file__))
try:
os.makedirs(path.join(self.dir_path, "tmp_dirs"))
@ -73,7 +98,8 @@ class ProbackupTest(object):
try:
return subprocess.check_output(
[self.probackup_path] + command,
stderr=subprocess.STDOUT
stderr=subprocess.STDOUT,
env=self.test_env
)
except subprocess.CalledProcessError as err:
return err.output
@ -102,6 +128,18 @@ class ProbackupTest(object):
# print(cmd_list)
return self.run_pb(cmd_list + options)
def restore_pb(self, node, id=None, options=None):
    """Run `pg_probackup restore` for *node* and return its output.

    node    -- testgres node whose data_dir is restored in place
    id      -- optional backup id to restore from (latest when omitted)
    options -- extra command-line options appended after the base command

    The original signature used a mutable default (`options=[]`), which is
    shared across calls and can leak state; a `None` sentinel avoids that
    while remaining backward-compatible for all callers.
    """
    if options is None:
        options = []
    cmd_list = [
        "-D", node.data_dir,
        "-B", self.backup_dir(node),
        "restore"
    ]
    if id:
        cmd_list.append(id)
    # print(cmd_list)
    return self.run_pb(cmd_list + options)
def show_pb(self, node, id=None, options=[], as_text=False):
cmd_list = [
"-B", self.backup_dir(node),
@ -137,3 +175,28 @@ class ProbackupTest(object):
# print(cmd_list)
return self.run_pb(options + cmd_list)
def get_control_data(self, node):
    """Run pg_controldata on *node*'s data directory and return its
    output parsed into a dict.

    Keys and values are kept as bytes, exactly as emitted by the tool
    (callers index with six.b(...)).
    """
    controldata_bin = node.get_bin_path("pg_controldata")
    raw_output = subprocess.check_output(
        [controldata_bin, "-D", node.data_dir],
        stderr=subprocess.STDOUT,
        env=self.test_env
    )
    parsed = {}
    # Each line has the form b"Some name:   value"; split on the first
    # colon only, since values may themselves contain colons.
    for raw_line in raw_output.splitlines():
        name, raw_value = raw_line.split(b":", maxsplit=1)
        parsed[name.strip()] = raw_value.strip()
    return parsed
def get_recovery_conf(self, node):
    """Parse recovery.conf in *node*'s data directory into a dict.

    Each `key = 'value'` line becomes out_dict[key] = value with the
    surrounding whitespace and single quotes removed; lines without an
    '=' separator (blank lines, comments) are skipped.

    Bug fix: the previous `line.split("=")` + bare `except:` silently
    dropped any setting whose value itself contains '=' (for example
    `primary_conninfo = 'host=127.0.0.1 port=5432'`). `str.partition`
    splits on the first '=' only, keeping such values intact, and no
    exception handling is needed.
    """
    out_dict = {}
    with open(path.join(node.data_dir, "recovery.conf"), "r") as recovery_conf:
        for line in recovery_conf:
            key, sep, value = line.partition("=")
            if not sep:
                continue
            out_dict[key.strip()] = value.strip().strip("'")
    return out_dict

292
tests/restore_test.py Normal file
View File

@ -0,0 +1,292 @@
import unittest
from os import path
import six
from .pb_lib import ProbackupTest
from testgres import stop_all
import subprocess
from datetime import datetime
class RestoreTest(ProbackupTest, unittest.TestCase):
"""Restore scenarios for pg_probackup: restore to latest, to a target
timeline/time/xid, from full, page and ptrack backups."""
def __init__(self, *args, **kwargs):
super(RestoreTest, self).__init__(*args, **kwargs)
@classmethod
def tearDownClass(cls):
# Stop every testgres node left running by the tests in this module.
stop_all()
def test_restore_to_latest_1(self):
"""recovery to latest from full backup"""
node = self.make_bnode('restore_to_latest_1', base_dir="tmp_dirs/restore/restore_to_latest_1")
node.start()
# Catalog initialization is expected to produce no output on success.
self.assertEqual(self.init_pb(node), six.b(""))
node.pgbench_init(scale=2)
# Generate some data/WAL churn before taking the backup.
pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pgbench.wait()
pgbench.stdout.close()
# Reference snapshot to compare against after the restore.
before = node.execute("postgres", "SELECT * FROM pgbench_branches")
with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
backup_log.write(self.backup_pb(node, options=["--verbose"]))
# Simulate a crash with an immediate shutdown, then restore in place.
node.pg_ctl("stop", {
"-D": node.data_dir,
"-w": None,
"-m": "immediate"
})
with open(path.join(node.logs_dir, "restore_1.log"), "wb") as restore_log:
restore_log.write(self.restore_pb(node, options=["-j", "4", "--verbose"]))
# Start and wait (-w) up to 600 seconds for recovery to complete.
node.pg_ctl("start", {
"-D": node.data_dir,
"-w": None,
"-t": "600"
})
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
node.stop()
def test_restore_to_latest_2(self):
"""recovery to latest from full + page backups"""
node = self.make_bnode('restore_to_latest_2', base_dir="tmp_dirs/restore/restore_to_latest_2")
node.start()
self.assertEqual(self.init_pb(node), six.b(""))
node.pgbench_init(scale=2)
# Full backup first, then change data so the page backup has deltas.
with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
backup_log.write(self.backup_pb(node, options=["--verbose"]))
pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pgbench.wait()
pgbench.stdout.close()
# Incremental (page-level) backup on top of the full one.
with open(path.join(node.logs_dir, "backup_2.log"), "wb") as backup_log:
backup_log.write(self.backup_pb(node, backup_type="page", options=["--verbose"]))
# Snapshot taken AFTER the page backup: restore-to-latest must reach it.
before = node.execute("postgres", "SELECT * FROM pgbench_branches")
node.pg_ctl("stop", {
"-D": node.data_dir,
"-w": None,
"-m": "immediate"
})
with open(path.join(node.logs_dir, "restore_1.log"), "wb") as restore_log:
restore_log.write(self.restore_pb(node, options=["-j", "4", "--verbose"]))
node.pg_ctl("start", {
"-D": node.data_dir,
"-w": None,
"-t": "600"
})
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
node.stop()
def test_restore_to_timeline_3(self):
"""recovery to target timeline"""
node = self.make_bnode('restore_to_timeline_3', base_dir="tmp_dirs/restore/restore_to_timeline_3")
node.start()
self.assertEqual(self.init_pb(node), six.b(""))
node.pgbench_init(scale=2)
# Snapshot on the ORIGINAL timeline; later data is written on a new
# timeline and must be discarded by the timeline-targeted restore.
before = node.execute("postgres", "SELECT * FROM pgbench_branches")
with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose"]))
# get_control_data returns bytes keys/values, hence six.b(...).
target_tli = int(self.get_control_data(node)[six.b("Latest checkpoint's TimeLineID")])
node.pg_ctl("stop", {
"-D": node.data_dir,
"-w": None,
"-m": "immediate"
})
# First restore + recovery switches the cluster to a new timeline.
with open(path.join(node.logs_dir, "restore_1.log"), "wb") as restore_log:
restore_log.write(self.restore_pb(node, options=["-j", "4", "--verbose"]))
node.pg_ctl("start", {
"-D": node.data_dir,
"-w": None,
"-t": "600"
})
# Write more data on the new timeline and back it up.
pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pgbench.wait()
pgbench.stdout.close()
with open(path.join(node.logs_dir, "backup_2.log"), "wb") as backup_log:
backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose"]))
node.pg_ctl("stop", {
"-D": node.data_dir,
"-w": None,
"-m": "immediate"
})
# Restore targeting the ORIGINAL timeline recorded earlier.
with open(path.join(node.logs_dir, "restore_2.log"), "wb") as restore_log:
restore_log.write(self.restore_pb(
node,
options=["-j", "4", "--verbose", "--timeline=%i" % target_tli]
))
# The restore must have written the target timeline into recovery.conf.
recovery_target_timeline = self.get_recovery_conf(node)["recovery_target_timeline"]
self.assertEqual(int(recovery_target_timeline), target_tli)
node.pg_ctl("start", {
"-D": node.data_dir,
"-w": None,
"-t": "600"
})
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
node.stop()
def test_restore_to_time_4(self):
"""recovery to target time"""
node = self.make_bnode('restore_to_time_4', base_dir="tmp_dirs/restore/restore_to_time_4")
node.start()
self.assertEqual(self.init_pb(node), six.b(""))
node.pgbench_init(scale=2)
# Snapshot taken BEFORE the target time; data written after target_time
# must be rolled back by point-in-time recovery.
before = node.execute("postgres", "SELECT * FROM pgbench_branches")
with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose"]))
# Recovery target: now, after the backup but before further pgbench churn.
target_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pgbench.wait()
pgbench.stdout.close()
node.pg_ctl("stop", {
"-D": node.data_dir,
"-w": None,
"-m": "immediate"
})
# NOTE(review): no shell is involved, so the embedded double quotes in
# --time="%s" become part of the argument value — confirm pg_probackup
# tolerates/strips them.
with open(path.join(node.logs_dir, "restore_1.log"), "wb") as restore_log:
restore_log.write(self.restore_pb(
node,
options=["-j", "4", "--verbose", '--time="%s"' % target_time]
))
node.pg_ctl("start", {
"-D": node.data_dir,
"-w": None,
"-t": "600"
})
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
node.stop()
def test_restore_to_xid_5(self):
"""recovery to target xid"""
node = self.make_bnode('restore_to_xid_5', base_dir="tmp_dirs/restore/restore_to_xid_5")
node.start()
self.assertEqual(self.init_pb(node), six.b(""))
node.pgbench_init(scale=2)
# Helper table used to capture a transaction id (xmin) to recover to.
with node.connect("postgres") as con:
con.execute("CREATE TABLE tbl0005 (a text)")
con.commit()
with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose"]))
pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pgbench.wait()
pgbench.stdout.close()
# Snapshot taken just before the target transaction commits; recovery
# up to target_xid must reproduce exactly this state.
before = node.execute("postgres", "SELECT * FROM pgbench_branches")
with node.connect("postgres") as con:
res = con.execute("INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)")
con.commit()
# xmin of the inserted row is the xid of the committing transaction.
target_xid = res[0][0]
# Churn written AFTER the target xid; it must not survive the restore.
pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pgbench.wait()
pgbench.stdout.close()
# Enforce segment to be archived to ensure that recovery goes up to the
# wanted point. There is no way to ensure that all segments needed have
# been archived up to the xmin point saved earlier without that.
node.execute("postgres", "SELECT pg_switch_xlog()")
node.pg_ctl("stop", {
"-D": node.data_dir,
"-w": None,
"-m": "fast"
})
with open(path.join(node.logs_dir, "restore_1.log"), "wb") as restore_log:
restore_log.write(self.restore_pb(
node,
options=["-j", "4", "--verbose", '--xid=%s' % target_xid]
))
node.pg_ctl("start", {
"-D": node.data_dir,
"-w": None,
"-t": "600"
})
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
node.stop()
def test_restore_full_ptrack_6(self):
"""recovery to latest from full + ptrack backups"""
node = self.make_bnode('restore_full_ptrack_6', base_dir="tmp_dirs/restore/restore_full_ptrack_6")
node.start()
self.assertEqual(self.init_pb(node), six.b(""))
node.pgbench_init(scale=2)
# Skip on servers without ptrack support (detected via the presence of
# the pg_ptrack_clear function in pg_proc).
is_ptrack = node.execute("postgres", "SELECT proname FROM pg_proc WHERE proname='pg_ptrack_clear'")
if not is_ptrack:
node.stop()
self.skipTest("ptrack not supported")
return
# ptrack_enable requires a restart to take effect.
node.append_conf("postgresql.conf", "ptrack_enable = on")
node.restart()
with open(path.join(node.logs_dir, "backup_1.log"), "wb") as backup_log:
backup_log.write(self.backup_pb(node, backup_type="full", options=["--verbose"]))
# Change data so the ptrack backup has modified pages to pick up.
pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pgbench.wait()
pgbench.stdout.close()
with open(path.join(node.logs_dir, "backup_2.log"), "wb") as backup_log:
backup_log.write(self.backup_pb(node, backup_type="ptrack", options=["--verbose"]))
# Snapshot after the ptrack backup: restore-to-latest must reach it.
before = node.execute("postgres", "SELECT * FROM pgbench_branches")
node.pg_ctl("stop", {
"-D": node.data_dir,
"-w": None,
"-m": "immediate"
})
with open(path.join(node.logs_dir, "restore_1.log"), "wb") as restore_log:
restore_log.write(self.restore_pb(node, options=["-j", "4", "--verbose"]))
node.pg_ctl("start", {
"-D": node.data_dir,
"-w": None,
"-t": "600"
})
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
node.stop()