import os
from os import path, listdir
import subprocess
import shutil

import six
from testgres import get_new_node

def dir_files(base_dir):
    """Return a sorted list of every entry under *base_dir*.

    Both subdirectories and files are listed, as paths relative to
    *base_dir*; *base_dir* itself is not included.
    """
    entries = []
    for root, _subdirs, files in os.walk(base_dir):
        if root != base_dir:
            entries.append(path.relpath(root, base_dir))
        entries.extend(
            path.relpath(path.join(root, name), base_dir)
            for name in files
        )
    return sorted(entries)
class ShowBackup(object):
    """One parsed row of the tabular `pg_probackup show` output.

    The row arrives pre-split on whitespace; fields are consumed left to
    right through a cursor (``self.counter``) kept by :meth:`get_inc`.
    """

    def __init__(self, split_line):
        self.counter = 0

        self.id = self.get_inc(split_line)
        # TODO: parse to datetime
        # A 12-field row carries recovery time as two tokens ("date time");
        # otherwise it is a single placeholder token such as '----'.
        if len(split_line) == 12:
            date_token = self.get_inc(split_line)
            time_token = self.get_inc(split_line)
            self.recovery_time = "%s %s" % (date_token, time_token)
        else:
            self.recovery_time = self.get_inc(split_line)
        self.mode = self.get_inc(split_line)
        self.cur_tli = self.get_inc(split_line)
        # Skip the '/' separator between current and parent timeline.
        self.counter += 1
        self.parent_tli = self.get_inc(split_line)
        # TODO: parse to interval
        self.time = self.get_inc(split_line)
        # TODO: maybe rename to size?
        self.data = self.get_inc(split_line)
        self.start_lsn = self.get_inc(split_line)
        self.stop_lsn = self.get_inc(split_line)
        self.status = self.get_inc(split_line)

    def get_inc(self, split_line):
        """Return the field under the cursor and advance the cursor."""
        value = split_line[self.counter]
        self.counter += 1
        return value
class ProbackupTest(object):
    """Mixin with helpers for driving pg_probackup against testgres nodes.

    Prepares a sanitized environment for child processes, locates the
    pg_probackup binary next to the test directory, and wraps the
    init / backup / restore / show / validate / delete / retention
    subcommands as methods returning their raw (bytes) output.
    """

    def __init__(self, *args, **kwargs):
        super(ProbackupTest, self).__init__(*args, **kwargs)
        # Copy the caller's environment, then drop libpq / locale
        # variables that could leak into spawned processes and change
        # their behavior or output.
        self.test_env = os.environ.copy()
        envs_list = [
            "LANGUAGE",
            "LC_ALL",
            "PGCONNECT_TIMEOUT",
            "PGDATA",
            "PGDATABASE",
            "PGHOSTADDR",
            "PGREQUIRESSL",
            "PGSERVICE",
            "PGSSLMODE",
            "PGUSER",
            "PGPORT",
            "PGHOST"
        ]
        for e in envs_list:
            # pop() with a default replaces the old bare try/except: pass.
            self.test_env.pop(e, None)

        # Force stable, parseable message and timestamp formatting.
        self.test_env["LC_MESSAGES"] = "C"
        self.test_env["LC_TIME"] = "C"

        self.dir_path = path.dirname(os.path.realpath(__file__))
        try:
            os.makedirs(path.join(self.dir_path, "tmp_dirs"))
        except OSError:
            # Directory already exists; any other problem will surface
            # as soon as a test tries to use it.
            pass
        self.probackup_path = os.path.abspath(path.join(
            self.dir_path,
            "../pg_probackup"
        ))

    def arcwal_dir(self, node):
        """Return the WAL archive directory used for *node*."""
        return "%s/backup/wal" % node.base_dir

    def backup_dir(self, node):
        """Return the absolute backup catalog directory for *node*."""
        return os.path.abspath("%s/backup" % node.base_dir)

    def make_bnode(self, base_dir=None, allows_streaming=False, options=None):
        """Create, wipe and initialize a test node with WAL archiving.

        :param base_dir: directory (relative to the test dir) for the node
        :param allows_streaming: init the node for streaming replication
        :param options: optional dict of extra postgresql.conf settings
        """
        real_base_dir = path.join(self.dir_path, base_dir)
        shutil.rmtree(real_base_dir, ignore_errors=True)

        node = get_new_node('test', base_dir=real_base_dir)
        node.init(allows_streaming=allows_streaming)

        if not allows_streaming:
            node.append_conf("postgresql.conf", "wal_level = hot_standby")
        node.append_conf("postgresql.conf", "archive_mode = on")
        node.append_conf(
            "postgresql.conf",
            """archive_command = 'cp "%%p" "%s/%%f"'""" % os.path.abspath(self.arcwal_dir(node))
        )

        for key, value in six.iteritems(options or {}):
            node.append_conf("postgresql.conf", "%s = %s" % (key, value))

        return node

    def make_bnode_replica(self, root_node, base_dir=None, options=None):
        """Create a streaming replica of *root_node* from a fresh basebackup.

        :param options: optional dict of extra postgresql.conf settings
        """
        real_base_dir = path.join(self.dir_path, base_dir)
        shutil.rmtree(real_base_dir, ignore_errors=True)

        root_node.backup("basebackup")

        replica = get_new_node("replica", base_dir=real_base_dir)
        # replica.init_from_backup(root_node, "data_replica", has_streaming=True)

        # Move data from backup
        backup_path = os.path.join(root_node.base_dir, "basebackup")
        shutil.move(backup_path, replica.data_dir)
        os.chmod(replica.data_dir, 0o0700)

        # Change port in config file
        replica.append_conf(
            "postgresql.conf",
            "port = {}".format(replica.port)
        )
        # Enable streaming
        replica.enable_streaming(root_node)

        for key, value in six.iteritems(options or {}):
            replica.append_conf("postgresql.conf", "%s = %s" % (key, value))

        return replica

    def run_pb(self, command):
        """Run pg_probackup with *command* args and return its output.

        On a non-zero exit status the captured output is returned instead
        of raising, so tests can assert on error messages.
        """
        try:
            return subprocess.check_output(
                [self.probackup_path] + command,
                stderr=subprocess.STDOUT,
                env=self.test_env
            )
        except subprocess.CalledProcessError as err:
            return err.output

    def init_pb(self, node):
        """Initialize the backup catalog for *node*."""
        return self.run_pb([
            "init",
            "-B", self.backup_dir(node),
            "-D", node.data_dir
        ])

    def clean_pb(self, node):
        """Remove the backup catalog of *node* (best effort)."""
        shutil.rmtree(self.backup_dir(node), ignore_errors=True)

    def backup_pb(self, node, backup_type="full", options=None):
        """Take a backup of *node*; a falsy *backup_type* omits -b."""
        cmd_list = [
            "backup",
            "-D", node.data_dir,
            "-B", self.backup_dir(node),
            "-p", "%i" % node.port,
            "-d", "postgres"
        ]
        if backup_type:
            cmd_list += ["-b", backup_type]

        return self.run_pb(cmd_list + (options or []))

    def backup_pb_proc(self, node, backup_dir, backup_type="full",
                       stdout=None, stderr=None, options=None):
        """Start a backup asynchronously and return the Popen handle."""
        cmd_list = [
            self.probackup_path,
            "backup",
            "-D", node.data_dir,
            "-B", backup_dir,
            "-p", "%i" % (node.port),
            "-d", "postgres"
        ]
        if backup_type:
            cmd_list += ["-b", backup_type]

        proc = subprocess.Popen(
            cmd_list + (options or []),
            stdout=stdout,
            stderr=stderr,
            # Same sanitized environment as run_pb() so the output is
            # locale-independent.
            env=self.test_env
        )

        return proc

    def restore_pb(self, node, id=None, options=None):
        """Restore *node* from its catalog, optionally from backup *id*."""
        cmd_list = [
            "-D", node.data_dir,
            "-B", self.backup_dir(node),
            "restore"
        ]
        if id:
            cmd_list.append(id)

        return self.run_pb(cmd_list + (options or []))

    def show_pb(self, node, id=None, options=None, as_text=False):
        """Show the backup catalog of *node*.

        Returns raw output when *as_text* is true, a list of
        :class:`ShowBackup` rows when *id* is None, and a dict of
        ``key=value`` fields for a single backup otherwise.
        """
        options = options or []
        cmd_list = [
            "-B", self.backup_dir(node),
            "show",
        ]
        if id:
            cmd_list += [id]

        if as_text:
            return self.run_pb(options + cmd_list)
        elif id is None:
            # Skip the three header lines of the tabular output.
            return [ShowBackup(line.split()) for line in self.run_pb(options + cmd_list).splitlines()[3:]]
        else:
            # Detailed single-backup output is "key=value" lines with
            # '#'-prefixed comment lines.
            return dict([
                line.split(six.b("="))
                for line in self.run_pb(options + cmd_list).splitlines()
                if line[0] != six.b("#")[0]
            ])

    def validate_pb(self, node, id=None, options=None):
        """Validate the whole catalog of *node* or a single backup *id*."""
        options = options or []
        cmd_list = [
            "-B", self.backup_dir(node),
            "validate",
        ]
        if id:
            cmd_list += [id]

        return self.run_pb(options + cmd_list)

    def delete_pb(self, node, id=None, options=None):
        """Delete a backup (or, with options, a range) from the catalog."""
        options = options or []
        cmd_list = [
            "-B", self.backup_dir(node),
            "delete",
        ]
        if id:
            cmd_list += [id]

        return self.run_pb(options + cmd_list)

    def retention_purge_pb(self, node, options=None):
        """Purge backups that fall outside the retention policy."""
        cmd_list = [
            "-B", self.backup_dir(node),
            "retention", "purge",
        ]

        return self.run_pb((options or []) + cmd_list)

    def retention_show(self, node, options=None):
        """Show the current retention policy and state."""
        cmd_list = [
            "-B", self.backup_dir(node),
            "retention", "show",
        ]

        return self.run_pb((options or []) + cmd_list)

    def get_control_data(self, node):
        """Return pg_controldata output for *node* as a {bytes: bytes} dict."""
        pg_controldata = node.get_bin_path("pg_controldata")
        out_data = {}
        lines = subprocess.check_output(
            [pg_controldata] + ["-D", node.data_dir],
            stderr=subprocess.STDOUT,
            env=self.test_env
        ).splitlines()
        for l in lines:
            # Positional maxsplit: bytes.split() rejects the keyword form
            # on Python 2, and values may themselves contain ':'.
            key, value = l.split(b":", 1)
            out_data[key.strip()] = value.strip()
        return out_data

    def get_recovery_conf(self, node):
        """Parse recovery.conf of *node* into a {key: value} dict.

        NOTE(review): lines whose value itself contains '=' (e.g. a full
        primary_conninfo) do not unpack into two parts and are skipped,
        matching the original behavior.
        """
        out_dict = {}
        with open(path.join(node.data_dir, "recovery.conf"), "r") as recovery_conf:
            for line in recovery_conf:
                try:
                    key, value = line.split("=")
                except ValueError:
                    # Comments, blank lines and multi-'=' lines.
                    continue
                out_dict[key.strip()] = value.strip(" '").replace("'\n", "")

        return out_dict

    def wrong_wal_clean(self, node, wal_size):
        """Delete the newest archived WAL file if it is not *wal_size* bytes.

        Used to drop a partially-archived segment left by a crashed test.
        """
        wals_dir = path.join(self.backup_dir(node), "wal")
        wals = [f for f in listdir(wals_dir) if path.isfile(path.join(wals_dir, f))]
        wals.sort()
        file_path = path.join(wals_dir, wals[-1])
        if path.getsize(file_path) != wal_size:
            os.remove(file_path)

    def guc_wal_segment_size(self, node):
        """Return the WAL segment size of *node* in bytes."""
        var = node.execute("postgres", "select setting from pg_settings where name = 'wal_segment_size'")
        # The setting is reported in WAL blocks; convert to bytes.
        return int(var[0][0]) * self.guc_wal_block_size(node)

    def guc_wal_block_size(self, node):
        """Return the WAL block size of *node* in bytes."""
        var = node.execute("postgres", "select setting from pg_settings where name = 'wal_block_size'")
        return int(var[0][0])