1
0
mirror of https://github.com/postgrespro/pg_probackup.git synced 2024-11-28 09:33:54 +02:00

Merge branch 'master' of git.postgrespro.ru:pgpro-dev/pg_probackup

This commit is contained in:
Anastasia 2017-06-07 18:28:35 +03:00
commit e291e53b4a
25 changed files with 386 additions and 163 deletions

View File

@ -6,32 +6,33 @@ from . import init_test, option_test, show_test, \
ptrack_move_to_tablespace, ptrack_recovery, ptrack_vacuum, \
ptrack_vacuum_bits_frozen, ptrack_vacuum_bits_visibility, \
ptrack_vacuum_full, ptrack_vacuum_truncate, pgpro560, pgpro589, \
false_positive, replica
pgpro688, false_positive, replica
def load_tests(loader, tests, pattern):
suite = unittest.TestSuite()
suite.addTests(loader.loadTestsFromModule(replica))
suite.addTests(loader.loadTestsFromModule(pgpro560))
suite.addTests(loader.loadTestsFromModule(pgpro589))
suite.addTests(loader.loadTestsFromModule(false_positive))
suite.addTests(loader.loadTestsFromModule(init_test))
suite.addTests(loader.loadTestsFromModule(option_test))
suite.addTests(loader.loadTestsFromModule(show_test))
suite.addTests(loader.loadTestsFromModule(backup_test))
suite.addTests(loader.loadTestsFromModule(delete_test))
suite.addTests(loader.loadTestsFromModule(restore_test))
suite.addTests(loader.loadTestsFromModule(validate_test))
suite.addTests(loader.loadTestsFromModule(retention_test))
suite.addTests(loader.loadTestsFromModule(ptrack_clean))
suite.addTests(loader.loadTestsFromModule(ptrack_cluster))
suite.addTests(loader.loadTestsFromModule(ptrack_move_to_tablespace))
suite.addTests(loader.loadTestsFromModule(ptrack_recovery))
suite.addTests(loader.loadTestsFromModule(ptrack_vacuum))
suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_bits_frozen))
suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_bits_visibility))
suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_full))
suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_truncate))
# suite.addTests(loader.loadTestsFromModule(replica))
# suite.addTests(loader.loadTestsFromModule(pgpro560))
# suite.addTests(loader.loadTestsFromModule(pgpro589))
suite.addTests(loader.loadTestsFromModule(pgpro688))
# suite.addTests(loader.loadTestsFromModule(false_positive))
# suite.addTests(loader.loadTestsFromModule(init_test))
# suite.addTests(loader.loadTestsFromModule(option_test))
# suite.addTests(loader.loadTestsFromModule(show_test))
# suite.addTests(loader.loadTestsFromModule(backup_test))
# suite.addTests(loader.loadTestsFromModule(delete_test))
# suite.addTests(loader.loadTestsFromModule(restore_test))
# suite.addTests(loader.loadTestsFromModule(validate_test))
# suite.addTests(loader.loadTestsFromModule(retention_test))
# suite.addTests(loader.loadTestsFromModule(ptrack_clean))
# suite.addTests(loader.loadTestsFromModule(ptrack_cluster))
# suite.addTests(loader.loadTestsFromModule(ptrack_move_to_tablespace))
# suite.addTests(loader.loadTestsFromModule(ptrack_recovery))
# suite.addTests(loader.loadTestsFromModule(ptrack_vacuum))
# suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_bits_frozen))
# suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_bits_visibility))
# suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_full))
# suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_truncate))
return suite

View File

@ -1,7 +1,7 @@
import unittest
from os import path, listdir
import six
from .ptrack_helpers import ProbackupTest, ProbackupException
from helpers.ptrack_helpers import ProbackupTest, ProbackupException
from testgres import stop_all
@ -16,6 +16,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
# PGPRO-707
def test_backup_modes_archive(self):
"""standart backup modes with ARCHIVE WAL method"""
fname = self.id().split('.')[3]

View File

@ -1,7 +1,7 @@
import unittest
from os import path
import six
from .ptrack_helpers import ProbackupTest, ProbackupException
from helpers.ptrack_helpers import ProbackupTest, ProbackupException
from testgres import stop_all
import subprocess

View File

@ -42,5 +42,11 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
pg_probackup delete -B backup-dir
[--wal] [-i backup-id | --expired]
pg_probackup add-instance -B backup-dir
--instance=instance_name
pg_probackup del-instance -B backup-dir
--instance=instance_name
Read the website for details. <https://github.com/postgrespro/pg_probackup>
Report bugs to <https://github.com/postgrespro/pg_probackup/issues>.

View File

@ -1,7 +1,7 @@
import unittest
import os
import six
from .ptrack_helpers import ProbackupTest, ProbackupException
from helpers.ptrack_helpers import ProbackupTest, ProbackupException
from datetime import datetime, timedelta
from testgres import stop_all
import subprocess

View File

@ -55,8 +55,6 @@ Splitted Body
# You can lookup error message and cmdline in exception object attributes
class ProbackupException(Exception):
def __init__(self, message, cmd):
# print message
# self.message = repr(message).strip("'")
self.message = message
self.cmd = cmd
#need that to make second raise
@ -138,24 +136,31 @@ class ProbackupTest(object):
self.helpers_path = os.path.dirname(os.path.realpath(__file__))
self.dir_path = os.path.abspath(os.path.join(self.helpers_path, os.pardir))
self.tmp_path = os.path.abspath(os.path.join(self.dir_path, 'tmp_dirs'))
try:
os.makedirs(os.path.join(self.dir_path, "tmp_dirs"))
os.makedirs(os.path.join(self.dir_path, 'tmp_dirs'))
except:
pass
self.probackup_path = os.path.abspath(os.path.join(
self.dir_path,
"../pg_probackup"
))
self.dir_path, "../pg_probackup"))
def arcwal_dir(self, node):
return "%s/backup/wal" % node.base_dir
def backup_dir(self, node):
return os.path.abspath("%s/backup" % node.base_dir)
def backup_dir(self, node=None, path=None):
if node:
return os.path.abspath("{0}/backup".format(node.base_dir))
if path:
return
def make_simple_node(self, base_dir=None, set_replication=False,
set_archiving=False, initdb_params=[], pg_options={}):
real_base_dir = os.path.join(self.dir_path, base_dir)
def make_simple_node(
self,
base_dir=None,
set_replication=False,
initdb_params=[],
pg_options={}):
real_base_dir = os.path.join(self.tmp_path, base_dir)
shutil.rmtree(real_base_dir, ignore_errors=True)
node = get_new_node('test', base_dir=real_base_dir)
@ -173,9 +178,6 @@ class ProbackupTest(object):
# Allow replication in pg_hba.conf
if set_replication:
node.set_replication_conf()
# Setup archiving for node
if set_archiving:
self.set_archiving_conf(node, self.arcwal_dir(node))
return node
def create_tblspace_in_node(self, node, tblspc_name, cfs=False):
@ -301,6 +303,7 @@ class ProbackupTest(object):
def run_pb(self, command, async=False):
try:
self.cmd = [' '.join(map(str,[self.probackup_path] + command))]
print self.cmd
if async is True:
return subprocess.Popen(
[self.probackup_path] + command,
@ -319,44 +322,55 @@ class ProbackupTest(object):
for line in self.output.splitlines():
if 'INFO: Backup' and 'completed' in line:
return line.split()[2]
# backup_id = line.split()[2]
# return {'cmd': cmd, 'output': output, 'backup_id': backup_id}
else:
return self.output
# return {'cmd': cmd, 'output': output}
except subprocess.CalledProcessError as e:
raise ProbackupException(e.output, e.cmd)
raise ProbackupException(e.output, self.cmd)
def init_pb(self, node):
def init_pb(self, backup_dir):
return self.run_pb([
"init",
"-B", self.backup_dir(node),
"-B", backup_dir
])
def add_instance(self, backup_dir, instance, node):
return self.run_pb([
"add-instance",
"--instance={0}".format(instance),
"-B", backup_dir,
"-D", node.data_dir
])
def del_instance(self, backup_dir, instance, node):
return self.run_pb([
"del-instance",
"--instance={0}".format(instance),
"-B", backup_dir,
"-D", node.data_dir
])
def clean_pb(self, node):
shutil.rmtree(self.backup_dir(node), ignore_errors=True)
def backup_pb(self, node=None, data_dir=None, backup_dir=None, backup_type="full", options=[], async=False):
if data_dir is None:
data_dir = node.data_dir
if backup_dir is None:
backup_dir = self.backup_dir(node)
def backup_node(self, backup_dir, instance, node, backup_type="full", options=[], async=False):
cmd_list = [
"backup",
"-B", backup_dir,
"-D", data_dir,
"-D", node.data_dir,
"-p", "%i" % node.port,
"-d", "postgres"
"-d", "postgres",
"--instance={0}".format(instance)
]
if backup_type:
cmd_list += ["-b", backup_type]
return self.run_pb(cmd_list + options, async)
def restore_pb(self, node=None, backup_dir=None, data_dir=None, id=None, options=[]):
def restore_node(self, backup_dir, instance, data_dir=None, id=None, options=[]):
if data_dir is None:
data_dir = node.data_dir
if backup_dir is None:
@ -365,22 +379,27 @@ class ProbackupTest(object):
cmd_list = [
"restore",
"-B", backup_dir,
"-D", data_dir
"-D", data_dir,
"--instance={0}".format(instance)
]
if id:
cmd_list += ["-i", id]
return self.run_pb(cmd_list + options)
def show_pb(self, node, id=None, options=[], as_text=False):
def show_pb(self, backup_dir, instance=None, backup_id=None, options=[], as_text=False):
backup_list = []
specific_record = {}
cmd_list = [
"show",
"-B", self.backup_dir(node),
"-B", backup_dir,
]
if id:
cmd_list += ["-i", id]
if instance:
cmd_list += ["--instance={0}".format(instance)]
if backup_id:
cmd_list += ["-i", backup_id]
if as_text:
# You should print it when calling as_text=true
@ -388,7 +407,7 @@ class ProbackupTest(object):
# get show result as list of lines
show_splitted = self.run_pb(cmd_list + options).splitlines()
if id is None:
if instance is not None and backup_id is None:
# cut header(ID, Mode, etc) from show as single string
header = show_splitted[1:2][0]
# cut backup records from show as single list with string for every backup record
@ -430,36 +449,40 @@ class ProbackupTest(object):
specific_record[name.strip()] = var
return specific_record
def validate_pb(self, node, id=None, options=[]):
def validate_pb(self, backup_dir, instance=None, id=None, options=[]):
cmd_list = [
"validate",
"-B", self.backup_dir(node),
"-B", backup_dir,
]
if instance:
cmd_list += ["--instance={0}".format(instance)]
if id:
cmd_list += ["-i", id]
# print(cmd_list)
return self.run_pb(cmd_list + options)
def delete_pb(self, node, id=None, options=[]):
def delete_pb(self, backup_dir, instance=None, id=None, options=[]):
cmd_list = [
"delete",
"-B", self.backup_dir(node),
]
if instance:
cmd_list += ["--instance={0}".format(instance)]
if id:
cmd_list += ["-i", id]
# print(cmd_list)
return self.run_pb(cmd_list + options)
def delete_expired(self, node, options=[]):
def delete_expired(self, backup_dir, instance=None, options=[]):
cmd_list = [
"delete", "--expired",
"-B", self.backup_dir(node),
]
return self.run_pb(cmd_list + options)
def show_config(self, node):
def show_config(self, backup_dir, instance=None):
out_dict = {}
cmd_list = [
"show-config",
@ -484,9 +507,7 @@ class ProbackupTest(object):
out_dict[key.strip()] = value.strip(" '").replace("'\n", "")
return out_dict
def set_archiving_conf(self, node, archive_dir=False, replica=False):
if not archive_dir:
archive_dir = self.arcwal_dir(node)
def set_archiving(self, backup_dir, instance, node, replica=False):
if replica:
archive_mode = 'always'
@ -505,8 +526,8 @@ class ProbackupTest(object):
if os.name == 'posix':
node.append_conf(
"postgresql.auto.conf",
"archive_command = 'test ! -f {0}/%f && cp %p {0}/%f'".format(archive_dir)
)
"archive_command = '{0} archive-push -B {1} --instance={2} --wal-file-path %p --wal-file-name %f'".format(
self.probackup_path, backup_dir, instance))
#elif os.name == 'nt':
# node.append_conf(
# "postgresql.auto.conf",

View File

@ -3,9 +3,8 @@ from sys import exit
import os
from os import path
import six
from .ptrack_helpers import dir_files, ProbackupTest, ProbackupException
from helpers.ptrack_helpers import dir_files, ProbackupTest, ProbackupException
#TODO
class InitTest(ProbackupTest, unittest.TestCase):
@ -14,44 +13,56 @@ class InitTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_success_1(self):
def test_success(self):
"""Success normal init"""
fname = self.id().split(".")[3]
node = self.make_simple_node(base_dir="tmp_dirs/init/{0}".format(fname))
self.assertEqual(self.init_pb(node), six.b(""))
self.assertEqual(
dir_files(self.backup_dir(node)),
['backups', 'pg_probackup.conf', 'wal']
['backups', 'wal']
)
self.add_instance(node=node, instance='test')
def test_already_exist_2(self):
self.assertEqual("INFO: Instance 'test' deleted successfully\n",
self.del_instance(node=node, instance='test'),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd))
try:
self.show_pb(node, instance='test')
self.assertEqual(1, 0, 'Expecting Error due to show of non-existing instance. Output: {0} \n CMD: {1}'.format(
repr(self.output), self.cmd))
except ProbackupException, e:
self.assertEqual(e.message,
"ERROR: Instance 'test' does not exist in this backup catalog\n",
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(e.message, self.cmd))
def test_already_exist(self):
"""Failure with backup catalog already existed"""
fname = self.id().split(".")[3]
node = self.make_simple_node(base_dir="tmp_dirs/init/{0}".format(fname))
self.init_pb(node)
try:
self.init_pb(node)
# we should die here because exception is what we expect to happen
exit(1)
self.assertEqual(1, 0, 'Expecting Error due to initialization in non-empty directory. Output: {0} \n CMD: {1}'.format(
repr(self.output), self.cmd))
except ProbackupException, e:
self.assertEqual(
e.message,
"ERROR: backup catalog already exist and it's not empty\n"
)
self.assertEqual(e.message,
"ERROR: backup catalog already exist and it's not empty\n",
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
def test_abs_path_3(self):
def test_abs_path(self):
"""failure with backup catalog should be given as absolute path"""
fname = self.id().split(".")[3]
node = self.make_simple_node(base_dir="tmp_dirs/init/{0}".format(fname))
try:
self.run_pb(["init", "-B", path.relpath("%s/backup" % node.base_dir, self.dir_path)])
# we should die here because exception is what we expect to happen
exit(1)
self.assertEqual(1, 0, 'Expecting Error due to initialization with non-absolute path in --backup-path. Output: {0} \n CMD: {1}'.format(
repr(self.output), self.cmd))
except ProbackupException, e:
self.assertEqual(
e.message,
"ERROR: -B, --backup-path must be an absolute path\n"
)
self.assertEqual(e.message,
"ERROR: -B, --backup-path must be an absolute path\n",
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
if __name__ == '__main__':

View File

@ -1,7 +1,7 @@
import unittest
from os import path
import six
from .ptrack_helpers import ProbackupTest, ProbackupException
from helpers.ptrack_helpers import ProbackupTest, ProbackupException
from testgres import stop_all

View File

@ -1,7 +1,7 @@
import unittest
import os
import six
from .ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack
from helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack
from datetime import datetime, timedelta
from testgres import stop_all
import subprocess

View File

@ -1,7 +1,7 @@
import unittest
import os
import six
from .ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack
from helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack
from datetime import datetime, timedelta
from testgres import stop_all
import subprocess

135
tests/pgpro688.py Normal file
View File

@ -0,0 +1,135 @@
import unittest
import os
import six
from helpers.ptrack_helpers import ProbackupTest, ProbackupException
from datetime import datetime, timedelta
from testgres import stop_all, get_username
import subprocess
from sys import exit, _getframe
import shutil
import time
class ReplicaTest(ProbackupTest, unittest.TestCase):
def __init__(self, *args, **kwargs):
super(ReplicaTest, self).__init__(*args, **kwargs)
self.module_name = 'replica'
self.instance_1 = 'master'
self.instance_2 = 'slave'
# @classmethod
# def tearDownClass(cls):
# stop_all()
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_replica_stream_full_backup(self):
"""
make full stream backup from replica
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, self.module_name, fname, 'backup')
master = self.make_simple_node(base_dir="{0}/{1}/master".format(self.module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '5min'}
)
master.start()
shutil.rmtree(backup_dir, ignore_errors=True)
self.init_pb(backup_dir)
instance = ''
self.add_instance(backup_dir, self.instance_1, master)
self.set_archiving(backup_dir, self.instance_1, master)
master.restart()
slave = self.make_simple_node(base_dir="{0}/{1}/slave".format(self.module_name, fname))
slave_port = slave.port
slave.cleanup()
# FULL BACKUP
self.backup_node(backup_dir, self.instance_1, master, backup_type='full', options=['--stream'])
master.psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
before = master.execute("postgres", "SELECT * FROM t_heap")
#PAGE BACKUP
self.backup_node(backup_dir, self.instance_1, master, backup_type='page', options=['--stream'])
self.restore_node(backup_dir, self.instance_1, slave.data_dir)
slave.append_conf('postgresql.auto.conf', 'port = {0}'.format(slave.port))
slave.append_conf('postgresql.auto.conf', 'hot_standby = on')
slave.append_conf('recovery.conf', "standby_mode = 'on'")
slave.append_conf('recovery.conf',
"primary_conninfo = 'user={0} port={1} sslmode=prefer sslcompression=1'".format(get_username(), master.port))
slave.start({"-t": "600"})
# Replica Ready
# Check replica
after = slave.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
# Make backup from replica
self.add_instance(backup_dir, self.instance_2, slave)
self.assertTrue('INFO: Wait end of WAL streaming' and 'completed' in
self.backup_node(backup_dir, self.instance_2, slave, backup_type='full', options=[
'--stream', '--log-level=verbose', '--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)]))
self.validate_pb(backup_dir, self.instance_2)
self.assertEqual('OK', self.show_pb(backup_dir, self.instance_2)[0]['Status'])
@unittest.skip("skip")
def test_replica_archive_full_backup(self):
"""
make full archive backup from replica
"""
fname = self.id().split('.')[3]
master = self.make_simple_node(base_dir="tmp_dirs/replica/{0}/master".format(fname),
set_archiving=True,
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
)
master.append_conf('postgresql.auto.conf', 'archive_timeout = 10')
master.start()
slave = self.make_simple_node(base_dir="tmp_dirs/replica/{0}/slave".format(fname))
slave_port = slave.port
slave.cleanup()
self.assertEqual(self.init_pb(master), six.b(""))
self.backup_pb(node=master, backup_type='full', options=['--stream'])
master.psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
before = master.execute("postgres", "SELECT * FROM t_heap")
id = self.backup_pb(master, backup_type='page', options=['--stream'])
self.restore_pb(backup_dir=self.backup_dir(master), data_dir=slave.data_dir)
# Settings for Replica
slave.append_conf('postgresql.auto.conf', 'port = {0}'.format(slave.port))
slave.append_conf('postgresql.auto.conf', 'hot_standby = on')
# Set Archiving for replica
self.set_archiving_conf(slave, replica=True)
slave.append_conf('recovery.conf', "standby_mode = 'on'")
slave.append_conf('recovery.conf',
"primary_conninfo = 'user=gsmol port={0} sslmode=prefer sslcompression=1'".format(master.port))
slave.start({"-t": "600"})
# Replica Started
# master.execute("postgres", "checkpoint")
# Check replica
after = slave.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
# Make backup from replica
self.assertEqual(self.init_pb(slave), six.b(""))
self.backup_pb(slave, backup_type='full', options=['--archive-timeout=30'])
self.validate_pb(slave)

View File

@ -1,7 +1,7 @@
import unittest
from sys import exit
from testgres import get_new_node, stop_all
from .ptrack_helpers import ProbackupTest, idx_ptrack
from helpers.ptrack_helpers import ProbackupTest, idx_ptrack
class SimpleTest(ProbackupTest, unittest.TestCase):

View File

@ -1,7 +1,7 @@
import unittest
from sys import exit
from testgres import get_new_node, stop_all
from .ptrack_helpers import ProbackupTest, idx_ptrack
from helpers.ptrack_helpers import ProbackupTest, idx_ptrack
class SimpleTest(ProbackupTest, unittest.TestCase):

View File

@ -3,7 +3,7 @@ from sys import exit
from testgres import get_new_node, stop_all
import os
from signal import SIGTERM
from .ptrack_helpers import ProbackupTest, idx_ptrack
from helpers.ptrack_helpers import ProbackupTest, idx_ptrack
from time import sleep

View File

@ -3,7 +3,7 @@ from sys import exit
from testgres import get_new_node, stop_all
import os
from signal import SIGTERM
from .ptrack_helpers import ProbackupTest, idx_ptrack
from helpers.ptrack_helpers import ProbackupTest, idx_ptrack
from time import sleep

View File

@ -1,7 +1,7 @@
import unittest
from sys import exit
from testgres import get_new_node, stop_all
from .ptrack_helpers import ProbackupTest, idx_ptrack
from helpers.ptrack_helpers import ProbackupTest, idx_ptrack
class SimpleTest(ProbackupTest, unittest.TestCase):

View File

@ -1,8 +1,7 @@
import unittest
from sys import exit
from testgres import get_new_node, stop_all
from os import path, open, lseek, read, close, O_RDONLY
from .ptrack_helpers import ProbackupTest, idx_ptrack
from helpers.ptrack_helpers import ProbackupTest, idx_ptrack
class SimpleTest(ProbackupTest, unittest.TestCase):

View File

@ -1,8 +1,7 @@
import unittest
from sys import exit
from testgres import get_new_node, stop_all
from os import path, open, lseek, read, close, O_RDONLY
from .ptrack_helpers import ProbackupTest, idx_ptrack
from helpers.ptrack_helpers import ProbackupTest, idx_ptrack
class SimpleTest(ProbackupTest, unittest.TestCase):

View File

@ -2,8 +2,7 @@ import unittest
from sys import exit
from testgres import get_new_node, stop_all
#import os
from os import path, open, lseek, read, close, O_RDONLY
from .ptrack_helpers import ProbackupTest, idx_ptrack
from helpers.ptrack_helpers import ProbackupTest, idx_ptrack
class SimpleTest(ProbackupTest, unittest.TestCase):

View File

@ -1,8 +1,7 @@
import unittest
from sys import exit
from testgres import get_new_node, stop_all
from os import path, open, lseek, read, close, O_RDONLY
from .ptrack_helpers import ProbackupTest, idx_ptrack
from helpers.ptrack_helpers import ProbackupTest, idx_ptrack
class SimpleTest(ProbackupTest, unittest.TestCase):

View File

@ -1,17 +1,22 @@
import unittest
import os
import six
from .ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack
from helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack
from datetime import datetime, timedelta
from testgres import stop_all
import subprocess
from sys import exit
from sys import exit, _getframe
import shutil
import time
class ReplicaTest(ProbackupTest, unittest.TestCase):
def __init__(self, *args, **kwargs):
super(ReplicaTest, self).__init__(*args, **kwargs)
self.module_name = 'replica'
self.instance_1 = 'master'
self.instance_2 = 'slave'
# @classmethod
# def tearDownClass(cls):
@ -24,29 +29,37 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
make full stream backup from replica
"""
fname = self.id().split('.')[3]
master = self.make_simple_node(base_dir="tmp_dirs/replica/{0}/master".format(fname),
set_archiving=True,
backup_dir = os.path.join(self.tmp_path, self.module_name, fname, 'backup')
master = self.make_simple_node(base_dir="{0}/{1}/master".format(self.module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s'}
)
master.start()
slave = self.make_simple_node(base_dir="tmp_dirs/replica/{0}/slave".format(fname))
master.start()
shutil.rmtree(backup_dir, ignore_errors=True)
self.init_pb(backup_dir)
instance = ''
self.add_instance(backup_dir, self.instance_1, master)
self.set_archiving(backup_dir, self.instance_1, master)
master.restart()
slave = self.make_simple_node(base_dir="{0}/{1}/slave".format(self.module_name, fname))
slave_port = slave.port
slave.cleanup()
self.assertEqual(self.init_pb(master), six.b(""))
self.backup_pb(master, backup_type='full', options=['--stream'])
# FULL BACKUP
self.backup_node(backup_dir, self.instance_1, master, backup_type='full', options=['--stream'])
master.psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
before = master.execute("postgres", "SELECT * FROM t_heap")
id = self.backup_pb(master, backup_type='page', options=['--stream'])
self.restore_pb(backup_dir=self.backup_dir(master), data_dir=slave.data_dir)
#PAGE BACKUP
self.backup_node(backup_dir, self.instance_1, master, backup_type='page', options=['--stream'])
self.restore_node(backup_dir, self.instance_1, slave.data_dir)
slave.append_conf('postgresql.auto.conf', 'port = {0}'.format(slave.port))
slave.append_conf('postgresql.auto.conf', 'hot_standby = on')
@ -60,12 +73,13 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
after = slave.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
# master.execute("postgres", "checkpoint")
master.execute("postgres", "create table t1(a int)")
# Make backup from replica
self.assertEqual(self.init_pb(slave), six.b(""))
self.backup_pb(slave, backup_type='full', options=['--stream'])
self.add_instance(backup_dir, self.instance_2, slave)
time.sleep(2)
self.assertTrue('INFO: Wait end of WAL streaming' and 'completed' in
self.backup_node(backup_dir, self.instance_2, slave, backup_type='full', options=['--stream', '--log-level=verbose']))
self.validate_pb(backup_dir, self.instance_2)
self.assertEqual('OK', self.show_pb(backup_dir, self.instance_2)[0]['Status'])
@unittest.skip("skip")
def test_replica_archive_full_backup(self):
@ -87,7 +101,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
slave.cleanup()
self.assertEqual(self.init_pb(master), six.b(""))
self.backup_pb(master, backup_type='full', options=['--stream'])
self.backup_pb(node=master, backup_type='full', options=['--stream'])
master.psql(
"postgres",

View File

@ -2,7 +2,7 @@ import unittest
import os
from os import path
import six
from .ptrack_helpers import ProbackupTest, ProbackupException
from helpers.ptrack_helpers import ProbackupTest, ProbackupException
from testgres import stop_all
import subprocess
from datetime import datetime
@ -18,7 +18,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
def tearDownClass(cls):
stop_all()
# @unittest.skip("skip")
@unittest.skip("skip")
# @unittest.expectedFailure
def test_restore_full_to_latest(self):
"""recovery to latest from full backup"""
@ -55,6 +55,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
node.stop()
@unittest.skip("skip")
def test_restore_full_page_to_latest(self):
"""recovery to latest from full + page backups"""
fname = self.id().split('.')[3]
@ -91,6 +92,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
node.stop()
@unittest.skip("skip")
def test_restore_to_timeline(self):
"""recovery to target timeline"""
fname = self.id().split('.')[3]
@ -139,6 +141,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
node.stop()
@unittest.skip("skip")
def test_restore_to_time(self):
"""recovery to target timeline"""
fname = self.id().split('.')[3]
@ -153,19 +156,18 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
before = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.backup_pb(node, backup_type="full")
backup_id = self.backup_pb(node, backup_type="full")
target_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
pgbench = node.pgbench(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pgbench.wait()
pgbench.stdout.close()
node.stop({"-m": "immediate"})
node.stop()
node.cleanup()
self.assertTrue(six.b("INFO: Restore of backup") and
six.b("completed.") in self.restore_pb(node,
options=["-j", "4", '--time="%s"' % target_time]))
self.assertTrue(six.b("INFO: Restore of backup {0} completed.".format(backup_id)) in self.restore_pb(node,
options=["-j", "4", '--time="{0}"'.format(target_time)]))
node.start({"-t": "600"})
@ -174,6 +176,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
node.stop()
@unittest.skip("skip")
def test_restore_to_xid(self):
"""recovery to target xid"""
fname = self.id().split('.')[3]
@ -224,7 +227,8 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
node.stop()
def test_restore_full_ptrack(self):
@unittest.skip("skip")
def test_restore_full_ptrack_archive(self):
"""recovery to latest from full + ptrack backups"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="tmp_dirs/restore/{0}".format(fname),
@ -267,6 +271,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
node.stop()
@unittest.skip("skip")
def test_restore_full_ptrack_ptrack(self):
"""recovery to latest from full + ptrack + ptrack backups"""
fname = self.id().split('.')[3]
@ -339,15 +344,15 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
pgbench.wait()
pgbench.stdout.close()
self.backup_pb(node, backup_type="ptrack", options=["--stream"])
backup_id = self.backup_pb(node, backup_type="ptrack", options=["--stream"])
before = node.execute("postgres", "SELECT * FROM pgbench_branches")
node.stop({"-m": "immediate"})
node.stop()
node.cleanup()
self.assertTrue(six.b("INFO: Restore of backup") and
six.b("completed.") in self.restore_pb(node, options=["-j", "4"]))
self.assertTrue(six.b("INFO: Restore of backup {0} completed.".format(backup_id)) in
self.restore_pb(node, options=["-j", "4"]))
node.start({"-t": "600"})
@ -356,6 +361,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
node.stop()
@unittest.skip("skip")
def test_restore_full_ptrack_under_load(self):
"""recovery to latest from full + ptrack backups with loads when ptrack backup do"""
fname = self.id().split('.')[3]
@ -410,6 +416,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
node.stop()
@unittest.skip("skip")
def test_restore_full_under_load_ptrack(self):
"""recovery to latest from full + page backups with loads when full backup do"""
fname = self.id().split('.')[3]
@ -465,6 +472,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
node.stop()
@unittest.skip("skip")
def test_restore_to_xid_inclusive(self):
"""recovery with target inclusive false"""
fname = self.id().split('.')[3]
@ -519,6 +527,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
node.stop()
@unittest.skip("skip")
def test_restore_with_tablespace_mapping_1(self):
"""recovery using tablespace-mapping option"""
fname = self.id().split('.')[3]
@ -599,6 +608,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
node.stop()
@unittest.skip("skip")
def test_restore_with_tablespace_mapping_2(self):
"""recovery using tablespace-mapping option and page backup"""
fname = self.id().split('.')[3]
@ -659,10 +669,9 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.assertEqual(count[0][0], 4)
node.stop()
@unittest.skip("skip")
def test_archive_node_backup_stream_restore_to_recovery_time(self):
"""
make node with archiving, make stream backup,
get Recovery Time, try to make PITR to Recovery Time
"""make node with archiving, make stream backup, make PITR to Recovery Time
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="tmp_dirs/pgpro668/{0}".format(fname),
@ -674,25 +683,27 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
node.start()
self.assertEqual(self.init_pb(node), six.b(""))
id = self.backup_pb(node, backup_type='full', options=["--stream"])
recovery_time = self.show_pb(node, id=id)['recovery-time']
node.pg_ctl('stop', {'-m': 'immediate', '-D': '{0}'.format(node.data_dir)})
backup_id = self.backup_pb(node, backup_type='full', options=["--stream"])
node.psql("postgres", "create table t_heap(a int)")
node.psql("postgres", "select pg_switch_xlog()")
node.stop()
node.cleanup()
self.assertTrue(six.b("INFO: Restore of backup") and
six.b("completed.") in self.restore_pb(
node, options=['--time="{0}"'.format(recovery_time)]))
recovery_time = self.show_pb(node, id=backup_id)['recovery-time']
self.assertTrue(six.b("INFO: Restore of backup {0} completed.".format(backup_id)) in
self.restore_pb(node, options=["-j", "4", '--time="{0}"'.format(recovery_time)]))
node.start({"-t": "600"})
res = node.psql("postgres", 'select * from t_heap')
self.assertEqual(True, 'does not exist' in res[2])
self.assertEqual(True, node.status())
node.stop()
def test_archive_node_backup_stream_additional_commit_pitr(self):
"""
make node with archiving, make stream backup, create table t_heap,
try to make pitr to Recovery Time, check that t_heap do not exists
"""
@unittest.skip("skip")
def test_archive_node_backup_stream_pitr(self):
"""make node with archiving, make stream backup, create table t_heap, make pitr to Recovery Time, check that t_heap do not exists"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="tmp_dirs/restore/{0}".format(fname),
set_archiving=True,
@ -703,18 +714,50 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
node.start()
self.assertEqual(self.init_pb(node), six.b(""))
id = self.backup_pb(node, backup_type='full', options=["--stream"])
backup_id = self.backup_pb(node, backup_type='full', options=["--stream"])
node.psql("postgres", "create table t_heap(a int)")
node.pg_ctl('stop', {'-m': 'immediate', '-D': '{0}'.format(node.data_dir)})
node.psql("postgres", "select pg_switch_xlog()")
node.stop()
node.cleanup()
recovery_time = self.show_pb(node, id=id)['recovery-time']
self.assertTrue(six.b("INFO: Restore of backup") and
six.b("completed.") in self.restore_pb(node,
options=["-j", "4", '--time="{0}"'.format(recovery_time)]))
recovery_time = self.show_pb(node, id=backup_id)['recovery-time']
self.assertTrue(six.b("INFO: Restore of backup {0} completed.".format(backup_id)) in
self.restore_pb(node, options=["-j", "4", '--time="{0}"'.format(recovery_time)]))
node.start({"-t": "600"})
res = node.psql("postgres", 'select * from t_heap')
self.assertEqual(True, 'does not exist' in res[2])
node.stop()
# NOTE(review): skip reason normalized to match the file's convention; the
# original reason suggested the scenario itself may be invalid — confirm
# whether PITR to the recovery-time of the only existing backup is supported.
@unittest.skip("skip")
def test_archive_node_backup_stream_pitr_2(self):
    """Make a node with archiving, take a full stream backup, create table
    t_heap AFTER the backup, then run PITR to the backup's Recovery Time
    and check that t_heap does not exist in the restored cluster.
    """
    fname = self.id().split('.')[3]
    node = self.make_simple_node(base_dir="tmp_dirs/restore/{0}".format(fname),
        set_archiving=True,
        set_replication=True,
        initdb_params=['--data-checksums'],
        pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
        )
    node.start()
    self.assertEqual(self.init_pb(node), six.b(""))
    backup_id = self.backup_pb(node, backup_type='full', options=["--stream"])

    # t_heap is created only after the backup was taken, so restoring to the
    # backup's recovery time must NOT bring it back.
    node.psql("postgres", "create table t_heap(a int)")
    node.psql("postgres", "select pg_switch_xlog()")
    node.pg_ctl('stop', {'-m': 'immediate', '-D': '{0}'.format(node.data_dir)})
    node.cleanup()

    recovery_time = self.show_pb(node, id=backup_id)['recovery-time']
    self.assertTrue(six.b("INFO: Restore of backup {0} completed.".format(backup_id)) in self.restore_pb(node,
        options=["-j", "4", '--time="{0}"'.format(recovery_time)]))
    node.start({"-t": "600"})
    res = node.psql("postgres", 'select * from t_heap')
    self.assertEqual(True, 'does not exist' in res[2])
    # Stop the cluster so the test does not leak a running postgres instance
    # (every sibling test in this file ends with node.stop()).
    node.stop()

View File

@ -2,7 +2,7 @@ import unittest
import os
from datetime import datetime, timedelta
from os import path, listdir
from .ptrack_helpers import ProbackupTest
from helpers.ptrack_helpers import ProbackupTest
from testgres import stop_all

View File

@ -2,7 +2,7 @@ import unittest
import os
from os import path
import six
from .ptrack_helpers import ProbackupTest
from helpers.ptrack_helpers import ProbackupTest
from testgres import stop_all

View File

@ -2,7 +2,6 @@ import unittest
import os
import six
from helpers.ptrack_helpers import ProbackupTest, ProbackupException
#from helpers.expected_errors import satisfying_full_backup_not_found, wal_corruption
from datetime import datetime, timedelta
from testgres import stop_all
import subprocess
@ -22,11 +21,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_validate_wal_unreal_values(self):
"""
make node with archiving
make archive backup
validate to both real and unreal values
"""
"""make node with archiving, make archive backup, validate to both real and unreal values"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="tmp_dirs/validate/{0}".format(fname),
set_archiving=True,