Mirror of https://github.com/postgrespro/pg_probackup.git (synced 2025-02-04 14:11:31 +02:00)

Commit 78503f905b: Merge remote-tracking branch 'origin/testgres1'
@@ -8,7 +8,7 @@ import unittest
 import signal
 
 from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
-from testgres import ClusterException as StartNodeException
+from testgres import StartNodeException
 
 module_name = 'auth_test'
 skip_test = False
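
Newer testgres exports StartNodeException directly, so the alias of ClusterException is dropped. A compatibility import that tolerates both layouts (a sketch, not part of this commit) could look like:

    # Sketch: accept either testgres exception layout.
    try:
        from testgres import StartNodeException             # newer testgres
    except ImportError:                                      # older testgres
        from testgres import ClusterException as StartNodeException
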
@@ -123,7 +123,7 @@ class AuthTest(unittest.TestCase):
         self.fail(e.value)
 
     def test_right_password_and_wrong_pgpass(self):
-        """ Test case: PGPB_AUTH05 - correct password and incorrect .pgpass"""
+        """ Test case: PGPB_AUTH05 - correct password and incorrect .pgpass (-W)"""
         line = ":".join(['127.0.0.1', str(self.node.port), 'postgres', 'backup', 'wrong_password'])
         create_pgpass(self.pgpass_file, line)
         try:
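
The joined line follows libpq's .pgpass layout, hostname:port:database:username:password. The create_pgpass helper is defined elsewhere in this test module; a plausible sketch of what it does (hypothetical, for illustration only):

    import os
    import stat

    def create_pgpass(path, line):
        # Write a single .pgpass entry; libpq ignores the file unless
        # its permissions are 0600.
        with open(path, 'w') as f:
            f.write(line + '\n')
        os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
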
@@ -197,7 +197,7 @@ class AuthTest(unittest.TestCase):
 
 def run_pb_with_auth(cmd, password=None, kill=False):
     try:
-        with spawn(" ".join(cmd), timeout=10) as probackup:
+        with spawn(" ".join(cmd), encoding='utf-8', timeout=10) as probackup:
             result = probackup.expect("Password for user .*:", 5)
             if kill:
                 probackup.kill(signal.SIGINT)
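
Passing encoding='utf-8' makes pexpect hand back str rather than bytes, so the expect() patterns and captured output work unchanged on Python 3. A minimal standalone sketch (command and password are illustrative):

    import signal
    from pexpect import spawn

    with spawn('pg_probackup backup -U backup', encoding='utf-8', timeout=10) as child:
        child.expect('Password for user .*:', 5)
        child.sendline('some_password')   # or child.kill(signal.SIGINT) to abort
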
@@ -11,7 +11,7 @@ import pwd
 import select
 import psycopg2
 from time import sleep
+import re
 
 idx_ptrack = {
     't_heap': {
@@ -156,6 +156,14 @@ class ProbackupTest(object):
         if 'ARCHIVE_COMPRESSION' in self.test_env:
             if self.test_env['ARCHIVE_COMPRESSION'] == 'ON':
                 self.archive_compress = True
+        try:
+            testgres.configure_testgres(
+                cache_initdb=False,
+                cached_initdb_dir=False,
+                cache_pg_config=False,
+                node_cleanup_full=False)
+        except:
+            pass
 
         self.helpers_path = os.path.dirname(os.path.realpath(__file__))
         self.dir_path = os.path.abspath(
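
The bare try/except lets the suite run against testgres builds that predate configure_testgres or some of its keyword arguments, though it also swallows unrelated errors. A narrower guard (an alternative sketch, not what the commit does):

    try:
        testgres.configure_testgres(
            cache_initdb=False,
            cached_initdb_dir=False,
            cache_pg_config=False,
            node_cleanup_full=False)
    except (TypeError, AttributeError):
        # Older testgres: missing function or unknown keyword argument.
        pass
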
@@ -193,11 +201,16 @@ class ProbackupTest(object):
 
         real_base_dir = os.path.join(self.tmp_path, base_dir)
         shutil.rmtree(real_base_dir, ignore_errors=True)
+        os.makedirs(real_base_dir)
 
         node = testgres.get_new_node('test', base_dir=real_base_dir)
-        node.init(initdb_params=initdb_params)
+        node.should_rm_dirs = True
+        node.init(
+            initdb_params=initdb_params, allow_streaming=set_replication)
+        print(node.data_dir)
 
         # Sane default parameters, not a shit with fsync = off from testgres
+        node.append_conf("postgresql.auto.conf", "max_connections = 100")
         node.append_conf("postgresql.auto.conf", "shared_buffers = 10MB")
         node.append_conf("postgresql.auto.conf", "fsync = on")
         node.append_conf("postgresql.auto.conf", "wal_level = minimal")
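
Taken together, the hunk switches node bootstrap to the newer testgres flow: pre-create the base directory, mark it for cleanup, and let init() enable streaming replication. A condensed usage sketch under the same API assumptions (paths illustrative):

    import testgres

    node = testgres.get_new_node('test', base_dir='/tmp/pb_test')
    node.should_rm_dirs = True                      # remove dirs on cleanup
    node.init(initdb_params=['--data-checksums'], allow_streaming=True)
    node.append_conf('postgresql.auto.conf', 'fsync = on')
    node.start()
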
@@ -217,8 +230,13 @@ class ProbackupTest(object):
 
         # Allow replication in pg_hba.conf
         if set_replication:
-            node.set_replication_conf()
-            node.append_conf("postgresql.auto.conf", "max_wal_senders = 10")
+            node.append_conf(
+                "pg_hba.conf",
+                "local replication all trust\n")
+            node.append_conf(
+                "postgresql.auto.conf",
+                "max_wal_senders = 10")
 
         return node
 
     def create_tblspace_in_node(self, node, tblspc_name, tblspc_path=None, cfs=False):
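
Instead of relying on testgres' set_replication_conf(), the helper now writes the pg_hba.conf rule itself. The appended line authorizes replication connections over local sockets without a password:

    # pg_hba.conf entry produced by the code above:
    # TYPE   DATABASE      USER   METHOD
    local    replication   all    trust
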
@@ -235,7 +253,8 @@ class ProbackupTest(object):
         )
 
         if not tblspc_path:
-            tblspc_path = os.path.join(node.base_dir, '{0}'.format(tblspc_name))
+            tblspc_path = os.path.join(
+                node.base_dir, '{0}'.format(tblspc_name))
         cmd = "CREATE TABLESPACE {0} LOCATION '{1}'".format(
             tblspc_name, tblspc_path)
         if cfs:
@@ -272,11 +291,11 @@ class ProbackupTest(object):
 
         size = size_in_pages
         for segment_number in range(nsegments):
-            if size-131072 > 0:
+            if size - 131072 > 0:
                 pages_per_segment[segment_number] = 131072
             else:
                 pages_per_segment[segment_number] = size
-            size = size-131072
+            size = size - 131072
 
         for segment_number in range(nsegments):
             offset = 0
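
The constant 131072 is the number of 8 kB pages in one 1 GiB relation segment (1 GiB / 8 kB = 131072), so the loop spreads a relation's pages across its segment files. A worked example of the same logic:

    # 300000 pages split into 1 GiB segments of 131072 pages each.
    size = 300000
    pages_per_segment = {}
    for segment_number in range(3):     # ceil(300000 / 131072) segments
        if size - 131072 > 0:
            pages_per_segment[segment_number] = 131072
        else:
            pages_per_segment[segment_number] = size
        size = size - 131072
    # -> {0: 131072, 1: 131072, 2: 37856}
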
@@ -369,6 +388,7 @@ class ProbackupTest(object):
                     idx_dict['ptrack'][PageNum])
                 )
                 continue
 
         # Ok, all pages in new_pages that do not have
         # corresponding page in old_pages are been dealt with.
         # We can now safely proceed to comparing old and new pages
@@ -394,7 +414,7 @@ class ProbackupTest(object):
             if PageNum == 0 and idx_dict['type'] == 'spgist':
                 if self.verbose:
                     print(
-                        'SPGIST is a special snowflake, so don`t'
+                        'SPGIST is a special snowflake, so don`t '
                         'fret about losing ptrack for blknum 0'
                     )
                 continue
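
The one-character fix matters because Python glues adjacent string literals together verbatim; without the trailing space the printed message would read "don`tfret":

    msg = 'don`t' 'fret'     # -> 'don`tfret'
    msg = 'don`t ' 'fret'    # -> 'don`t fret'
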
@@ -721,9 +741,9 @@ class ProbackupTest(object):
         archive_mode = 'on'
 
         node.append_conf(
             "postgresql.auto.conf",
             "wal_level = archive"
         )
         node.append_conf(
             "postgresql.auto.conf",
             "archive_mode = {0}".format(archive_mode)
@@ -817,7 +837,15 @@ class ProbackupTest(object):
         return pwd.getpwuid(os.getuid())[0]
 
     def version_to_num(self, version):
-        return testgres.version_to_num(version)
+        if not version:
+            return 0
+        parts = version.split(".")
+        while len(parts) < 3:
+            parts.append("0")
+        num = 0
+        for part in parts:
+            num = num * 100 + int(re.sub("[^\d]", "", part))
+        return num
 
     def switch_wal_segment(self, node):
         """ Execute pg_switch_wal/xlog() in given node"""
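
The reimplemented version_to_num() pads the version to three dot-separated parts and folds each into two decimal digits, matching PostgreSQL's server_version_num scheme. Traced examples:

    # "9.6.3" -> parts ["9", "6", "3"]  -> ((9)*100 + 6)*100 + 3  = 90603
    # "10.1"  -> parts ["10", "1", "0"] -> ((10)*100 + 1)*100 + 0 = 100100
    # None or "" -> 0
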
@@ -829,7 +857,11 @@ class ProbackupTest(object):
             node.safe_psql("postgres", "select pg_switch_xlog()")
 
     def get_version(self, node):
-        return testgres.get_config()["VERSION_NUM"]
+        return self.version_to_num(
+            testgres.get_pg_config()["VERSION"].split(" ")[1])
+
+    def get_bin_path(self, binary):
+        return testgres.get_bin_path(binary)
 
     def del_test_dir(self, module_name, fname):
         """ Del testdir and optimistically try to del module dir"""
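
get_version() now derives the server version from pg_config's VERSION string instead of reading VERSION_NUM from testgres. Traced under the assumption of a typical pg_config banner:

    # testgres.get_pg_config()["VERSION"] is e.g. "PostgreSQL 9.6.3"
    "PostgreSQL 9.6.3".split(" ")[1]    # -> "9.6.3"
    # version_to_num("9.6.3")           # -> 90603
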
@@ -960,7 +992,11 @@ class ProbackupTest(object):
                     )
                 for page in restored_pgdata['files'][file]['md5_per_page']:
                     if page not in original_pgdata['files'][file]['md5_per_page']:
-                        error_message += '\n Extra page {0}\n File: {1}\n'.format(page, os.path.join(restored_pgdata['pgdata'], file))
+                        error_message += '\n Extra page {0}\n '
+                        'File: {1}\n'.format(
+                            page,
+                            os.path.join(
+                                restored_pgdata['pgdata'], file))
 
                     else:
                         error_message += (
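
Caution: as split here, the += statement ends on its first line, so the following 'File: ...' literal is formatted and then discarded, and the raw '{0}' placeholder lands in error_message. Implicit literal concatenation only works within a single expression; a parenthesized form (a corrective sketch, not what the commit contains) would be:

    error_message += (
        '\n Extra page {0}\n '
        'File: {1}\n'.format(
            page, os.path.join(restored_pgdata['pgdata'], file)))
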
|
@ -3,7 +3,8 @@ import unittest
|
|||||||
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
|
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
|
||||||
from datetime import datetime, timedelta
|
from datetime import datetime, timedelta
|
||||||
import subprocess
|
import subprocess
|
||||||
from testgres import ClusterException
|
from testgres import ClusterTestgresException as ClusterException
|
||||||
|
from testgres import QueryException
|
||||||
import shutil
|
import shutil
|
||||||
import sys
|
import sys
|
||||||
import time
|
import time
|
||||||
@@ -1233,7 +1234,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
                 "\n Output: {0} \n CMD: {1}".format(
                     repr(self.output), self.cmd)
             )
-        except ClusterException as e:
+        except QueryException as e:
             self.assertTrue(
                 'FATAL: database "db1" does not exist' in e.message,
                 '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
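
Both renames track the testgres1 API: ClusterException became ClusterTestgresException, and failed queries now surface as QueryException, which is why the handler above changes. A version-tolerant import sketch (an assumption, mirroring the imports earlier in this diff):

    try:
        from testgres import ClusterTestgresException as ClusterException
        from testgres import QueryException
    except ImportError:
        from testgres import ClusterException
        QueryException = ClusterException   # older testgres raised this instead
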
|
@ -40,7 +40,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
|||||||
|
|
||||||
if self.verbose:
|
if self.verbose:
|
||||||
print('Killing postmaster. Losing Ptrack changes')
|
print('Killing postmaster. Losing Ptrack changes')
|
||||||
node.pg_ctl('stop', {'-m': 'immediate', '-D': '{0}'.format(node.data_dir)})
|
node.stop(['-m', 'immediate', '-D', node.data_dir])
|
||||||
if not node.status():
|
if not node.status():
|
||||||
node.start()
|
node.start()
|
||||||
else:
|
else:
|
||||||
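
Node control calls now pass pg_ctl options as a flat argv-style list instead of a dict; the replica and restore hunks below make the same change. Side by side:

    # old testgres: dict of flags          new testgres: argv-style list
    # node.stop({'-m': 'immediate'})   ->  node.stop(['-m', 'immediate'])
    # node.start(params={'-t': '10'})  ->  node.start(params=['-t', '10'])
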
|
@ -39,7 +39,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
|
|||||||
self.set_replica(master, replica)
|
self.set_replica(master, replica)
|
||||||
|
|
||||||
# Check data correctness on replica
|
# Check data correctness on replica
|
||||||
replica.start({"-t": "600"})
|
replica.start(["-t", "600"])
|
||||||
after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
|
after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
|
||||||
self.assertEqual(before, after)
|
self.assertEqual(before, after)
|
||||||
|
|
||||||
@@ -120,7 +120,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
         # Settings for Replica
         self.set_replica(master, replica)
         self.set_archiving(backup_dir, 'replica', replica, replica=True)
-        replica.start({"-t": "600"})
+        replica.start(["-t", "600"])
 
         # Check data correctness on replica
         after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
|
@ -20,6 +20,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
|
|||||||
initdb_params=['--data-checksums'],
|
initdb_params=['--data-checksums'],
|
||||||
pg_options={'wal_level': 'replica'}
|
pg_options={'wal_level': 'replica'}
|
||||||
)
|
)
|
||||||
|
|
||||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||||
self.init_pb(backup_dir)
|
self.init_pb(backup_dir)
|
||||||
self.add_instance(backup_dir, 'node', node)
|
self.add_instance(backup_dir, 'node', node)
|
||||||
@@ -126,7 +127,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
             self.restore_node(backup_dir, 'node', node, options=["-j", "4"]),
             '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd))
 
-        node.start(params={'-t':'10'})
+        node.start(params=['-t','10'])
         while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
             time.sleep(1)
 