Mirror of https://github.com/postgrespro/pg_probackup.git, synced 2025-03-21 21:47:17 +02:00
tests: minor fixes

commit b52ee32440, parent 57ca17ad6c

tests/archive.py

@@ -2,7 +2,7 @@ import os
 import shutil
 import gzip
 import unittest
-from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, archive_script
+from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, GdbException
 from datetime import datetime, timedelta
 import subprocess
 from sys import exit
@@ -221,7 +221,10 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
 
     # @unittest.skip("skip")
     def test_pgpro434_3(self):
-        """Check pg_stop_backup_timeout, needed backup_timeout"""
+        """
+        Check pg_stop_backup_timeout, needed backup_timeout
+        Fixed in commit d84d79668b0c139 and assert fixed by ptrack 1.7
+        """
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -236,40 +239,32 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         self.add_instance(backup_dir, 'node', node)
         self.set_archiving(backup_dir, 'node', node)
 
-        archive_script_path = os.path.join(backup_dir, 'archive_script.sh')
-        with open(archive_script_path, 'w+') as f:
-            f.write(
-                archive_script.format(
-                    backup_dir=backup_dir, node_name='node', count_limit=2))
-
-        st = os.stat(archive_script_path)
-        os.chmod(archive_script_path, st.st_mode | 0o111)
-        node.append_conf(
-            'postgresql.auto.conf', "archive_command = '{0} %p %f'".format(
-                archive_script_path))
-
         node.slow_start()
 
-        try:
-            self.backup_node(
-                backup_dir, 'node', node,
-                options=[
-                    "--archive-timeout=60",
-                    "--stream"]
-                )
-            # we should die here because exception is what we expect to happen
-            self.assertEqual(
-                1, 0,
-                "Expecting Error because pg_stop_backup failed to answer.\n "
-                "Output: {0} \n CMD: {1}".format(
-                    repr(self.output), self.cmd))
-
-        except ProbackupException as e:
-            self.assertTrue(
-                "ERROR: pg_stop_backup doesn't answer" in e.message and
-                "cancel it" in e.message,
-                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
-                    repr(e.message), self.cmd))
+        gdb = self.backup_node(
+            backup_dir, 'node', node,
+            options=[
+                "--archive-timeout=60",
+                "--stream",
+                "--log-level-file=info"],
+            gdb=True)
+
+        gdb.set_breakpoint('pg_stop_backup')
+        gdb.run_until_break()
+
+        node.append_conf(
+            'postgresql.auto.conf', "archive_command = 'exit 1'")
+        node.reload()
+
+        gdb.continue_execution_until_exit()
+
+        log_file = os.path.join(backup_dir, 'log/pg_probackup.log')
+        with open(log_file, 'r') as f:
+            log_content = f.read()
+            self.assertNotIn(
+                "ERROR: pg_stop_backup doesn't answer",
+                log_content,
+                "pg_stop_backup timeouted")
 
         log_file = os.path.join(node.logs_dir, 'postgresql.log')
         with open(log_file, 'r') as f:
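
Aside: the rewritten test_pgpro434_3 above no longer provokes a failure through a throwaway shell archive_script; it pauses the backup under gdb, breaks archiving on purpose, and then only inspects the log. A condensed sketch of that flow (illustrative only; it reuses the helper calls visible in the hunk and assumes the ProbackupTest fixture plus the backup_dir/node objects from the test body):

    # Illustrative sketch of the gdb-driven pattern used by the new test body.
    gdb = self.backup_node(
        backup_dir, 'node', node,
        options=["--archive-timeout=60", "--stream", "--log-level-file=info"],
        gdb=True)                            # run pg_probackup under gdb control

    gdb.set_breakpoint('pg_stop_backup')     # pause just before pg_stop_backup
    gdb.run_until_break()

    # Break archiving on purpose so pg_stop_backup has to sit out its timeout.
    node.append_conf('postgresql.auto.conf', "archive_command = 'exit 1'")
    node.reload()

    gdb.continue_execution_until_exit()      # let the backup run to completion

    # The point of the fix: the timeout must not surface as an ERROR in the log.
    with open(os.path.join(backup_dir, 'log/pg_probackup.log')) as f:
        self.assertNotIn("ERROR: pg_stop_backup doesn't answer", f.read())
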
@@ -331,6 +326,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
 
         wal_src = os.path.join(
             node.data_dir, 'pg_wal', '000000010000000000000001')
+
         if self.archive_compress:
             with open(wal_src, 'rb') as f_in, gzip.open(
                     file, 'wb', compresslevel=1) as f_out:
@@ -412,7 +408,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         self.del_test_dir(module_name, fname)
 
     # @unittest.expectedFailure
-    @unittest.skip("skip")
+    # @unittest.skip("skip")
     def test_replica_archive(self):
         """
         make node without archiving, take stream backup and
@@ -502,7 +498,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
             "postgres",
             "insert into t_heap as select i as id, md5(i::text) as text, "
             "md5(repeat(i::text,10))::tsvector as tsvector "
-            "from generate_series(512,20680) i")
+            "from generate_series(512,80680) i")
 
         before = master.safe_psql("postgres", "SELECT * FROM t_heap")
 
@@ -510,11 +506,13 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
             "postgres",
             "CHECKPOINT")
 
+        self.wait_until_replica_catch_with_master(master, replica)
+
         backup_id = self.backup_node(
             backup_dir, 'replica',
             replica, backup_type='page',
             options=[
-                '--archive-timeout=30',
+                '--archive-timeout=60',
                 '--master-db=postgres',
                 '--master-host=localhost',
                 '--master-port={0}'.format(master.port),

tests/helpers/ptrack_helpers.py

@@ -60,19 +60,6 @@ idx_ptrack = {
     }
 }
 
-archive_script = """
-#!/bin/bash
-count=$(ls {backup_dir}/test00* | wc -l)
-if [ $count -ge {count_limit} ]
-then
-exit 1
-else
-cp $1 {backup_dir}/wal/{node_name}/$2
-count=$((count+1))
-touch {backup_dir}/test00$count
-exit 0
-fi
-"""
 warning = """
 Wrong splint in show_pb
 Original Header:

tests/page.py

@@ -3,6 +3,8 @@ import unittest
 from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
 from datetime import datetime, timedelta
 import subprocess
+import gzip
+import shutil
 
 module_name = 'page'
 
@@ -781,7 +783,22 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
                 wals_dir, f)) and not f.endswith('.backup')]
         wals = map(str, wals)
         # file = os.path.join(wals_dir, max(wals))
-        file = os.path.join(wals_dir, '000000010000000000000004')
+
+        if self.archive_compress:
+            original_file = os.path.join(wals_dir, '000000010000000000000004.gz')
+            tmp_file = os.path.join(backup_dir, '000000010000000000000004')
+
+            with gzip.open(original_file, 'rb') as f_in, open(tmp_file, 'wb') as f_out:
+                shutil.copyfileobj(f_in, f_out)
+
+            # drop healthy file
+            os.remove(original_file)
+            file = tmp_file
+
+        else:
+            file = os.path.join(wals_dir, '000000010000000000000004')
+
+        # corrupt file
         print(file)
         with open(file, "rb+", 0) as f:
             f.seek(42)
@@ -790,7 +807,14 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
             f.close
 
         if self.archive_compress:
-            file = file[:-3]
+            # compress corrupted file and replace with it old file
+            with open(file, 'rb') as f_in, gzip.open(original_file, 'wb', compresslevel=1) as f_out:
+                shutil.copyfileobj(f_in, f_out)
+
+            file = os.path.join(wals_dir, '000000010000000000000004.gz')
+
+        #if self.archive_compress:
+        #    file = file[:-3]
 
         # Single-thread PAGE backup
         try:
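
Aside: the two tests/page.py hunks above corrupt a compressed WAL segment by gunzipping it, flipping a few bytes, and gzipping the damaged copy back under the original name, all with stdlib calls. A self-contained sketch of that round trip on a throwaway file (the directory, segment name, and the b'blah' payload are made up for illustration):

    import gzip
    import os
    import shutil
    import tempfile

    workdir = tempfile.mkdtemp()
    original_file = os.path.join(workdir, '000000010000000000000004.gz')
    tmp_file = os.path.join(workdir, '000000010000000000000004')

    # Stand-in for a healthy compressed WAL segment.
    with gzip.open(original_file, 'wb') as f:
        f.write(os.urandom(1024))

    # 1. Decompress the healthy file into a plain temporary copy.
    with gzip.open(original_file, 'rb') as f_in, open(tmp_file, 'wb') as f_out:
        shutil.copyfileobj(f_in, f_out)
    os.remove(original_file)                 # drop the healthy compressed file

    # 2. Corrupt a few bytes in the plain copy.
    with open(tmp_file, 'rb+', 0) as f:
        f.seek(42)
        f.write(b'blah')

    # 3. Re-compress the corrupted copy under the original name.
    with open(tmp_file, 'rb') as f_in, gzip.open(
            original_file, 'wb', compresslevel=1) as f_out:
        shutil.copyfileobj(f_in, f_out)
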
@@ -915,9 +939,6 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
         print(file_destination)
         os.rename(file, file_destination)
 
-        if self.archive_compress:
-            file_destination = file_destination[:-3]
-
         # Single-thread PAGE backup
         try:
             self.backup_node(

tests/replica.py (109 changed lines)

@@ -236,12 +236,6 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
         pgbench = master.pgbench(
             options=['-T', '30', '-c', '2', '--no-vacuum'])
 
-        # master.psql(
-        #     "postgres",
-        #     "insert into t_heap as select i as id, md5(i::text) as text, "
-        #     "md5(repeat(i::text,10))::tsvector as tsvector "
-        #     "from generate_series(512,25120) i")
-
         backup_id = self.backup_node(
             backup_dir, 'replica',
             replica, backup_type='page',
@@ -449,106 +443,3 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
 
         # Clean after yourself
         self.del_test_dir(module_name, fname)
-
-    @unittest.skip("skip")
-    def test_make_block_from_future(self):
-        """
-        make archive master, take full backups from master,
-        restore full backup as replica, launch pgbench,
-        """
-        fname = self.id().split('.')[3]
-        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
-        master = self.make_simple_node(
-            base_dir="{0}/{1}/master".format(module_name, fname),
-            set_replication=True,
-            initdb_params=['--data-checksums'],
-            pg_options={
-                'wal_level': 'replica',
-                'max_wal_senders': '2'}
-            )
-        self.init_pb(backup_dir)
-        self.add_instance(backup_dir, 'master', master)
-        self.set_archiving(backup_dir, 'master', master)
-        # force more frequent wal switch
-        #master.append_conf('postgresql.auto.conf', 'archive_timeout = 10')
-        master.slow_start()
-
-        replica = self.make_simple_node(
-            base_dir="{0}/{1}/replica".format(module_name, fname))
-        replica.cleanup()
-
-        self.backup_node(backup_dir, 'master', master)
-
-        self.restore_node(
-            backup_dir, 'master', replica, options=['-R'])
-
-        # Settings for Replica
-        self.set_archiving(backup_dir, 'replica', replica, replica=True)
-        replica.append_conf(
-            'postgresql.auto.conf', 'port = {0}'.format(replica.port))
-        replica.append_conf(
-            'postgresql.auto.conf', 'hot_standby = on')
-
-        replica.slow_start(replica=True)
-
-        self.add_instance(backup_dir, 'replica', replica)
-
-        replica.safe_psql(
-            'postgres',
-            'checkpoint')
-
-        master.pgbench_init(scale=10)
-
-        self.wait_until_replica_catch_with_master(master, replica)
-
-
-        # print(replica.safe_psql(
-        #     'postgres',
-        #     'select * from pg_catalog.pg_last_xlog_receive_location()'))
-        #
-        # print(replica.safe_psql(
-        #     'postgres',
-        #     'select * from pg_catalog.pg_last_xlog_replay_location()'))
-        #
-        # print(replica.safe_psql(
-        #     'postgres',
-        #     'select * from pg_catalog.pg_control_checkpoint()'))
-        #
-        # replica.safe_psql(
-        #     'postgres',
-        #     'checkpoint')
-
-        pgbench = master.pgbench(options=['-T', '30', '-c', '2', '--no-vacuum'])
-
-        time.sleep(5)
-
-        #self.backup_node(backup_dir, 'replica', replica, options=['--stream'])
-        exit(1)
-        self.backup_node(backup_dir, 'replica', replica)
-        pgbench.wait()
-
-        # pgbench
-        master.safe_psql(
-            "postgres",
-            "create table t_heap as select i as id, md5(i::text) as text, "
-            "md5(repeat(i::text,10))::tsvector as tsvector "
-            "from generate_series(0,256000) i")
-
-
-        master.safe_psql(
-            'postgres',
-            'checkpoint')
-
-        replica.safe_psql(
-            'postgres',
-            'checkpoint')
-
-        replica.safe_psql(
-            'postgres',
-            'select * from pg_')
-
-        self.backup_node(backup_dir, 'replica', replica)
-        exit(1)
-
-        # Clean after yourself
-        self.del_test_dir(module_name, fname)
Loading…
x
Reference in New Issue
Block a user