import os
import subprocess
import unittest
from datetime import datetime, timedelta

from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack

# Test module name; the helpers use it to namespace per-test working
# directories and the backup catalogue on disk.
module_name = 'pgpro589'
class ArchiveCheck(ProbackupTest, unittest.TestCase):
    """Regression test for PGPRO-589: a backup taken while WAL archiving is
    broken must fail with status ERROR and must not copy datafiles into the
    backup catalogue."""

    def test_pgpro589(self):
        """
        https://jira.postgrespro.ru/browse/PGPRO-589
        make node without archive support, make backup which should fail
        check that backup status equal to ERROR
        check that no files where copied to backup catalogue
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica'}
            )
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)

        # Make an erroneous archive_command: it reports success ('exit 0')
        # but never actually stores the WAL segment anywhere, so the backup
        # will wait for a segment that can never arrive.
        node.append_conf("postgresql.auto.conf", "archive_command = 'exit 0'")
        node.slow_start()

        # Generate some WAL traffic so the backup has segments to wait for.
        node.pgbench_init(scale=5)
        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "4", "-T", "10"]
            )
        pgbench.wait()
        pgbench.stdout.close()

        # Remember the on-disk path of a user relation so we can later check
        # that it was NOT copied into the failed backup's catalogue.
        path = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('pgbench_accounts')").rstrip().decode(
            "utf-8")

        try:
            self.backup_node(
                backup_dir, 'node', node,
                options=['--archive-timeout=10'])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because of missing archive wal "
                "segment with start_lsn.\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                'INFO: Wait for WAL segment' in e.message and
                'ERROR: Switched WAL segment' in e.message and
                'could not be archived' in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        # The failed backup must be recorded with status ERROR.
        backup_id = self.show_pb(backup_dir, 'node')[0]['id']
        self.assertEqual(
            'ERROR', self.show_pb(backup_dir, 'node', backup_id)['status'],
            'Backup should have ERROR status')

        # No datafiles may have leaked into the backup catalogue.
        # (Local renamed from 'file' to avoid shadowing the builtin.)
        datafile = os.path.join(
            backup_dir, 'backups', 'node',
            backup_id, 'database', path)
        self.assertFalse(
            os.path.isfile(datafile),
            "\n Start LSN was not found in archive but datafiles where "
            "copied to backup catalogue.\n For example: {0}\n "
            "It is not optimal".format(datafile))

        # Clean after yourself
        self.del_test_dir(module_name, fname)