1
0
mirror of https://github.com/postgrespro/pg_probackup.git synced 2024-12-05 11:00:22 +02:00
pg_probackup/tests/cfs_backup.py

1161 lines
40 KiB
Python
Raw Normal View History

2017-09-28 20:09:35 +02:00
import os
import unittest
2017-10-02 13:53:24 +02:00
import random
2017-10-19 17:26:24 +02:00
import shutil
2017-09-28 20:09:35 +02:00
2017-10-02 13:53:24 +02:00
from .helpers.cfs_helpers import find_by_extensions, find_by_name, find_by_pattern, corrupt_file
2017-09-28 20:09:35 +02:00
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
2017-10-03 13:59:30 +02:00
# Subdirectory under tmp_path used to isolate this test module's artifacts.
module_name = 'cfs_backup'

# Name of the compressed (CFS) tablespace that setUp() creates for every test.
tblspace_name = 'cfs_tblspace'
2017-10-02 13:53:24 +02:00
class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase):
# --- Begin --- #
2017-12-22 00:39:16 +02:00
    @unittest.skipUnless(ProbackupTest.enterprise, 'skip')
    def setUp(self):
        """Prepare one node with a compressed (CFS) tablespace per test.

        Initializes a backup catalog, creates an archiving-enabled node
        with ptrack on and CFS encryption off, starts it, creates the
        compressed tablespace and verifies compression is really active.
        """
        # Test method name (4th dotted component of the test id); used to
        # build per-test working directories.
        self.fname = self.id().split('.')[3]
        self.backup_dir = os.path.join(
            self.tmp_path, module_name, self.fname, 'backup')
        self.node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, self.fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                # Required by the ptrack incremental-backup tests below.
                'ptrack_enable': 'on',
                # This class covers the NON-encrypted CFS case (see name).
                'cfs_encryption': 'off',
                'max_wal_senders': '2',
                'shared_buffers': '200MB'
            }
        )

        self.init_pb(self.backup_dir)
        self.add_instance(self.backup_dir, 'node', self.node)
        self.set_archiving(self.backup_dir, 'node', self.node)

        self.node.slow_start()

        self.create_tblspace_in_node(self.node, tblspace_name, cfs=True)

        # Sanity check: the tablespace must exist and report compression=true.
        tblspace = self.node.safe_psql(
            "postgres",
            "SELECT * FROM pg_tablespace WHERE spcname='{0}'".format(
                tblspace_name)
        )
        self.assertTrue(
            tblspace_name in tblspace and "compression=true" in tblspace,
            "ERROR: The tablespace not created "
            "or it create without compressions"
        )

        # pg_compression is the on-disk marker file of a CFS tablespace.
        self.assertTrue(
            find_by_name(
                [self.get_tblspace_path(self.node, tblspace_name)],
                ['pg_compression']),
            "ERROR: File pg_compression not found"
        )
2017-10-02 13:53:24 +02:00
# --- Section: Full --- #
2017-09-28 20:09:35 +02:00
# @unittest.expectedFailure
# @unittest.skip("skip")
2017-12-22 00:39:16 +02:00
@unittest.skipUnless(ProbackupTest.enterprise, 'skip')
2017-09-28 20:09:35 +02:00
def test_fullbackup_empty_tablespace(self):
2017-12-22 00:39:16 +02:00
"""Case: Check fullbackup empty compressed tablespace"""
2017-09-28 20:09:35 +02:00
backup_id = None
try:
2018-01-25 20:39:16 +02:00
backup_id = self.backup_node(
self.backup_dir, 'node', self.node, backup_type='full')
2017-09-28 20:09:35 +02:00
except ProbackupException as e:
self.fail(
"ERROR: Full backup failed.\n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
2017-09-28 20:09:35 +02:00
)
)
2017-10-02 13:53:24 +02:00
show_backup = self.show_pb(self.backup_dir, 'node', backup_id)
2017-09-28 20:09:35 +02:00
self.assertEqual(
"OK",
show_backup["status"],
2018-01-25 20:39:16 +02:00
"ERROR: Full backup status is not valid. \n "
"Current backup status={0}".format(show_backup["status"])
2017-09-28 20:09:35 +02:00
)
self.assertTrue(
2018-01-25 20:39:16 +02:00
find_by_name(
[os.path.join(self.backup_dir, 'backups', 'node', backup_id)],
['pg_compression']),
2017-09-28 20:09:35 +02:00
"ERROR: File pg_compression not found in backup dir"
)
# @unittest.expectedFailure
# @unittest.skip("skip")
2017-12-22 00:39:16 +02:00
@unittest.skipUnless(ProbackupTest.enterprise, 'skip')
2017-09-28 20:09:35 +02:00
def test_fullbackup_empty_tablespace_stream(self):
2017-12-22 00:39:16 +02:00
"""Case: Check fullbackup empty compressed tablespace with options stream"""
2017-09-28 20:09:35 +02:00
backup_id = None
try:
2018-01-25 20:39:16 +02:00
backup_id = self.backup_node(
self.backup_dir, 'node', self.node,
backup_type='full', options=['--stream'])
2017-09-28 20:09:35 +02:00
except ProbackupException as e:
self.fail(
"ERROR: Full backup failed.\n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
2017-09-28 20:09:35 +02:00
)
)
2017-10-19 17:26:24 +02:00
2017-10-02 13:53:24 +02:00
show_backup = self.show_pb(self.backup_dir, 'node', backup_id)
2017-09-28 20:09:35 +02:00
self.assertEqual(
"OK",
show_backup["status"],
2018-01-25 20:39:16 +02:00
"ERROR: Full backup status is not valid. \n "
"Current backup status={0}".format(show_backup["status"])
2017-09-28 20:09:35 +02:00
)
self.assertTrue(
2018-01-25 20:39:16 +02:00
find_by_name(
[os.path.join(self.backup_dir, 'backups', 'node', backup_id)],
['pg_compression']),
2017-09-28 20:09:35 +02:00
"ERROR: File pg_compression not found in backup dir"
)
# @unittest.expectedFailure
# @unittest.skip("skip")
2017-12-22 00:39:16 +02:00
@unittest.skipUnless(ProbackupTest.enterprise, 'skip')
2017-09-28 20:09:35 +02:00
# PGPRO-1018 invalid file size
def test_fullbackup_after_create_table(self):
2017-12-22 00:39:16 +02:00
"""Case: Make full backup after created table in the tablespace"""
if not self.enterprise:
return
2017-09-28 20:09:35 +02:00
2017-10-02 13:53:24 +02:00
self.node.safe_psql(
2017-09-28 20:09:35 +02:00
"postgres",
2018-01-25 20:39:16 +02:00
"CREATE TABLE {0} TABLESPACE {1} "
"AS SELECT i AS id, MD5(i::text) AS text, "
"MD5(repeat(i::text,10))::tsvector AS tsvector "
"FROM generate_series(0,256) i".format('t1', tblspace_name)
2017-09-28 20:09:35 +02:00
)
backup_id = None
try:
2018-01-25 20:39:16 +02:00
backup_id = self.backup_node(
self.backup_dir, 'node', self.node, backup_type='full')
2017-09-28 20:09:35 +02:00
except ProbackupException as e:
self.fail(
2017-10-19 17:26:24 +02:00
"\n ERROR: {0}\n CMD: {1}".format(
repr(e.message),
repr(self.cmd)
2017-09-28 20:09:35 +02:00
)
)
2017-10-19 17:26:24 +02:00
return False
2017-10-02 13:53:24 +02:00
show_backup = self.show_pb(self.backup_dir, 'node', backup_id)
2017-09-28 20:09:35 +02:00
self.assertEqual(
"OK",
show_backup["status"],
2018-01-25 20:39:16 +02:00
"ERROR: Full backup status is not valid. \n "
"Current backup status={0}".format(show_backup["status"])
2017-09-28 20:09:35 +02:00
)
self.assertTrue(
2018-01-25 20:39:16 +02:00
find_by_name(
[os.path.join(self.backup_dir, 'backups', 'node', backup_id)],
['pg_compression']),
"ERROR: File pg_compression not found in {0}".format(
os.path.join(self.backup_dir, 'node', backup_id))
2017-09-28 20:09:35 +02:00
)
self.assertTrue(
2018-01-25 20:39:16 +02:00
find_by_extensions(
[os.path.join(self.backup_dir, 'backups', 'node', backup_id)],
['.cfm']),
2017-10-02 13:53:24 +02:00
"ERROR: .cfm files not found in backup dir"
2017-09-28 20:09:35 +02:00
)
# @unittest.expectedFailure
2017-09-28 20:09:35 +02:00
# @unittest.skip("skip")
2017-12-22 00:39:16 +02:00
@unittest.skipUnless(ProbackupTest.enterprise, 'skip')
2017-09-28 20:09:35 +02:00
# PGPRO-1018 invalid file size
def test_fullbackup_after_create_table_stream(self):
"""
Case: Make full backup after created table in the tablespace with option --stream
"""
2017-10-02 13:53:24 +02:00
self.node.safe_psql(
2017-09-28 20:09:35 +02:00
"postgres",
2018-01-25 20:39:16 +02:00
"CREATE TABLE {0} TABLESPACE {1} "
"AS SELECT i AS id, MD5(i::text) AS text, "
"MD5(repeat(i::text,10))::tsvector AS tsvector "
"FROM generate_series(0,256) i".format('t1', tblspace_name)
2017-09-28 20:09:35 +02:00
)
backup_id = None
try:
2018-01-25 20:39:16 +02:00
backup_id = self.backup_node(
self.backup_dir, 'node', self.node,
backup_type='full', options=['--stream'])
2017-09-28 20:09:35 +02:00
except ProbackupException as e:
self.fail(
"ERROR: Full backup failed.\n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
2017-09-28 20:09:35 +02:00
)
)
2017-10-02 13:53:24 +02:00
show_backup = self.show_pb(self.backup_dir, 'node', backup_id)
2017-09-28 20:09:35 +02:00
self.assertEqual(
"OK",
show_backup["status"],
2018-01-25 20:39:16 +02:00
"ERROR: Full backup status is not valid. \n "
"Current backup status={0}".format(show_backup["status"])
2017-09-28 20:09:35 +02:00
)
self.assertTrue(
2018-01-25 20:39:16 +02:00
find_by_name(
[os.path.join(self.backup_dir, 'backups', 'node', backup_id)],
['pg_compression']),
2017-09-28 20:09:35 +02:00
"ERROR: File pg_compression not found in backup dir"
)
self.assertTrue(
2018-01-25 20:39:16 +02:00
find_by_extensions(
[os.path.join(self.backup_dir, 'backups', 'node', backup_id)],
['.cfm']),
2017-10-02 13:53:24 +02:00
"ERROR: .cfm files not found in backup dir"
2017-09-28 20:09:35 +02:00
)
2017-10-02 13:53:24 +02:00
# --- Section: Incremental from empty tablespace --- #
# @unittest.expectedFailure
2017-09-28 20:09:35 +02:00
# @unittest.skip("skip")
2017-12-22 00:39:16 +02:00
@unittest.skipUnless(ProbackupTest.enterprise, 'skip')
2017-09-28 20:09:35 +02:00
def test_fullbackup_empty_tablespace_ptrack_after_create_table(self):
"""
Case: Make full backup before created table in the tablespace.
Make ptrack backup after create table
"""
try:
2018-01-25 20:39:16 +02:00
self.backup_node(
self.backup_dir, 'node', self.node, backup_type='full')
2017-09-28 20:09:35 +02:00
except ProbackupException as e:
self.fail(
"ERROR: Full backup failed.\n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
2017-09-28 20:09:35 +02:00
)
)
2017-10-02 13:53:24 +02:00
self.node.safe_psql(
2017-09-28 20:09:35 +02:00
"postgres",
2018-01-25 20:39:16 +02:00
"CREATE TABLE {0} TABLESPACE {1} "
"AS SELECT i AS id, MD5(i::text) AS text, "
"MD5(repeat(i::text,10))::tsvector AS tsvector "
"FROM generate_series(0,256) i".format('t1', tblspace_name)
2017-09-28 20:09:35 +02:00
)
backup_id = None
try:
2018-01-25 20:39:16 +02:00
backup_id = self.backup_node(
self.backup_dir, 'node', self.node, backup_type='ptrack')
2017-09-28 20:09:35 +02:00
except ProbackupException as e:
self.fail(
"ERROR: Incremental backup failed.\n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
2017-09-28 20:09:35 +02:00
)
)
2017-10-02 13:53:24 +02:00
show_backup = self.show_pb(self.backup_dir, 'node', backup_id)
2017-09-28 20:09:35 +02:00
self.assertEqual(
"OK",
show_backup["status"],
2018-01-25 20:39:16 +02:00
"ERROR: Incremental backup status is not valid. \n "
"Current backup status={0}".format(show_backup["status"])
2017-09-28 20:09:35 +02:00
)
self.assertTrue(
2018-01-25 20:39:16 +02:00
find_by_name(
[self.get_tblspace_path(self.node, tblspace_name)],
['pg_compression']),
2017-09-28 20:09:35 +02:00
"ERROR: File pg_compression not found"
)
self.assertTrue(
2018-01-25 20:39:16 +02:00
find_by_extensions(
[os.path.join(self.backup_dir, 'backups', 'node', backup_id)],
['.cfm']),
2017-10-02 13:53:24 +02:00
"ERROR: .cfm files not found in backup dir"
2017-09-28 20:09:35 +02:00
)
# @unittest.expectedFailure
2017-09-28 20:09:35 +02:00
# @unittest.skip("skip")
2017-12-22 00:39:16 +02:00
@unittest.skipUnless(ProbackupTest.enterprise, 'skip')
2017-09-28 20:09:35 +02:00
def test_fullbackup_empty_tablespace_ptrack_after_create_table_stream(self):
"""
Case: Make full backup before created table in the tablespace.
Make ptrack backup after create table
"""
try:
2018-01-25 20:39:16 +02:00
self.backup_node(
self.backup_dir, 'node', self.node,
backup_type='full', options=['--stream'])
2017-09-28 20:09:35 +02:00
except ProbackupException as e:
self.fail(
"ERROR: Full backup failed.\n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
2017-09-28 20:09:35 +02:00
)
)
2017-10-02 13:53:24 +02:00
self.node.safe_psql(
2017-09-28 20:09:35 +02:00
"postgres",
2018-01-25 20:39:16 +02:00
"CREATE TABLE {0} TABLESPACE {1} "
"AS SELECT i AS id, MD5(i::text) AS text, "
"MD5(repeat(i::text,10))::tsvector AS tsvector "
"FROM generate_series(0,256) i".format('t1', tblspace_name)
2017-09-28 20:09:35 +02:00
)
backup_id = None
try:
2018-01-25 20:39:16 +02:00
backup_id = self.backup_node(
self.backup_dir, 'node', self.node,
backup_type='ptrack', options=['--stream'])
2017-09-28 20:09:35 +02:00
except ProbackupException as e:
self.fail(
"ERROR: Incremental backup failed.\n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
2017-09-28 20:09:35 +02:00
)
)
2017-10-02 13:53:24 +02:00
show_backup = self.show_pb(self.backup_dir, 'node', backup_id)
2017-09-28 20:09:35 +02:00
self.assertEqual(
"OK",
show_backup["status"],
2018-01-25 20:39:16 +02:00
"ERROR: Incremental backup status is not valid. \n "
"Current backup status={0}".format(show_backup["status"])
2017-09-28 20:09:35 +02:00
)
self.assertTrue(
2018-01-25 20:39:16 +02:00
find_by_name(
[self.get_tblspace_path(self.node, tblspace_name)],
['pg_compression']),
2017-09-28 20:09:35 +02:00
"ERROR: File pg_compression not found"
)
self.assertTrue(
2018-01-25 20:39:16 +02:00
find_by_extensions(
[os.path.join(self.backup_dir, 'backups', 'node', backup_id)],
['.cfm']),
2017-10-02 13:53:24 +02:00
"ERROR: .cfm files not found in backup dir"
2017-09-28 20:09:35 +02:00
)
self.assertFalse(
2018-01-25 20:39:16 +02:00
find_by_extensions(
[os.path.join(self.backup_dir, 'backups', 'node', backup_id)],
['_ptrack']),
2017-09-28 20:09:35 +02:00
"ERROR: _ptrack files was found in backup dir"
)
# @unittest.expectedFailure
# @unittest.skip("skip")
2017-12-22 00:39:16 +02:00
@unittest.skipUnless(ProbackupTest.enterprise, 'skip')
2017-09-28 20:09:35 +02:00
def test_fullbackup_empty_tablespace_page_after_create_table(self):
"""
Case: Make full backup before created table in the tablespace.
Make page backup after create table
"""
try:
2018-01-25 20:39:16 +02:00
self.backup_node(
self.backup_dir, 'node', self.node, backup_type='full')
2017-09-28 20:09:35 +02:00
except ProbackupException as e:
self.fail(
"ERROR: Full backup failed.\n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
2017-09-28 20:09:35 +02:00
)
)
2017-10-02 13:53:24 +02:00
self.node.safe_psql(
2017-09-28 20:09:35 +02:00
"postgres",
2018-01-25 20:39:16 +02:00
"CREATE TABLE {0} TABLESPACE {1} "
"AS SELECT i AS id, MD5(i::text) AS text, "
"MD5(repeat(i::text,10))::tsvector AS tsvector "
"FROM generate_series(0,256) i".format('t1', tblspace_name)
2017-09-28 20:09:35 +02:00
)
backup_id = None
try:
2018-01-25 20:39:16 +02:00
backup_id = self.backup_node(
self.backup_dir, 'node', self.node, backup_type='page')
2017-09-28 20:09:35 +02:00
except ProbackupException as e:
self.fail(
"ERROR: Incremental backup failed.\n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
2017-09-28 20:09:35 +02:00
)
)
2017-10-02 13:53:24 +02:00
show_backup = self.show_pb(self.backup_dir, 'node', backup_id)
2017-09-28 20:09:35 +02:00
self.assertEqual(
"OK",
show_backup["status"],
2018-01-25 20:39:16 +02:00
"ERROR: Incremental backup status is not valid. \n "
"Current backup status={0}".format(show_backup["status"])
2017-09-28 20:09:35 +02:00
)
self.assertTrue(
2018-01-25 20:39:16 +02:00
find_by_name(
[self.get_tblspace_path(self.node, tblspace_name)],
['pg_compression']),
2017-09-28 20:09:35 +02:00
"ERROR: File pg_compression not found"
)
self.assertTrue(
2018-01-25 20:39:16 +02:00
find_by_extensions(
[os.path.join(self.backup_dir, 'backups', 'node', backup_id)],
['.cfm']),
2017-10-02 13:53:24 +02:00
"ERROR: .cfm files not found in backup dir"
2017-09-28 20:09:35 +02:00
)
# @unittest.expectedFailure
# @unittest.skip("skip")
2017-12-22 00:39:16 +02:00
@unittest.skipUnless(ProbackupTest.enterprise, 'skip')
2017-09-28 20:09:35 +02:00
def test_fullbackup_empty_tablespace_page_after_create_table_stream(self):
"""
Case: Make full backup before created table in the tablespace.
Make page backup after create table
"""
try:
2018-01-25 20:39:16 +02:00
self.backup_node(
self.backup_dir, 'node', self.node,
backup_type='full', options=['--stream'])
2017-09-28 20:09:35 +02:00
except ProbackupException as e:
self.fail(
"ERROR: Full backup failed.\n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
2017-09-28 20:09:35 +02:00
)
)
2017-10-02 13:53:24 +02:00
self.node.safe_psql(
2017-09-28 20:09:35 +02:00
"postgres",
2018-01-25 20:39:16 +02:00
"CREATE TABLE {0} TABLESPACE {1} "
"AS SELECT i AS id, MD5(i::text) AS text, "
"MD5(repeat(i::text,10))::tsvector AS tsvector "
"FROM generate_series(0,256) i".format('t1', tblspace_name)
2017-09-28 20:09:35 +02:00
)
backup_id = None
try:
2018-01-25 20:39:16 +02:00
backup_id = self.backup_node(
self.backup_dir, 'node', self.node,
backup_type='page', options=['--stream'])
2017-09-28 20:09:35 +02:00
except ProbackupException as e:
self.fail(
"ERROR: Incremental backup failed.\n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
2017-09-28 20:09:35 +02:00
)
)
2017-10-02 13:53:24 +02:00
show_backup = self.show_pb(self.backup_dir, 'node', backup_id)
2017-09-28 20:09:35 +02:00
self.assertEqual(
"OK",
show_backup["status"],
2018-01-25 20:39:16 +02:00
"ERROR: Incremental backup status is not valid. \n "
"Current backup status={0}".format(show_backup["status"])
2017-09-28 20:09:35 +02:00
)
self.assertTrue(
2018-01-25 20:39:16 +02:00
find_by_name(
[self.get_tblspace_path(self.node, tblspace_name)],
['pg_compression']),
2017-09-28 20:09:35 +02:00
"ERROR: File pg_compression not found"
)
self.assertTrue(
2018-01-25 20:39:16 +02:00
find_by_extensions(
[os.path.join(self.backup_dir, 'backups', 'node', backup_id)],
['.cfm']),
2017-10-02 13:53:24 +02:00
"ERROR: .cfm files not found in backup dir"
2017-09-28 20:09:35 +02:00
)
self.assertFalse(
2018-01-25 20:39:16 +02:00
find_by_extensions(
[os.path.join(self.backup_dir, 'backups', 'node', backup_id)],
['_ptrack']),
2017-09-28 20:09:35 +02:00
"ERROR: _ptrack files was found in backup dir"
)
2017-10-02 13:53:24 +02:00
# --- Section: Incremental from fill tablespace --- #
2018-01-25 20:39:16 +02:00
@unittest.expectedFailure
2017-09-28 20:09:35 +02:00
# @unittest.skip("skip")
2017-12-22 00:39:16 +02:00
@unittest.skipUnless(ProbackupTest.enterprise, 'skip')
2017-09-28 20:09:35 +02:00
def test_fullbackup_after_create_table_ptrack_after_create_table(self):
"""
Case: Make full backup before created table in the tablespace.
Make ptrack backup after create table.
Check: incremental backup will not greater as full
"""
2017-10-02 13:53:24 +02:00
self.node.safe_psql(
2017-09-28 20:09:35 +02:00
"postgres",
2018-01-25 20:39:16 +02:00
"CREATE TABLE {0} TABLESPACE {1} "
"AS SELECT i AS id, MD5(i::text) AS text, "
"MD5(repeat(i::text,10))::tsvector AS tsvector "
"FROM generate_series(0,1005000) i".format('t1', tblspace_name)
2017-09-28 20:09:35 +02:00
)
backup_id_full = None
try:
2018-01-25 20:39:16 +02:00
backup_id_full = self.backup_node(
self.backup_dir, 'node', self.node, backup_type='full')
2017-09-28 20:09:35 +02:00
except ProbackupException as e:
self.fail(
"ERROR: Full backup failed.\n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
2017-09-28 20:09:35 +02:00
)
)
2017-10-02 13:53:24 +02:00
self.node.safe_psql(
2017-09-28 20:09:35 +02:00
"postgres",
2018-01-25 20:39:16 +02:00
"CREATE TABLE {0} TABLESPACE {1} "
"AS SELECT i AS id, MD5(i::text) AS text, "
"MD5(repeat(i::text,10))::tsvector AS tsvector "
"FROM generate_series(0,10) i".format('t2', tblspace_name)
2017-09-28 20:09:35 +02:00
)
backup_id_ptrack = None
try:
2018-01-25 20:39:16 +02:00
backup_id_ptrack = self.backup_node(
self.backup_dir, 'node', self.node, backup_type='ptrack')
2017-09-28 20:09:35 +02:00
except ProbackupException as e:
self.fail(
"ERROR: Incremental backup failed.\n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
2017-09-28 20:09:35 +02:00
)
)
2018-01-25 20:39:16 +02:00
show_backup_full = self.show_pb(
self.backup_dir, 'node', backup_id_full)
show_backup_ptrack = self.show_pb(
self.backup_dir, 'node', backup_id_ptrack)
2017-09-28 20:09:35 +02:00
self.assertGreater(
show_backup_full["data-bytes"],
2018-01-25 20:39:16 +02:00
show_backup_ptrack["data-bytes"],
"ERROR: Size of incremental backup greater than full. \n "
"INFO: {0} >{1}".format(
2017-09-28 20:09:35 +02:00
show_backup_ptrack["data-bytes"],
show_backup_full["data-bytes"]
)
)
2018-01-25 20:39:16 +02:00
@unittest.expectedFailure
2017-09-28 20:09:35 +02:00
# @unittest.skip("skip")
2017-12-22 00:39:16 +02:00
@unittest.skipUnless(ProbackupTest.enterprise, 'skip')
2017-09-28 20:09:35 +02:00
def test_fullbackup_after_create_table_ptrack_after_create_table_stream(self):
"""
2017-10-02 13:53:24 +02:00
Case: Make full backup before created table in the tablespace(--stream).
Make ptrack backup after create table(--stream).
2018-01-25 20:39:16 +02:00
Check: incremental backup size should not be greater than full
2017-09-28 20:09:35 +02:00
"""
2017-10-02 13:53:24 +02:00
self.node.safe_psql(
2017-09-28 20:09:35 +02:00
"postgres",
2018-01-25 20:39:16 +02:00
"CREATE TABLE {0} TABLESPACE {1} "
"AS SELECT i AS id, MD5(i::text) AS text, "
"MD5(repeat(i::text,10))::tsvector AS tsvector "
"FROM generate_series(0,1005000) i".format('t1', tblspace_name)
2017-09-28 20:09:35 +02:00
)
backup_id_full = None
try:
2018-01-25 20:39:16 +02:00
backup_id_full = self.backup_node(
self.backup_dir, 'node', self.node,
backup_type='full', options=['--stream'])
2017-09-28 20:09:35 +02:00
except ProbackupException as e:
self.fail(
"ERROR: Full backup failed.\n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
2017-09-28 20:09:35 +02:00
)
)
2017-10-02 13:53:24 +02:00
self.node.safe_psql(
2017-09-28 20:09:35 +02:00
"postgres",
2018-01-25 20:39:16 +02:00
"CREATE TABLE {0} TABLESPACE {1} "
"AS SELECT i AS id, MD5(i::text) AS text, "
"MD5(repeat(i::text,10))::tsvector AS tsvector "
"FROM generate_series(0,25) i".format('t2', tblspace_name)
2017-09-28 20:09:35 +02:00
)
backup_id_ptrack = None
try:
2018-01-25 20:39:16 +02:00
backup_id_ptrack = self.backup_node(
self.backup_dir, 'node', self.node,
backup_type='ptrack', options=['--stream'])
2017-09-28 20:09:35 +02:00
except ProbackupException as e:
self.fail(
"ERROR: Incremental backup failed.\n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
2017-09-28 20:09:35 +02:00
)
)
2018-01-25 20:39:16 +02:00
show_backup_full = self.show_pb(
self.backup_dir, 'node', backup_id_full)
show_backup_ptrack = self.show_pb(
self.backup_dir, 'node', backup_id_ptrack)
2017-09-28 20:09:35 +02:00
self.assertGreater(
show_backup_full["data-bytes"],
2018-01-25 20:39:16 +02:00
show_backup_ptrack["data-bytes"],
"ERROR: Size of incremental backup greater than full. \n "
"INFO: {0} >{1}".format(
2017-09-28 20:09:35 +02:00
show_backup_ptrack["data-bytes"],
show_backup_full["data-bytes"]
)
)
2018-01-25 20:39:16 +02:00
@unittest.expectedFailure
2017-09-28 20:09:35 +02:00
# @unittest.skip("skip")
2017-12-22 00:39:16 +02:00
@unittest.skipUnless(ProbackupTest.enterprise, 'skip')
2017-09-28 20:09:35 +02:00
def test_fullbackup_after_create_table_page_after_create_table(self):
"""
Case: Make full backup before created table in the tablespace.
Make ptrack backup after create table.
2018-01-25 20:39:16 +02:00
Check: incremental backup size should not be greater than full
2017-09-28 20:09:35 +02:00
"""
2017-10-02 13:53:24 +02:00
self.node.safe_psql(
2017-09-28 20:09:35 +02:00
"postgres",
2018-01-25 20:39:16 +02:00
"CREATE TABLE {0} TABLESPACE {1} "
"AS SELECT i AS id, MD5(i::text) AS text, "
"MD5(repeat(i::text,10))::tsvector AS tsvector "
"FROM generate_series(0,1005000) i".format('t1', tblspace_name)
2017-09-28 20:09:35 +02:00
)
backup_id_full = None
try:
2018-01-25 20:39:16 +02:00
backup_id_full = self.backup_node(
self.backup_dir, 'node', self.node, backup_type='full')
2017-09-28 20:09:35 +02:00
except ProbackupException as e:
self.fail(
"ERROR: Full backup failed.\n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
2017-09-28 20:09:35 +02:00
)
)
2017-10-02 13:53:24 +02:00
self.node.safe_psql(
2017-09-28 20:09:35 +02:00
"postgres",
2018-01-25 20:39:16 +02:00
"CREATE TABLE {0} TABLESPACE {1} "
"AS SELECT i AS id, MD5(i::text) AS text, "
"MD5(repeat(i::text,10))::tsvector AS tsvector "
"FROM generate_series(0,10) i".format('t2', tblspace_name)
2017-09-28 20:09:35 +02:00
)
backup_id_page = None
try:
2018-01-25 20:39:16 +02:00
backup_id_page = self.backup_node(
self.backup_dir, 'node', self.node, backup_type='page')
2017-09-28 20:09:35 +02:00
except ProbackupException as e:
self.fail(
"ERROR: Incremental backup failed.\n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
2017-09-28 20:09:35 +02:00
)
)
2018-01-25 20:39:16 +02:00
show_backup_full = self.show_pb(
self.backup_dir, 'node', backup_id_full)
show_backup_page = self.show_pb(
self.backup_dir, 'node', backup_id_page)
2017-09-28 20:09:35 +02:00
self.assertGreater(
show_backup_full["data-bytes"],
2018-01-25 20:39:16 +02:00
show_backup_page["data-bytes"],
"ERROR: Size of incremental backup greater than full. \n "
"INFO: {0} >{1}".format(
2017-09-28 20:09:35 +02:00
show_backup_page["data-bytes"],
show_backup_full["data-bytes"]
)
)
2017-10-19 17:26:24 +02:00
    # @unittest.expectedFailure
    # @unittest.skip("skip")
    @unittest.skipUnless(ProbackupTest.enterprise, 'skip')
    def test_multiple_segments(self):
        """
        Case: Fill the compressed tablespace with a table large enough to
        span multiple segments, take FULL and PAGE backups, check the PAGE
        backup is smaller than the FULL one, then restore each backup and
        verify no data was lost.
        """
        # ~1M rows so the relation spills into multiple 1GB segments.
        self.node.safe_psql(
            "postgres",
            "CREATE TABLE {0} TABLESPACE {1} "
            "AS SELECT i AS id, MD5(i::text) AS text, "
            "MD5(repeat(i::text,10))::tsvector AS tsvector "
            "FROM generate_series(0,1005000) i".format(
                't_heap', tblspace_name)
        )
        # Snapshot of the table contents at FULL-backup time.
        full_result = self.node.safe_psql("postgres", "SELECT * FROM t_heap")
        try:
            backup_id_full = self.backup_node(
                self.backup_dir, 'node', self.node, backup_type='full')
        except ProbackupException as e:
            self.fail(
                "ERROR: Full backup failed.\n {0} \n {1}".format(
                    repr(self.cmd),
                    repr(e.message)
                )
            )
        # Small delta for the PAGE backup to pick up.
        self.node.safe_psql(
            "postgres",
            "INSERT INTO {0} "
            "SELECT i AS id, MD5(i::text) AS text, "
            "MD5(repeat(i::text,10))::tsvector AS tsvector "
            "FROM generate_series(0,10) i".format(
                't_heap')
        )
        # Snapshot of the table contents at PAGE-backup time.
        page_result = self.node.safe_psql("postgres", "SELECT * FROM t_heap")
        try:
            backup_id_page = self.backup_node(
                self.backup_dir, 'node', self.node, backup_type='page')
        except ProbackupException as e:
            self.fail(
                "ERROR: Incremental backup failed.\n {0} \n {1}".format(
                    repr(self.cmd),
                    repr(e.message)
                )
            )

        show_backup_full = self.show_pb(
            self.backup_dir, 'node', backup_id_full)
        show_backup_page = self.show_pb(
            self.backup_dir, 'node', backup_id_page)
        # The increment must be strictly smaller than the full backup.
        self.assertGreater(
            show_backup_full["data-bytes"],
            show_backup_page["data-bytes"],
            "ERROR: Size of incremental backup greater than full. \n "
            "INFO: {0} >{1}".format(
                show_backup_page["data-bytes"],
                show_backup_full["data-bytes"]
            )
        )
        # CHECK FULL BACKUP
        # Wipe the data directory and the tablespace, then restore the
        # FULL backup and compare table contents with the first snapshot.
        self.node.stop()
        self.node.cleanup()
        shutil.rmtree(
            self.get_tblspace_path(self.node, tblspace_name),
            ignore_errors=True)
        self.restore_node(
            self.backup_dir, 'node', self.node,
            backup_id=backup_id_full, options=["-j", "4"])
        self.node.slow_start()
        self.assertEqual(
            full_result,
            self.node.safe_psql("postgres", "SELECT * FROM t_heap"),
            'Lost data after restore')
        # CHECK PAGE BACKUP
        # Same wipe-and-restore cycle for the PAGE backup.
        self.node.stop()
        self.node.cleanup()
        shutil.rmtree(
            self.get_tblspace_path(self.node, tblspace_name),
            ignore_errors=True)
        self.restore_node(
            self.backup_dir, 'node', self.node,
            backup_id=backup_id_page, options=["-j", "4"])
        self.node.slow_start()
        self.assertEqual(
            page_result,
            self.node.safe_psql("postgres", "SELECT * FROM t_heap"),
            'Lost data after restore')
2017-10-19 17:26:24 +02:00
    # @unittest.expectedFailure
    # @unittest.skip("skip")
    @unittest.skipUnless(ProbackupTest.enterprise, 'skip')
    def test_multiple_segments_in_multiple_tablespaces(self):
        """
        Case: Fill two additional compressed tablespaces with multi-segment
        tables, take FULL and PAGE backups, check the PAGE backup is smaller
        than the FULL one, then restore each backup and verify no data was
        lost in either tablespace.
        """
        tblspace_name_1 = 'tblspace_name_1'
        tblspace_name_2 = 'tblspace_name_2'

        self.create_tblspace_in_node(self.node, tblspace_name_1, cfs=True)
        self.create_tblspace_in_node(self.node, tblspace_name_2, cfs=True)
        # ~1M rows per table so each relation spans multiple segments.
        self.node.safe_psql(
            "postgres",
            "CREATE TABLE {0} TABLESPACE {1} "
            "AS SELECT i AS id, MD5(i::text) AS text, "
            "MD5(repeat(i::text,10))::tsvector AS tsvector "
            "FROM generate_series(0,1005000) i".format(
                't_heap_1', tblspace_name_1)
        )
        self.node.safe_psql(
            "postgres",
            "CREATE TABLE {0} TABLESPACE {1} "
            "AS SELECT i AS id, MD5(i::text) AS text, "
            "MD5(repeat(i::text,10))::tsvector AS tsvector "
            "FROM generate_series(0,1005000) i".format(
                't_heap_2', tblspace_name_2)
        )

        # Snapshots of both tables at FULL-backup time.
        full_result_1 = self.node.safe_psql(
            "postgres", "SELECT * FROM t_heap_1")
        full_result_2 = self.node.safe_psql(
            "postgres", "SELECT * FROM t_heap_2")
        try:
            backup_id_full = self.backup_node(
                self.backup_dir, 'node', self.node, backup_type='full')
        except ProbackupException as e:
            self.fail(
                "ERROR: Full backup failed.\n {0} \n {1}".format(
                    repr(self.cmd),
                    repr(e.message)
                )
            )
        # Small deltas in both tablespaces for the PAGE backup to pick up.
        self.node.safe_psql(
            "postgres",
            "INSERT INTO {0} "
            "SELECT i AS id, MD5(i::text) AS text, "
            "MD5(repeat(i::text,10))::tsvector AS tsvector "
            "FROM generate_series(0,10) i".format(
                't_heap_1')
        )
        self.node.safe_psql(
            "postgres",
            "INSERT INTO {0} "
            "SELECT i AS id, MD5(i::text) AS text, "
            "MD5(repeat(i::text,10))::tsvector AS tsvector "
            "FROM generate_series(0,10) i".format(
                't_heap_2')
        )

        # Snapshots of both tables at PAGE-backup time.
        page_result_1 = self.node.safe_psql(
            "postgres", "SELECT * FROM t_heap_1")
        page_result_2 = self.node.safe_psql(
            "postgres", "SELECT * FROM t_heap_2")
        try:
            backup_id_page = self.backup_node(
                self.backup_dir, 'node', self.node, backup_type='page')
        except ProbackupException as e:
            self.fail(
                "ERROR: Incremental backup failed.\n {0} \n {1}".format(
                    repr(self.cmd),
                    repr(e.message)
                )
            )

        show_backup_full = self.show_pb(
            self.backup_dir, 'node', backup_id_full)
        show_backup_page = self.show_pb(
            self.backup_dir, 'node', backup_id_page)
        # The increment must be strictly smaller than the full backup.
        self.assertGreater(
            show_backup_full["data-bytes"],
            show_backup_page["data-bytes"],
            "ERROR: Size of incremental backup greater than full. \n "
            "INFO: {0} >{1}".format(
                show_backup_page["data-bytes"],
                show_backup_full["data-bytes"]
            )
        )
        # CHECK FULL BACKUP
        # Wipe the data directory and all three tablespaces (including the
        # one from setUp), restore the FULL backup and compare contents.
        self.node.stop()
        self.node.cleanup()
        shutil.rmtree(
            self.get_tblspace_path(self.node, tblspace_name),
            ignore_errors=True)
        shutil.rmtree(
            self.get_tblspace_path(self.node, tblspace_name_1),
            ignore_errors=True)
        shutil.rmtree(
            self.get_tblspace_path(self.node, tblspace_name_2),
            ignore_errors=True)
        self.restore_node(
            self.backup_dir, 'node', self.node,
            backup_id=backup_id_full, options=["-j", "4"])
        self.node.slow_start()
        self.assertEqual(
            full_result_1,
            self.node.safe_psql("postgres", "SELECT * FROM t_heap_1"),
            'Lost data after restore')
        self.assertEqual(
            full_result_2,
            self.node.safe_psql("postgres", "SELECT * FROM t_heap_2"),
            'Lost data after restore')
        # CHECK PAGE BACKUP
        # Same wipe-and-restore cycle for the PAGE backup.
        self.node.stop()
        self.node.cleanup()
        shutil.rmtree(
            self.get_tblspace_path(self.node, tblspace_name),
            ignore_errors=True)
        shutil.rmtree(
            self.get_tblspace_path(self.node, tblspace_name_1),
            ignore_errors=True)
        shutil.rmtree(
            self.get_tblspace_path(self.node, tblspace_name_2),
            ignore_errors=True)
        self.restore_node(
            self.backup_dir, 'node', self.node,
            backup_id=backup_id_page, options=["-j", "4"])
        self.node.slow_start()
        self.assertEqual(
            page_result_1,
            self.node.safe_psql("postgres", "SELECT * FROM t_heap_1"),
            'Lost data after restore')
        self.assertEqual(
            page_result_2,
            self.node.safe_psql("postgres", "SELECT * FROM t_heap_2"),
            'Lost data after restore')
2018-01-25 20:39:16 +02:00
@unittest.expectedFailure
2017-09-28 20:09:35 +02:00
# @unittest.skip("skip")
2017-12-22 00:39:16 +02:00
@unittest.skipUnless(ProbackupTest.enterprise, 'skip')
2017-09-28 20:09:35 +02:00
def test_fullbackup_after_create_table_page_after_create_table_stream(self):
"""
2017-10-02 13:53:24 +02:00
Case: Make full backup before created table in the tablespace(--stream).
Make ptrack backup after create table(--stream).
2017-09-28 20:09:35 +02:00
Check: incremental backup will not greater as full
"""
2017-10-02 13:53:24 +02:00
self.node.safe_psql(
2017-09-28 20:09:35 +02:00
"postgres",
2018-01-25 20:39:16 +02:00
"CREATE TABLE {0} TABLESPACE {1} "
"AS SELECT i AS id, MD5(i::text) AS text, "
"MD5(repeat(i::text,10))::tsvector AS tsvector "
"FROM generate_series(0,1005000) i".format('t1', tblspace_name)
2017-09-28 20:09:35 +02:00
)
backup_id_full = None
try:
2018-01-25 20:39:16 +02:00
backup_id_full = self.backup_node(
self.backup_dir, 'node', self.node,
backup_type='full', options=['--stream'])
2017-09-28 20:09:35 +02:00
except ProbackupException as e:
self.fail(
"ERROR: Full backup failed.\n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
2017-09-28 20:09:35 +02:00
)
)
2017-10-02 13:53:24 +02:00
self.node.safe_psql(
2017-09-28 20:09:35 +02:00
"postgres",
2018-01-25 20:39:16 +02:00
"CREATE TABLE {0} TABLESPACE {1} "
"AS SELECT i AS id, MD5(i::text) AS text, "
"MD5(repeat(i::text,10))::tsvector AS tsvector "
"FROM generate_series(0,10) i".format('t2', tblspace_name)
2017-09-28 20:09:35 +02:00
)
backup_id_page = None
try:
2018-01-25 20:39:16 +02:00
backup_id_page = self.backup_node(
self.backup_dir, 'node', self.node,
backup_type='page', options=['--stream'])
2017-09-28 20:09:35 +02:00
except ProbackupException as e:
self.fail(
"ERROR: Incremental backup failed.\n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
2017-09-28 20:09:35 +02:00
)
)
2018-01-25 20:39:16 +02:00
show_backup_full = self.show_pb(
self.backup_dir, 'node', backup_id_full)
show_backup_page = self.show_pb(
self.backup_dir, 'node', backup_id_page)
2017-09-28 20:09:35 +02:00
self.assertGreater(
show_backup_full["data-bytes"],
2018-01-25 20:39:16 +02:00
show_backup_page["data-bytes"],
"ERROR: Size of incremental backup greater than full. \n "
"INFO: {0} >{1}".format(
2017-09-28 20:09:35 +02:00
show_backup_page["data-bytes"],
show_backup_full["data-bytes"]
)
)
2017-10-02 13:53:24 +02:00
# --- Make backup with not valid data(broken .cfm) --- #
2018-01-25 20:39:16 +02:00
@unittest.expectedFailure
2017-10-02 13:53:24 +02:00
# @unittest.skip("skip")
2017-12-22 00:39:16 +02:00
@unittest.skipUnless(ProbackupTest.enterprise, 'skip')
2017-09-28 20:09:35 +02:00
def test_delete_random_cfm_file_from_tablespace_dir(self):
2017-10-02 13:53:24 +02:00
self.node.safe_psql(
"postgres",
2018-01-25 20:39:16 +02:00
"CREATE TABLE {0} TABLESPACE {1} "
"AS SELECT i AS id, MD5(i::text) AS text, "
"MD5(repeat(i::text,10))::tsvector AS tsvector "
"FROM generate_series(0,256) i".format('t1', tblspace_name)
2017-10-02 13:53:24 +02:00
)
2018-01-25 20:39:16 +02:00
list_cmf = find_by_extensions(
[self.get_tblspace_path(self.node, tblspace_name)],
['.cfm'])
2017-10-02 13:53:24 +02:00
self.assertTrue(
list_cmf,
"ERROR: .cfm-files not found into tablespace dir"
)
os.remove(random.choice(list_cmf))
self.assertRaises(
ProbackupException,
2018-01-25 20:39:16 +02:00
self.backup_node,
self.backup_dir,
'node',
self.node,
backup_type='full'
2017-10-02 13:53:24 +02:00
)
2017-09-28 20:09:35 +02:00
2018-01-25 20:39:16 +02:00
@unittest.expectedFailure
# @unittest.skip("skip")
2017-12-22 00:39:16 +02:00
@unittest.skipUnless(ProbackupTest.enterprise, 'skip')
2017-09-28 20:09:35 +02:00
def test_delete_file_pg_compression_from_tablespace_dir(self):
2018-01-25 20:39:16 +02:00
os.remove(
find_by_name(
[self.get_tblspace_path(self.node, tblspace_name)],
['pg_compression'])[0])
self.assertRaises(
ProbackupException,
2018-01-25 20:39:16 +02:00
self.backup_node,
self.backup_dir,
'node',
self.node,
backup_type='full'
)
2017-09-28 20:09:35 +02:00
2018-01-25 20:39:16 +02:00
@unittest.expectedFailure
2017-10-02 13:53:24 +02:00
# @unittest.skip("skip")
2017-12-22 00:39:16 +02:00
@unittest.skipUnless(ProbackupTest.enterprise, 'skip')
2017-09-28 20:09:35 +02:00
def test_delete_random_data_file_from_tablespace_dir(self):
2017-10-02 13:53:24 +02:00
self.node.safe_psql(
"postgres",
2018-01-25 20:39:16 +02:00
"CREATE TABLE {0} TABLESPACE {1} "
"AS SELECT i AS id, MD5(i::text) AS text, "
"MD5(repeat(i::text,10))::tsvector AS tsvector "
"FROM generate_series(0,256) i".format('t1', tblspace_name)
2017-10-02 13:53:24 +02:00
)
2018-01-25 20:39:16 +02:00
list_data_files = find_by_pattern(
[self.get_tblspace_path(self.node, tblspace_name)],
'^.*/\d+$')
2017-10-02 13:53:24 +02:00
self.assertTrue(
list_data_files,
"ERROR: Files of data not found into tablespace dir"
)
os.remove(random.choice(list_data_files))
self.assertRaises(
ProbackupException,
2018-01-25 20:39:16 +02:00
self.backup_node,
self.backup_dir,
'node',
self.node,
backup_type='full'
2017-10-02 13:53:24 +02:00
)
2017-09-28 20:09:35 +02:00
2018-01-25 20:39:16 +02:00
@unittest.expectedFailure
2017-10-02 13:53:24 +02:00
# @unittest.skip("skip")
2017-12-22 00:39:16 +02:00
@unittest.skipUnless(ProbackupTest.enterprise, 'skip')
2017-09-28 20:09:35 +02:00
def test_broken_random_cfm_file_into_tablespace_dir(self):
2017-10-02 13:53:24 +02:00
self.node.safe_psql(
"postgres",
2018-01-25 20:39:16 +02:00
"CREATE TABLE {0} TABLESPACE {1} "
"AS SELECT i AS id, MD5(i::text) AS text, "
"MD5(repeat(i::text,10))::tsvector AS tsvector "
"FROM generate_series(0,256) i".format('t1', tblspace_name)
2017-10-02 13:53:24 +02:00
)
2018-01-25 20:39:16 +02:00
list_cmf = find_by_extensions(
[self.get_tblspace_path(self.node, tblspace_name)],
['.cfm'])
2017-10-02 13:53:24 +02:00
self.assertTrue(
list_cmf,
"ERROR: .cfm-files not found into tablespace dir"
)
corrupt_file(random.choice(list_cmf))
self.assertRaises(
ProbackupException,
2018-01-25 20:39:16 +02:00
self.backup_node,
self.backup_dir,
'node',
self.node,
backup_type='full'
2017-10-02 13:53:24 +02:00
)
2017-09-28 20:09:35 +02:00
2018-01-25 20:39:16 +02:00
@unittest.expectedFailure
2017-10-02 13:53:24 +02:00
# @unittest.skip("skip")
2017-12-22 00:39:16 +02:00
@unittest.skipUnless(ProbackupTest.enterprise, 'skip')
2017-09-28 20:09:35 +02:00
def test_broken_random_data_file_into_tablespace_dir(self):
2017-10-02 13:53:24 +02:00
self.node.safe_psql(
"postgres",
2018-01-25 20:39:16 +02:00
"CREATE TABLE {0} TABLESPACE {1} "
"AS SELECT i AS id, MD5(i::text) AS text, "
"MD5(repeat(i::text,10))::tsvector AS tsvector "
"FROM generate_series(0,256) i".format('t1', tblspace_name)
2017-10-02 13:53:24 +02:00
)
2018-01-25 20:39:16 +02:00
list_data_files = find_by_pattern(
[self.get_tblspace_path(self.node, tblspace_name)],
'^.*/\d+$')
2017-10-02 13:53:24 +02:00
self.assertTrue(
list_data_files,
"ERROR: Files of data not found into tablespace dir"
)
corrupt_file(random.choice(list_data_files))
self.assertRaises(
ProbackupException,
2018-01-25 20:39:16 +02:00
self.backup_node,
self.backup_dir,
'node',
self.node,
backup_type='full'
2017-10-02 13:53:24 +02:00
)
2017-09-28 20:09:35 +02:00
2018-01-25 20:39:16 +02:00
@unittest.expectedFailure
2017-10-02 13:53:24 +02:00
# @unittest.skip("skip")
2017-12-22 00:39:16 +02:00
@unittest.skipUnless(ProbackupTest.enterprise, 'skip')
2017-09-28 20:09:35 +02:00
def test_broken_file_pg_compression_into_tablespace_dir(self):
2018-01-25 20:39:16 +02:00
corrupted_file = find_by_name(
[self.get_tblspace_path(self.node, tblspace_name)],
['pg_compression'])[0]
2017-10-02 13:53:24 +02:00
self.assertTrue(
corrupt_file(corrupted_file),
"ERROR: File is not corrupted or it missing"
)
self.assertRaises(
ProbackupException,
2018-01-25 20:39:16 +02:00
self.backup_node,
self.backup_dir,
'node',
self.node,
backup_type='full'
)
# # --- End ---#
# @unittest.skipUnless(ProbackupTest.enterprise, 'skip')
# def tearDown(self):
# self.node.cleanup()
# self.del_test_dir(module_name, self.fname)
2017-10-03 13:59:30 +02:00
2017-10-19 17:26:24 +02:00
#class CfsBackupEncTest(CfsBackupNoEncTest):
# # --- Begin --- #
# def setUp(self):
# os.environ["PG_CIPHER_KEY"] = "super_secret_cipher_key"
# super(CfsBackupEncTest, self).setUp()