Commit: tests: archive compression
Repository: pg_probackup (https://github.com/postgrespro/pg_probackup.git)
@@ -4,8 +4,16 @@
 Note: For now there are tests only for Linux
 ```
 
 
 ```
+Check physical correctness of restored instances.
+Apply this patch to disable HINT BITS: https://gist.github.com/gsmol/2bb34fd3ba31984369a72cc1c27a36b6
+export PG_PROBACKUP_PARANOIA=ON
+
+Check archive compression:
+export ARCHIVE_COMPRESSION=ON
+
 export PG_CONFIG=/path/to/pg_config
-export PGPRO_PARANOIA_MODE=ON/OFF
+pip install testgres==0.4.0
 python -m unittest [-v] tests
 ```
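Taken together, the new README instructions amount to a short setup sequence. The block below is a minimal sketch of a full test run with both new knobs enabled; the pg_config path is a placeholder, and the HINT BITS patch from the gist is only needed when the paranoia check is on:

```
# minimal sketch, assuming pg_probackup is already built from this tree
export PG_CONFIG=/path/to/pg_config     # placeholder, point at the target PostgreSQL build
export PG_PROBACKUP_PARANOIA=ON         # enables self.paranoia in the test helpers
export ARCHIVE_COMPRESSION=ON           # makes set_archiving() add --compress to archive-push
pip install testgres==0.4.0
python -m unittest -v tests
```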
@@ -26,8 +26,6 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         self.init_pb(backup_dir)
         self.add_instance(backup_dir, 'node', node)
         self.set_archiving(backup_dir, 'node', node)
-        # force more frequent wal switch
-        node.append_conf('postgresql.auto.conf', 'archive_timeout = 30')
         node.start()
 
         node.safe_psql(
@@ -206,7 +204,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
             set_replication=True,
             initdb_params=['--data-checksums'],
-            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s', 'archive_timeout': '1'}
+            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s'}
             )
         self.init_pb(backup_dir)
         self.add_instance(backup_dir, 'node', node)
@@ -257,7 +255,6 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         self.init_pb(backup_dir)
         # ADD INSTANCE 'MASTER'
         self.add_instance(backup_dir, 'master', master)
-        # force more frequent wal switch
         master.start()
 
         replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
@@ -386,6 +383,93 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
     # @unittest.expectedFailure
     @unittest.skip("skip")
     def test_archive_compress(self):
+        """Test compression"""
+        fname = self.id().split('.')[3]
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s'}
+            )
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.start()
+
+        self.backup_node(backup_dir, 'node', node)
+
+        node.safe_psql(
+            "postgres",
+            "create table t_heap as select 1 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,10000) i")
+        result = node.safe_psql("postgres", "SELECT * FROM t_heap")
+
+        self.backup_node(backup_dir, 'node', node, backup_type='page')
+
+        node.cleanup()
+        self.restore_node(backup_dir, 'node', node)
+
+        node.start()
+
+        self.assertEqual(result, node.safe_psql("postgres", "SELECT * FROM t_heap"),
+            'data after restore not equal to original data')
+
+
+        self.backup_node(backup_dir, 'node', node)
+
+        node.safe_psql(
+            "postgres",
+            "insert into t_heap select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(10000,20000) i")
+        result = node.safe_psql("postgres", "SELECT * FROM t_heap")
+
+        self.backup_node(backup_dir, 'node', node, backup_type='page')
+
+        node.cleanup()
+        self.restore_node(backup_dir, 'node', node)
+
+        node.start()
+
+        self.assertEqual(result, node.safe_psql("postgres", "SELECT * FROM t_heap"),
+            'data after restore not equal to original data')
+
+        # Clean after yourself
+        # self.del_test_dir(module_name, fname)
+
+    # @unittest.expectedFailure
+    @unittest.skip("skip")
+    def test_archive_pg_receivexlog(self):
+        """Description in jira issue PGPRO-434"""
+        fname = self.id().split('.')[3]
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s'}
+            )
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.start()
+
+        node.safe_psql(
+            "postgres",
+            "create table t_heap as select 1 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,100) i")
+
+        result = node.safe_psql("postgres", "SELECT * FROM t_heap")
+        self.backup_node(backup_dir, 'node', node)
+
+        node.cleanup()
+
+        self.restore_node(backup_dir, 'node', node, backup_type='page')
+        node.start()
+
+        self.assertEqual(result, node.safe_psql("postgres", "SELECT * FROM t_heap"),
+            'data after restore not equal to original data')
+        # Clean after yourself
+        # self.del_test_dir(module_name, fname)
+
+    # @unittest.expectedFailure
+    @unittest.skip("skip")
+    def test_archive_pg_receivexlog_compression_pg_10(self):
         """Description in jira issue PGPRO-434"""
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@@ -397,8 +481,6 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         self.init_pb(backup_dir)
         self.add_instance(backup_dir, 'node', node)
         self.set_archiving(backup_dir, 'node', node)
-        # force more frequent wal switch
-        node.append_conf('postgresql.auto.conf', 'archive_timeout = 30')
         node.start()
 
         node.safe_psql(
@@ -125,10 +125,14 @@ class ProbackupTest(object):
         self.test_env["LC_TIME"] = "C"
 
         self.paranoia = False
-        if 'PGPRO_PARANOIA_MODE' in self.test_env:
-            if self.test_env['PGPRO_PARANOIA_MODE'] == 'ON':
+        if 'PG_PROBACKUP_PARANOIA' in self.test_env:
+            if self.test_env['PG_PROBACKUP_PARANOIA'] == 'ON':
                 self.paranoia = True
 
+        if 'ARCHIVE_COMPRESSION' in self.test_env:
+            if self.test_env['ARCHIVE_COMPRESSION'] == 'ON':
+                self.archive_compress = True
+
         self.helpers_path = os.path.dirname(os.path.realpath(__file__))
         self.dir_path = os.path.abspath(os.path.join(self.helpers_path, os.pardir))
         self.tmp_path = os.path.abspath(os.path.join(self.dir_path, 'tmp_dirs'))
@@ -559,7 +563,7 @@ class ProbackupTest(object):
             out_dict[key.strip()] = value.strip(" '").replace("'\n", "")
         return out_dict
 
-    def set_archiving(self, backup_dir, instance, node, replica=False, compress=False):
+    def set_archiving(self, backup_dir, instance, node, replica=False):
 
         if replica:
             archive_mode = 'always'
@@ -576,7 +580,7 @@ class ProbackupTest(object):
             "archive_mode = {0}".format(archive_mode)
             )
         if os.name == 'posix':
-            if compress:
+            if self.archive_compress:
                 node.append_conf(
                     "postgresql.auto.conf",
                     "archive_command = '{0} archive-push -B {1} --instance={2} --compress --wal-file-path %p --wal-file-name %f'".format(
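With this change, compression is driven by the ARCHIVE_COMPRESSION environment variable through the instance-level self.archive_compress flag rather than a per-call compress argument, so existing callers of set_archiving() do not change. When the flag is set, the appended postgresql.auto.conf entry expands roughly as follows; the binary path, backup directory, and instance name are hypothetical values standing in for the format arguments:

```
# hypothetical expansion of the format string above
archive_command = '/path/to/pg_probackup archive-push -B /path/to/backup --instance=node --compress --wal-file-path %p --wal-file-name %f'
```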