# coding: utf-8

import os
import shutil
import time
import unittest
from datetime import datetime, timedelta

from .helpers.ptrack_helpers import ProbackupTest, ProbackupException

module_name = "merge"


class MergeTest(ProbackupTest, unittest.TestCase):
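    # NOTE: each test below provisions a throwaway node plus a backup catalog
    # (init_pb/add_instance/set_archiving) and drives merging through the
    # merge_backup() helper, which wraps the pg_probackup CLI. The roughly
    # equivalent manual invocation (paths illustrative) would be:
    #
    #   pg_probackup merge -B <backup_dir> --instance node -i <backup_id> -j 4
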
    def test_basic_merge_full_page(self):
        """
        Test MERGE command: it merges a FULL backup with the target PAGE backups
        """
        fname = self.id().split(".")[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, "backup")

        # Initialize instance and backup directory
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=["--data-checksums"])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, "node", node)
        self.set_archiving(backup_dir, "node", node)
        node.slow_start()

        # Do full backup
        self.backup_node(backup_dir, "node", node, options=['--compress'])
        show_backup = self.show_pb(backup_dir, "node")[0]

        self.assertEqual(show_backup["status"], "OK")
        self.assertEqual(show_backup["backup-mode"], "FULL")

        # Fill with data
        with node.connect() as conn:
            conn.execute("create table test (id int)")
            conn.execute(
                "insert into test select i from generate_series(1,10) s(i)")
            conn.commit()

        # Do first page backup
        self.backup_node(
            backup_dir, "node", node,
            backup_type="page", options=['--compress'])
        show_backup = self.show_pb(backup_dir, "node")[1]

        # sanity check
        self.assertEqual(show_backup["status"], "OK")
        self.assertEqual(show_backup["backup-mode"], "PAGE")

        # Fill with more data
        with node.connect() as conn:
            conn.execute(
                "insert into test select i from generate_series(1,10) s(i)")
            count1 = conn.execute("select count(*) from test")
            conn.commit()

        # Do second page backup
        self.backup_node(
            backup_dir, "node", node,
            backup_type="page", options=['--compress'])
        show_backup = self.show_pb(backup_dir, "node")[2]
        page_id = show_backup["id"]

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        # sanity check
        self.assertEqual(show_backup["status"], "OK")
        self.assertEqual(show_backup["backup-mode"], "PAGE")

        # Merge all backups
        self.merge_backup(
            backup_dir, "node", page_id, options=["-j", "4"])
        show_backups = self.show_pb(backup_dir, "node")

        # sanity check
        self.assertEqual(len(show_backups), 1)
        self.assertEqual(show_backups[0]["status"], "OK")
        self.assertEqual(show_backups[0]["backup-mode"], "FULL")

        # Drop node and restore it
        node.cleanup()
        self.restore_node(backup_dir, 'node', node)

        # Check physical correctness
        if self.paranoia:
            pgdata_restored = self.pgdata_content(
                node.data_dir, ignore_ptrack=False)
            self.compare_pgdata(pgdata, pgdata_restored)

        node.slow_start()

        # Check restored node
        count2 = node.execute("postgres", "select count(*) from test")
        self.assertEqual(count1, count2)

        # Clean after yourself
        node.cleanup()
        self.del_test_dir(module_name, fname)

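    # The test above relies on the merge collapsing the FULL+PAGE chain into
    # a single FULL backup: the len(show_backups) == 1 and
    # "backup-mode" == "FULL" assertions pin that down before the
    # restore-and-compare step. The same pattern recurs in most tests below.
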
    def test_merge_compressed_backups(self):
        """
        Test MERGE command with compressed backups
        """
        fname = self.id().split(".")[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, "backup")

        # Initialize instance and backup directory
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=["--data-checksums"])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, "node", node)
        self.set_archiving(backup_dir, "node", node)
        node.slow_start()

        # Do full compressed backup
        self.backup_node(backup_dir, "node", node, options=['--compress'])
        show_backup = self.show_pb(backup_dir, "node")[0]

        self.assertEqual(show_backup["status"], "OK")
        self.assertEqual(show_backup["backup-mode"], "FULL")

        # Fill with data
        with node.connect() as conn:
            conn.execute("create table test (id int)")
            conn.execute(
                "insert into test select i from generate_series(1,10) s(i)")
            count1 = conn.execute("select count(*) from test")
            conn.commit()

        # Do compressed page backup
        self.backup_node(
            backup_dir, "node", node,
            backup_type="page", options=['--compress'])
        show_backup = self.show_pb(backup_dir, "node")[1]
        page_id = show_backup["id"]

        self.assertEqual(show_backup["status"], "OK")
        self.assertEqual(show_backup["backup-mode"], "PAGE")

        # Merge all backups
        self.merge_backup(backup_dir, "node", page_id, options=['-j2'])
        show_backups = self.show_pb(backup_dir, "node")

        self.assertEqual(len(show_backups), 1)
        self.assertEqual(show_backups[0]["status"], "OK")
        self.assertEqual(show_backups[0]["backup-mode"], "FULL")

        # Drop node and restore it
        node.cleanup()
        self.restore_node(backup_dir, 'node', node)
        node.slow_start()

        # Check restored node
        count2 = node.execute("postgres", "select count(*) from test")
        self.assertEqual(count1, count2)

        # Clean after yourself
        node.cleanup()
        self.del_test_dir(module_name, fname)

    def test_merge_compressed_backups_1(self):
        """
        Test MERGE command with a chain of compressed backups:
        FULL, DELTA and PAGE
        """
        fname = self.id().split(".")[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, "backup")

        # Initialize instance and backup directory
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True, initdb_params=["--data-checksums"],
            pg_options={'autovacuum': 'off'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, "node", node)
        self.set_archiving(backup_dir, "node", node)
        node.slow_start()

        # Fill with data
        node.pgbench_init(scale=5)

        # Do compressed FULL backup
        self.backup_node(
            backup_dir, "node", node, options=['--compress', '--stream'])
        show_backup = self.show_pb(backup_dir, "node")[0]

        self.assertEqual(show_backup["status"], "OK")
        self.assertEqual(show_backup["backup-mode"], "FULL")

        # Change data
        pgbench = node.pgbench(options=['-T', '20', '-c', '2', '--no-vacuum'])
        pgbench.wait()

        # Do compressed DELTA backup
        self.backup_node(
            backup_dir, "node", node,
            backup_type="delta", options=['--compress', '--stream'])

        # Change data
        pgbench = node.pgbench(options=['-T', '20', '-c', '2', '--no-vacuum'])
        pgbench.wait()

        # Do compressed PAGE backup
        self.backup_node(
            backup_dir, "node", node,
            backup_type="page", options=['--compress'])

        pgdata = self.pgdata_content(node.data_dir)

        show_backup = self.show_pb(backup_dir, "node")[2]
        page_id = show_backup["id"]

        self.assertEqual(show_backup["status"], "OK")
        self.assertEqual(show_backup["backup-mode"], "PAGE")

        # Merge all backups
        self.merge_backup(backup_dir, "node", page_id, options=['-j2'])
        show_backups = self.show_pb(backup_dir, "node")

        self.assertEqual(len(show_backups), 1)
        self.assertEqual(show_backups[0]["status"], "OK")
        self.assertEqual(show_backups[0]["backup-mode"], "FULL")

        # Drop node and restore it
        node.cleanup()
        self.restore_node(backup_dir, 'node', node)

        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

        # Clean after yourself
        node.cleanup()
        self.del_test_dir(module_name, fname)

    def test_merge_compressed_and_uncompressed_backups(self):
        """
        Test MERGE command with compressed and uncompressed backups:
        compressed FULL and DELTA, uncompressed PAGE
        """
        fname = self.id().split(".")[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, "backup")

        # Initialize instance and backup directory
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True, initdb_params=["--data-checksums"],
            pg_options={'autovacuum': 'off'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, "node", node)
        self.set_archiving(backup_dir, "node", node)
        node.slow_start()

        # Fill with data
        node.pgbench_init(scale=10)

        # Do compressed FULL backup
        self.backup_node(backup_dir, "node", node, options=[
            '--compress-algorithm=zlib', '--stream'])
        show_backup = self.show_pb(backup_dir, "node")[0]

        self.assertEqual(show_backup["status"], "OK")
        self.assertEqual(show_backup["backup-mode"], "FULL")

        # Change data
        pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum'])
        pgbench.wait()

        # Do compressed DELTA backup
        self.backup_node(
            backup_dir, "node", node, backup_type="delta",
            options=['--compress', '--stream'])

        # Change data
        pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum'])
        pgbench.wait()

        # Do uncompressed PAGE backup
        self.backup_node(backup_dir, "node", node, backup_type="page")

        pgdata = self.pgdata_content(node.data_dir)

        show_backup = self.show_pb(backup_dir, "node")[2]
        page_id = show_backup["id"]

        self.assertEqual(show_backup["status"], "OK")
        self.assertEqual(show_backup["backup-mode"], "PAGE")

        # Merge all backups
        self.merge_backup(backup_dir, "node", page_id, options=['-j2'])
        show_backups = self.show_pb(backup_dir, "node")

        self.assertEqual(len(show_backups), 1)
        self.assertEqual(show_backups[0]["status"], "OK")
        self.assertEqual(show_backups[0]["backup-mode"], "FULL")

        # Drop node and restore it
        node.cleanup()
        self.restore_node(backup_dir, 'node', node)

        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

        # Clean after yourself
        node.cleanup()
        self.del_test_dir(module_name, fname)

    def test_merge_compressed_and_uncompressed_backups_1(self):
        """
        Test MERGE command with compressed and uncompressed backups:
        compressed FULL, uncompressed DELTA, compressed PAGE
        """
        fname = self.id().split(".")[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, "backup")

        # Initialize instance and backup directory
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True, initdb_params=["--data-checksums"],
            pg_options={'autovacuum': 'off'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, "node", node)
        self.set_archiving(backup_dir, "node", node)
        node.slow_start()

        # Fill with data
        node.pgbench_init(scale=5)

        # Do compressed FULL backup
        self.backup_node(backup_dir, "node", node, options=[
            '--compress-algorithm=zlib', '--stream'])
        show_backup = self.show_pb(backup_dir, "node")[0]

        self.assertEqual(show_backup["status"], "OK")
        self.assertEqual(show_backup["backup-mode"], "FULL")

        # Change data
        pgbench = node.pgbench(options=['-T', '20', '-c', '2', '--no-vacuum'])
        pgbench.wait()

        # Do uncompressed DELTA backup
        self.backup_node(
            backup_dir, "node", node, backup_type="delta",
            options=['--stream'])

        # Change data
        pgbench = node.pgbench(options=['-T', '20', '-c', '2', '--no-vacuum'])
        pgbench.wait()

        # Do compressed PAGE backup
        self.backup_node(
            backup_dir, "node", node, backup_type="page",
            options=['--compress-algorithm=zlib'])

        pgdata = self.pgdata_content(node.data_dir)

        show_backup = self.show_pb(backup_dir, "node")[2]
        page_id = show_backup["id"]

        self.assertEqual(show_backup["status"], "OK")
        self.assertEqual(show_backup["backup-mode"], "PAGE")

        # Merge all backups
        self.merge_backup(backup_dir, "node", page_id)
        show_backups = self.show_pb(backup_dir, "node")

        self.assertEqual(len(show_backups), 1)
        self.assertEqual(show_backups[0]["status"], "OK")
        self.assertEqual(show_backups[0]["backup-mode"], "FULL")

        # Drop node and restore it
        node.cleanup()
        self.restore_node(backup_dir, 'node', node)

        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

        # Clean after yourself
        node.cleanup()
        self.del_test_dir(module_name, fname)

    def test_merge_compressed_and_uncompressed_backups_2(self):
        """
        Test MERGE command with compressed and uncompressed backups:
        uncompressed FULL, compressed DELTA, uncompressed PAGE
        """
        fname = self.id().split(".")[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, "backup")

        # Initialize instance and backup directory
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True, initdb_params=["--data-checksums"],
            pg_options={'autovacuum': 'off'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, "node", node)
        self.set_archiving(backup_dir, "node", node)
        node.slow_start()

        # Fill with data
        node.pgbench_init(scale=5)

        # Do uncompressed FULL backup
        self.backup_node(backup_dir, "node", node)
        show_backup = self.show_pb(backup_dir, "node")[0]

        self.assertEqual(show_backup["status"], "OK")
        self.assertEqual(show_backup["backup-mode"], "FULL")

        # Change data
        pgbench = node.pgbench(options=['-T', '20', '-c', '2', '--no-vacuum'])
        pgbench.wait()

        # Do compressed DELTA backup
        self.backup_node(
            backup_dir, "node", node, backup_type="delta",
            options=['--compress-algorithm=zlib', '--stream'])

        # Change data
        pgbench = node.pgbench(options=['-T', '20', '-c', '2', '--no-vacuum'])
        pgbench.wait()

        # Do uncompressed PAGE backup
        self.backup_node(
            backup_dir, "node", node, backup_type="page")

        pgdata = self.pgdata_content(node.data_dir)

        show_backup = self.show_pb(backup_dir, "node")[2]
        page_id = show_backup["id"]

        self.assertEqual(show_backup["status"], "OK")
        self.assertEqual(show_backup["backup-mode"], "PAGE")

        # Merge all backups
        self.merge_backup(backup_dir, "node", page_id)
        show_backups = self.show_pb(backup_dir, "node")

        self.assertEqual(len(show_backups), 1)
        self.assertEqual(show_backups[0]["status"], "OK")
        self.assertEqual(show_backups[0]["backup-mode"], "FULL")

        # Drop node and restore it
        node.cleanup()
        self.restore_node(backup_dir, 'node', node)

        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

        # Clean after yourself
        node.cleanup()
        self.del_test_dir(module_name, fname)

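    # The three tests above walk the compression matrix (compressed vs
    # uncompressed FULL/DELTA/PAGE), which presumably exercises merge's
    # decompress-and-recompress path for every pairing of source and target
    # backup. '--compress' without an explicit algorithm is assumed here to
    # select the default algorithm, while '--compress-algorithm=zlib' and
    # '--compress-algorithm=pglz' pin it down explicitly.
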
    # @unittest.skip("skip")
    def test_merge_tablespaces(self):
        """
        Create tablespace with table, take FULL backup,
        create another tablespace with another table and drop previous
        tablespace, take page backup, merge it and restore
        """
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True, initdb_params=['--data-checksums'],
            pg_options={'autovacuum': 'off'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        self.create_tblspace_in_node(node, 'somedata')
        node.safe_psql(
            "postgres",
            "create table t_heap tablespace somedata as select i as id,"
            " md5(i::text) as text, md5(i::text)::tsvector as tsvector"
            " from generate_series(0,100) i")

        # FULL backup
        self.backup_node(backup_dir, 'node', node)

        # Create new tablespace
        self.create_tblspace_in_node(node, 'somedata1')

        node.safe_psql(
            "postgres",
            "create table t_heap1 tablespace somedata1 as select i as id,"
            " md5(i::text) as text, md5(i::text)::tsvector as tsvector"
            " from generate_series(0,100) i")

        node.safe_psql(
            "postgres",
            "drop table t_heap")

        # Drop old tablespace
        node.safe_psql(
            "postgres",
            "drop tablespace somedata")

        # PAGE backup
        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type="page")

        pgdata = self.pgdata_content(node.data_dir)

        node.stop()
        shutil.rmtree(
            self.get_tblspace_path(node, 'somedata'),
            ignore_errors=True)
        shutil.rmtree(
            self.get_tblspace_path(node, 'somedata1'),
            ignore_errors=True)
        node.cleanup()

        self.merge_backup(backup_dir, 'node', backup_id)

        self.restore_node(
            backup_dir, 'node', node, options=["-j", "4"])

        pgdata_restored = self.pgdata_content(node.data_dir)

        # this compare would fail if merge lost the tablespace directories
        self.compare_pgdata(pgdata, pgdata_restored)

        # Clean after yourself
        self.del_test_dir(module_name, fname)

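    # This tablespace test and the next one both delete the tablespace
    # directories on disk before restoring: if merge failed to carry the
    # tablespace drop/creation recorded in the incremental backup forward into
    # the merged FULL backup, restore would recreate stale or missing files
    # and compare_pgdata() would flag the difference.
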
    # @unittest.skip("skip")
    def test_merge_tablespaces_1(self):
        """
        Create tablespace with table, take FULL backup,
        create another tablespace with another table, take page backup,
        drop first tablespace and take delta backup,
        merge it and restore
        """
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True, initdb_params=['--data-checksums'],
            pg_options={'autovacuum': 'off'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        self.create_tblspace_in_node(node, 'somedata')

        # FULL backup
        self.backup_node(backup_dir, 'node', node)
        node.safe_psql(
            "postgres",
            "create table t_heap tablespace somedata as select i as id,"
            " md5(i::text) as text, md5(i::text)::tsvector as tsvector"
            " from generate_series(0,100) i")

        # CREATE NEW TABLESPACE
        self.create_tblspace_in_node(node, 'somedata1')

        node.safe_psql(
            "postgres",
            "create table t_heap1 tablespace somedata1 as select i as id,"
            " md5(i::text) as text, md5(i::text)::tsvector as tsvector"
            " from generate_series(0,100) i")

        # PAGE backup
        self.backup_node(backup_dir, 'node', node, backup_type="page")

        node.safe_psql(
            "postgres",
            "drop table t_heap")
        node.safe_psql(
            "postgres",
            "drop tablespace somedata")

        # DELTA backup
        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type="delta")

        pgdata = self.pgdata_content(node.data_dir)

        node.stop()
        shutil.rmtree(
            self.get_tblspace_path(node, 'somedata'),
            ignore_errors=True)
        shutil.rmtree(
            self.get_tblspace_path(node, 'somedata1'),
            ignore_errors=True)
        node.cleanup()

        self.merge_backup(backup_dir, 'node', backup_id)

        self.restore_node(
            backup_dir, 'node', node,
            options=["-j", "4"])

        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    def test_merge_page_truncate(self):
        """
        make node, create table, take full backup,
        delete last 3 pages, vacuum relation,
        take page backup, merge full and page,
        restore last page backup and check data correctness
        """
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '300s',
                'autovacuum': 'off'})

        node_restored = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node_restored'))

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node_restored.cleanup()
        node.slow_start()

        self.create_tblspace_in_node(node, 'somedata')

        node.safe_psql(
            "postgres",
            "create sequence t_seq; "
            "create table t_heap tablespace somedata as select i as id, "
            "md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1024) i;")

        node.safe_psql(
            "postgres",
            "vacuum t_heap")

        self.backup_node(backup_dir, 'node', node)

        node.safe_psql(
            "postgres",
            "delete from t_heap where ctid >= '(11,0)'")
        node.safe_psql(
            "postgres",
            "vacuum t_heap")

        self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        page_id = self.show_pb(backup_dir, "node")[1]["id"]
        self.merge_backup(backup_dir, "node", page_id)

        self.validate_pb(backup_dir)

        old_tablespace = self.get_tblspace_path(node, 'somedata')
        new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new')

        self.restore_node(
            backup_dir, 'node', node_restored,
            options=[
                "-j", "4",
                "-T", "{0}={1}".format(old_tablespace, new_tablespace)])

        # Physical comparison
        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        self.set_auto_conf(node_restored, {'port': node_restored.port})
        node_restored.slow_start()

        # Logical comparison
        result1 = node.safe_psql(
            "postgres",
            "select * from t_heap")

        result2 = node_restored.safe_psql(
            "postgres",
            "select * from t_heap")

        self.assertEqual(result1, result2)

        # Clean after yourself
        self.del_test_dir(module_name, fname)

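    # The truncate tests (the delta and ptrack variants below follow the same
    # script) check that a relation shrunk by DELETE + VACUUM between the FULL
    # and the incremental backup stays shrunk after merge: stale tail pages
    # from the FULL backup must not reappear in the merged result.
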
    def test_merge_delta_truncate(self):
        """
        make node, create table, take full backup,
        delete last 3 pages, vacuum relation,
        take delta backup, merge full and delta,
        restore last delta backup and check data correctness
        """
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '300s',
                'autovacuum': 'off'})

        node_restored = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node_restored'))

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node_restored.cleanup()
        node.slow_start()

        self.create_tblspace_in_node(node, 'somedata')

        node.safe_psql(
            "postgres",
            "create sequence t_seq; "
            "create table t_heap tablespace somedata as select i as id, "
            "md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1024) i;")

        node.safe_psql(
            "postgres",
            "vacuum t_heap")

        self.backup_node(backup_dir, 'node', node)

        node.safe_psql(
            "postgres",
            "delete from t_heap where ctid >= '(11,0)'")
        node.safe_psql(
            "postgres",
            "vacuum t_heap")

        self.backup_node(
            backup_dir, 'node', node, backup_type='delta')

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        page_id = self.show_pb(backup_dir, "node")[1]["id"]
        self.merge_backup(backup_dir, "node", page_id)

        self.validate_pb(backup_dir)

        old_tablespace = self.get_tblspace_path(node, 'somedata')
        new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new')

        self.restore_node(
            backup_dir, 'node', node_restored,
            options=[
                "-j", "4",
                "-T", "{0}={1}".format(old_tablespace, new_tablespace)])

        # Physical comparison
        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        self.set_auto_conf(node_restored, {'port': node_restored.port})
        node_restored.slow_start()

        # Logical comparison
        result1 = node.safe_psql(
            "postgres",
            "select * from t_heap")

        result2 = node_restored.safe_psql(
            "postgres",
            "select * from t_heap")

        self.assertEqual(result1, result2)

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    def test_merge_ptrack_truncate(self):
        """
        make node, create table, take full backup,
        delete last 3 pages, vacuum relation,
        take ptrack backup, merge full and ptrack,
        restore last ptrack backup and check data correctness
        """
        if not self.ptrack:
            return unittest.skip('Skipped because ptrack support is disabled')

        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            ptrack_enable=True,
            pg_options={'autovacuum': 'off'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        if node.major_version >= 12:
            node.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack")

        self.create_tblspace_in_node(node, 'somedata')

        node.safe_psql(
            "postgres",
            "create sequence t_seq; "
            "create table t_heap tablespace somedata as select i as id, "
            "md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1024) i;")

        node.safe_psql(
            "postgres",
            "vacuum t_heap")

        self.backup_node(backup_dir, 'node', node)

        node.safe_psql(
            "postgres",
            "delete from t_heap where ctid >= '(11,0)'")

        node.safe_psql(
            "postgres",
            "vacuum t_heap")

        page_id = self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack')

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        self.merge_backup(backup_dir, "node", page_id)

        self.validate_pb(backup_dir)

        node_restored = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node_restored'))
        node_restored.cleanup()

        old_tablespace = self.get_tblspace_path(node, 'somedata')
        new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new')

        self.restore_node(
            backup_dir, 'node', node_restored,
            options=[
                "-j", "4",
                "-T", "{0}={1}".format(old_tablespace, new_tablespace)])

        # Physical comparison
        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        self.set_auto_conf(node_restored, {'port': node_restored.port})
        node_restored.slow_start()

        # Logical comparison
        result1 = node.safe_psql(
            "postgres",
            "select * from t_heap")

        result2 = node_restored.safe_psql(
            "postgres",
            "select * from t_heap")

        self.assertEqual(result1, result2)

        # Clean after yourself
        self.del_test_dir(module_name, fname)

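    # The ptrack variant above is gated twice: self.ptrack skips the test
    # when the build under test lacks ptrack support, and on PostgreSQL 12+
    # the ptrack extension must additionally be created in the database, as
    # the major_version check shows.
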
    # @unittest.skip("skip")
    def test_merge_delta_delete(self):
        """
        Make node, create tablespace with table, take full backup,
        delete data and vacuum, take delta backup, merge full and delta,
        restore database with the tablespace remapped.
        """
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True, initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '30s',
                'autovacuum': 'off'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        self.create_tblspace_in_node(node, 'somedata')

        # FULL backup
        self.backup_node(backup_dir, 'node', node, options=["--stream"])

        node.safe_psql(
            "postgres",
            "create table t_heap tablespace somedata as select i as id,"
            " md5(i::text) as text, md5(i::text)::tsvector as tsvector"
            " from generate_series(0,100) i")

        node.safe_psql(
            "postgres",
            "delete from t_heap")

        node.safe_psql(
            "postgres",
            "vacuum t_heap")

        # DELTA BACKUP
        self.backup_node(
            backup_dir, 'node', node,
            backup_type='delta', options=["--stream"])

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        backup_id = self.show_pb(backup_dir, "node")[1]["id"]
        self.merge_backup(backup_dir, "node", backup_id, options=["-j", "4"])

        # RESTORE
        node_restored = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node_restored'))
        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored,
            options=[
                "-j", "4",
                "-T", "{0}={1}".format(
                    self.get_tblspace_path(node, 'somedata'),
                    self.get_tblspace_path(node_restored, 'somedata'))])

        # GET RESTORED PGDATA AND COMPARE
        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        # START RESTORED NODE
        self.set_auto_conf(node_restored, {'port': node_restored.port})
        node_restored.slow_start()

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_continue_failed_merge(self):
        """
        Check that failed MERGE can be continued
        """
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(
                module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # FULL backup
        self.backup_node(backup_dir, 'node', node)

        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id,"
            " md5(i::text) as text, md5(i::text)::tsvector as tsvector"
            " from generate_series(0,1000) i")

        # DELTA BACKUP
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta')

        node.safe_psql(
            "postgres",
            "delete from t_heap")

        node.safe_psql(
            "postgres",
            "vacuum t_heap")

        # DELTA BACKUP
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta')

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        backup_id = self.show_pb(backup_dir, "node")[2]["id"]

        gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True)

        gdb.set_breakpoint('backup_non_data_file_internal')
        gdb.run_until_break()

        gdb.continue_execution_until_break(5)

        gdb._execute('signal SIGKILL')
        gdb._execute('detach')
        time.sleep(1)

        print(self.show_pb(backup_dir, as_text=True, as_json=False))

        # Try to continue failed MERGE
        self.merge_backup(backup_dir, "node", backup_id)

        # Drop node and restore it
        node.cleanup()
        self.restore_node(backup_dir, 'node', node)

        # Clean after yourself
        self.del_test_dir(module_name, fname)

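    # Crash-injection pattern used by the failure-recovery tests: run merge
    # under gdb, break inside the file-copy routine (here
    # backup_non_data_file_internal), let it hit the breakpoint a few times
    # so the merge is genuinely mid-flight, then SIGKILL the process. A
    # backup left in MERGING state this way must be mergeable again on retry.
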
    # @unittest.skip("skip")
    def test_continue_failed_merge_with_corrupted_delta_backup(self):
        """
        Fail merge via gdb, corrupt DELTA backup, try to continue merge
        """
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True, initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # FULL backup
        self.backup_node(backup_dir, 'node', node)

        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id,"
            " md5(i::text) as text, md5(i::text)::tsvector as tsvector"
            " from generate_series(0,1000) i")

        old_path = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap')").rstrip()

        # DELTA BACKUP
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta')

        node.safe_psql(
            "postgres",
            "update t_heap set id = 100500")

        node.safe_psql(
            "postgres",
            "vacuum full t_heap")

        new_path = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap')").rstrip()

        # DELTA BACKUP
        backup_id_2 = self.backup_node(
            backup_dir, 'node', node, backup_type='delta')

        backup_id = self.show_pb(backup_dir, "node")[1]["id"]

        # Failed MERGE
        gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True)
        gdb.set_breakpoint('backup_non_data_file_internal')
        gdb.run_until_break()

        gdb.continue_execution_until_break(2)

        gdb._execute('signal SIGKILL')

        # CORRUPT the first incremental backup: read a block from the newer
        # DELTA backup and write it over a block of the older one
        # block_size + backup_header = 8200
        file = os.path.join(
            backup_dir, 'backups', 'node',
            backup_id_2, 'database', new_path)
        with open(file, 'rb') as f:
            f.seek(8200)
            block_1 = f.read(8200)

        file = os.path.join(
            backup_dir, 'backups', 'node',
            backup_id, 'database', old_path)
        with open(file, 'r+b') as f:
            f.seek(8200)
            f.write(block_1)

        # Try to continue failed MERGE
        try:
            print(self.merge_backup(backup_dir, "node", backup_id))
            self.assertEqual(
                1, 0,
                "Expecting Error because of incremental backup corruption.\n "
                "Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                "ERROR: Backup {0} has status CORRUPT, merge is aborted".format(
                    backup_id) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        # Clean after yourself
        self.del_test_dir(module_name, fname)

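    # The 8200-byte offset used above matches the comment in the test body:
    # assuming the default 8192-byte block size plus an 8-byte per-page
    # header in pg_probackup's data-file format, seeking 8200 bytes lands on
    # the second stored page, and copying it between backups of different
    # relation versions yields a block that fails validation during merge.
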
    def test_continue_failed_merge_2(self):
        """
        Check that failed MERGE on delete can be continued
        """
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True, initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # FULL backup
        self.backup_node(backup_dir, 'node', node)

        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id,"
            " md5(i::text) as text, md5(i::text)::tsvector as tsvector"
            " from generate_series(0,1000) i")

        # DELTA BACKUP
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta')

        node.safe_psql(
            "postgres",
            "delete from t_heap")

        node.safe_psql(
            "postgres",
            "vacuum t_heap")

        # DELTA BACKUP
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta')

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        backup_id = self.show_pb(backup_dir, "node")[2]["id"]

        gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True)

        gdb.set_breakpoint('pgFileDelete')

        gdb.run_until_break()

        gdb._execute('thread apply all bt')

        gdb.continue_execution_until_break(20)

        gdb._execute('thread apply all bt')

        gdb._execute('signal SIGKILL')

        print(self.show_pb(backup_dir, as_text=True, as_json=False))

        backup_id_deleted = self.show_pb(backup_dir, "node")[1]["id"]

        # TODO: check that the FULL backup's meta info status equals DELETING

        # Try to continue failed MERGE
        self.merge_backup(backup_dir, "node", backup_id)

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    def test_continue_failed_merge_3(self):
        """
        Check that failed MERGE cannot be continued if intermediate
        backup is missing.
        """
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True, initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # Create test data
        node.safe_psql("postgres", "create sequence t_seq")
        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id, nextval('t_seq')"
            " as t_seq, md5(i::text) as text, md5(i::text)::tsvector"
            " as tsvector from generate_series(0,100000) i")

        # FULL backup
        self.backup_node(backup_dir, 'node', node)

        # Create a few PAGE backups
        for _ in range(2):
            node.safe_psql(
                "postgres",
                "delete from t_heap")

            node.safe_psql(
                "postgres",
                "vacuum t_heap")
            node.safe_psql(
                "postgres",
                "insert into t_heap select i as id, nextval('t_seq') as t_seq,"
                " md5(i::text) as text, md5(i::text)::tsvector as tsvector"
                " from generate_series(100,200000) i")

            # PAGE BACKUP
            self.backup_node(
                backup_dir, 'node', node, backup_type='page')

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        backup_id_merge = self.show_pb(backup_dir, "node")[2]["id"]
        backup_id_delete = self.show_pb(backup_dir, "node")[1]["id"]

        print(self.show_pb(backup_dir, as_text=True, as_json=False))

        gdb = self.merge_backup(backup_dir, "node", backup_id_merge, gdb=True)

        gdb.set_breakpoint('backup_non_data_file_internal')
        gdb.run_until_break()
        gdb.continue_execution_until_break(2)

        gdb._execute('signal SIGKILL')

        print(self.show_pb(backup_dir, as_text=True, as_json=False))
        # print(os.path.join(backup_dir, "backups", "node", backup_id_delete))

        # DELETE PAGE1
        shutil.rmtree(
            os.path.join(backup_dir, "backups", "node", backup_id_delete))

        # Try to continue failed MERGE
        try:
            self.merge_backup(backup_dir, "node", backup_id_merge)
            self.assertEqual(
                1, 0,
                "Expecting Error because of backup corruption.\n "
                "Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                "ERROR: Incremental chain is broken, "
                "merge is impossible to finish" in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        # Clean after yourself
        self.del_test_dir(module_name, fname)

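    # Deleting an intermediate PAGE backup while its chain is half-merged
    # leaves no way to reconstruct the target backup, so the retry above is
    # required to fail with "Incremental chain is broken" rather than produce
    # a silently incomplete FULL backup.
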
    def test_merge_different_compression_algo(self):
        """
        Check that backups with different compression algorithms can be merged
        """
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # FULL backup
        self.backup_node(
            backup_dir, 'node', node, options=['--compress-algorithm=zlib'])

        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id,"
            " md5(i::text) as text, md5(i::text)::tsvector as tsvector"
            " from generate_series(0,1000) i")

        # DELTA BACKUP
        self.backup_node(
            backup_dir, 'node', node,
            backup_type='delta', options=['--compress-algorithm=pglz'])

        node.safe_psql(
            "postgres",
            "delete from t_heap")

        node.safe_psql(
            "postgres",
            "vacuum t_heap")

        # DELTA BACKUP
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta')

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        backup_id = self.show_pb(backup_dir, "node")[2]["id"]

        self.merge_backup(backup_dir, "node", backup_id)

        self.del_test_dir(module_name, fname)

    def test_merge_different_wal_modes(self):
        """
        Check that backups with different wal modes can be merged
        correctly
        """
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # FULL stream backup
        self.backup_node(
            backup_dir, 'node', node, options=['--stream'])

        # DELTA archive backup
        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type='delta')

        self.merge_backup(backup_dir, 'node', backup_id=backup_id)

        self.assertEqual(
            'ARCHIVE', self.show_pb(backup_dir, 'node', backup_id)['wal'])

        # DELTA stream backup
        backup_id = self.backup_node(
            backup_dir, 'node', node,
            backup_type='delta', options=['--stream'])

        self.merge_backup(backup_dir, 'node', backup_id=backup_id)

        self.assertEqual(
            'STREAM', self.show_pb(backup_dir, 'node', backup_id)['wal'])

        self.del_test_dir(module_name, fname)

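    # The assertions above show the merged backup reporting the WAL mode of
    # the incremental backup that was merged (ARCHIVE for the archive DELTA,
    # STREAM for the stream DELTA), not the mode of the original FULL backup.
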
    def test_crash_after_opening_backup_control_1(self):
        """
        check that crashing after opening backup.control
        for writing will not result in losing backup metadata
        """
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'autovacuum': 'off'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # FULL stream backup
        self.backup_node(
            backup_dir, 'node', node, options=['--stream'])

        # DELTA archive backup
        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type='delta')

        print(self.show_pb(
            backup_dir, 'node', as_json=False, as_text=True))

        gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True)
        gdb.set_breakpoint('write_backup_filelist')
        gdb.run_until_break()

        gdb.set_breakpoint('write_backup')
        gdb.continue_execution_until_break()
        gdb.set_breakpoint('pgBackupWriteControl')
        gdb.continue_execution_until_break()

        gdb._execute('signal SIGKILL')

        print(self.show_pb(
            backup_dir, 'node', as_json=False, as_text=True))

        self.assertEqual(
            'MERGING', self.show_pb(backup_dir, 'node')[0]['status'])

        self.assertEqual(
            'MERGING', self.show_pb(backup_dir, 'node')[1]['status'])

        self.del_test_dir(module_name, fname)

    def test_crash_after_opening_backup_control_2(self):
        """
        check that crashing after opening backup_content.control
        for writing will not result in losing metadata about backup files
        """
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'autovacuum': 'off'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # Add data
        node.pgbench_init(scale=3)

        # FULL backup
        full_id = self.backup_node(
            backup_dir, 'node', node, options=['--stream'])

        # Change data
        pgbench = node.pgbench(options=['-T', '20', '-c', '2'])
        pgbench.wait()

        path = node.safe_psql(
            'postgres',
            "select pg_relation_filepath('pgbench_accounts')").rstrip()

        fsm_path = path + '_fsm'

        node.safe_psql(
            'postgres',
            'vacuum pgbench_accounts')

        # DELTA backup
        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type='delta')

        pgdata = self.pgdata_content(node.data_dir)

        print(self.show_pb(
            backup_dir, 'node', as_json=False, as_text=True))

        gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True)
        gdb.set_breakpoint('write_backup_filelist')
        gdb.run_until_break()

        gdb.set_breakpoint('sprintf')
        gdb.continue_execution_until_break(20)
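
        # Breaking on sprintf() twenty calls into write_backup_filelist()
        # presumably leaves backup_content.control only partially written;
        # killing the process here emulates a crash in the middle of
        # flushing the file list.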

        gdb._execute('signal SIGKILL')

        print(self.show_pb(
            backup_dir, 'node', as_json=False, as_text=True))

        self.assertEqual(
            'MERGING', self.show_pb(backup_dir, 'node')[0]['status'])

        self.assertEqual(
            'MERGING', self.show_pb(backup_dir, 'node')[1]['status'])

        # In to_backup drop a file that came from from_backup
        # to emulate a crash during the previous merge
        file_to_remove = os.path.join(
            backup_dir, 'backups',
            'node', full_id, 'database', fsm_path)

        # print(file_to_remove)

        os.remove(file_to_remove)

        # Continue failed merge
        self.merge_backup(backup_dir, "node", backup_id)

        node.cleanup()

        # restore merged backup
        self.restore_node(backup_dir, 'node', node)

        pgdata_restored = self.pgdata_content(node.data_dir)

        self.compare_pgdata(pgdata, pgdata_restored)

        self.del_test_dir(module_name, fname)
    def test_losing_file_after_failed_merge(self):
        """
        check that a file dropped from the FULL backup after a merge
        crashed mid-way (the visibility map fork of pgbench_accounts
        is removed here) does not prevent the merge from being retried
        """
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'autovacuum': 'off'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # Add data
        node.pgbench_init(scale=1)

        # FULL backup
        full_id = self.backup_node(
            backup_dir, 'node', node, options=['--stream'])

        # Change data
        node.safe_psql(
            'postgres',
            "update pgbench_accounts set aid = aid + 1005000")

        path = node.safe_psql(
            'postgres',
            "select pg_relation_filepath('pgbench_accounts')").rstrip()

        node.safe_psql(
            'postgres',
            "VACUUM pgbench_accounts")

        vm_path = path + '_vm'

        # DELTA backup
        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type='delta')

        pgdata = self.pgdata_content(node.data_dir)

        print(self.show_pb(
            backup_dir, 'node', as_json=False, as_text=True))

        gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True)
        gdb.set_breakpoint('write_backup_filelist')
        gdb.run_until_break()

        gdb.set_breakpoint('sprintf')
        gdb.continue_execution_until_break(20)

        gdb._execute('signal SIGKILL')

        print(self.show_pb(
            backup_dir, 'node', as_json=False, as_text=True))

        self.assertEqual(
            'MERGING', self.show_pb(backup_dir, 'node')[0]['status'])

        self.assertEqual(
            'MERGING', self.show_pb(backup_dir, 'node')[1]['status'])

        # In to_backup drop a file that came from from_backup
        # to emulate a crash during the previous merge
        file_to_remove = os.path.join(
            backup_dir, 'backups',
            'node', full_id, 'database', vm_path)

        os.remove(file_to_remove)
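
        # The retried merge is expected to tolerate the missing file:
        # it was already processed by the crashed run, so the retry
        # should complete and promote the FULL backup to OK.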

        # Try to continue failed MERGE
        self.merge_backup(backup_dir, "node", backup_id)

        self.assertEqual(
            'OK', self.show_pb(backup_dir, 'node')[0]['status'])

        node.cleanup()

        self.restore_node(backup_dir, 'node', node)

        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

        self.del_test_dir(module_name, fname)
    def test_failed_merge_after_delete(self):
        """
        check that merging another incremental backup is refused
        while the FULL backup still has an unfinished merge
        """
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'autovacuum': 'off'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # add database
        node.safe_psql(
            'postgres',
            'CREATE DATABASE testdb')

        dboid = node.safe_psql(
            "postgres",
            "select oid from pg_database where datname = 'testdb'").rstrip()

        # take FULL backup
        full_id = self.backup_node(
            backup_dir, 'node', node, options=['--stream'])

        # drop database
        node.safe_psql(
            'postgres',
            'DROP DATABASE testdb')

        # take PAGE backup
        page_id = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        page_id_2 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        gdb = self.merge_backup(
            backup_dir, 'node', page_id,
            gdb=True, options=['--log-level-console=verbose'])

        gdb.set_breakpoint('delete_backup_files')
        gdb.run_until_break()

        gdb.set_breakpoint('pgFileDelete')
        gdb.continue_execution_until_break(20)

        gdb._execute('signal SIGKILL')
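
        # After the kill the FULL backup is left in the intermediate
        # MERGED state: file deletion had already started, so the merge
        # can presumably only be completed, not rolled back, and the
        # backup still carries the FULL backup's id.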

        # backup half-merged
        self.assertEqual(
            'MERGED', self.show_pb(backup_dir, 'node')[0]['status'])

        self.assertEqual(
            full_id, self.show_pb(backup_dir, 'node')[0]['id'])

        db_path = os.path.join(
            backup_dir, 'backups', 'node',
            full_id, 'database', 'base', dboid)

        try:
            self.merge_backup(
                backup_dir, 'node', page_id_2,
                options=['--log-level-console=verbose'])
            self.assertEqual(
                1, 0,
                "Expecting Error because of missing parent.\n "
                "Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                "ERROR: Full backup {0} has unfinished merge with backup {1}".format(
                    full_id, page_id) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        self.del_test_dir(module_name, fname)
    def test_failed_merge_after_delete_1(self):
        """
        check that an incremental backup whose parent FULL backup was
        consumed by a crashed merge of a sibling chain becomes orphan
        """
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'autovacuum': 'off'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # take FULL backup
        full_id = self.backup_node(
            backup_dir, 'node', node, options=['--stream'])

        node.pgbench_init(scale=1)

        page_1 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # Change PAGE1 backup status to ERROR
        self.change_backup_status(backup_dir, 'node', page_1, 'ERROR')

        pgdata = self.pgdata_content(node.data_dir)

        # add data
        pgbench = node.pgbench(options=['-T', '10', '-c', '2', '--no-vacuum'])
        pgbench.wait()

        # take PAGE2 backup
        page_id = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # Change PAGE1 backup status to OK
        self.change_backup_status(backup_dir, 'node', page_1, 'OK')
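
        # Because PAGE1 was in ERROR state when PAGE2 was taken, PAGE2's
        # parent is the FULL backup itself.  Merging PAGE2 therefore
        # consumes PAGE1's parent, and PAGE1 is expected to end up orphan.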

        gdb = self.merge_backup(
            backup_dir, 'node', page_id,
            gdb=True, options=['--log-level-console=verbose'])

        gdb.set_breakpoint('delete_backup_files')
        gdb.run_until_break()

        # gdb.set_breakpoint('parray_bsearch')
        # gdb.continue_execution_until_break()

        gdb.set_breakpoint('pgFileDelete')
        gdb.continue_execution_until_break(30)
        gdb._execute('signal SIGKILL')

        self.assertEqual(
            full_id, self.show_pb(backup_dir, 'node')[0]['id'])

        # restore
        node.cleanup()
        try:
            #self.restore_node(backup_dir, 'node', node, backup_id=page_1)
            self.restore_node(backup_dir, 'node', node)
            self.assertEqual(
                1, 0,
                "Expecting Error because of orphan status.\n "
                "Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                "ERROR: Backup {0} is orphan".format(page_1),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        self.del_test_dir(module_name, fname)
    def test_failed_merge_after_delete_2(self):
        """
        check that deleting a half-merged incremental backup does not
        clear the unfinished-merge state of the FULL backup
        """
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'autovacuum': 'off'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # take FULL backup
        full_id = self.backup_node(
            backup_dir, 'node', node, options=['--stream'])

        node.pgbench_init(scale=1)

        page_1 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # add data
        pgbench = node.pgbench(options=['-T', '10', '-c', '2', '--no-vacuum'])
        pgbench.wait()

        # take PAGE2 backup
        page_2 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        gdb = self.merge_backup(
            backup_dir, 'node', page_2, gdb=True,
            options=['--log-level-console=VERBOSE'])

        gdb.set_breakpoint('pgFileDelete')
        gdb.run_until_break()
        gdb.continue_execution_until_break(2)
        gdb._execute('signal SIGKILL')

        self.delete_pb(backup_dir, 'node', backup_id=page_2)
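
        # Even though the half-merged PAGE2 has been deleted, the FULL
        # backup still records an unfinished merge with it, so merging
        # any other incremental backup is expected to be refused.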

        # rerun merge
        try:
            #self.restore_node(backup_dir, 'node', node, backup_id=page_1)
            self.merge_backup(backup_dir, 'node', page_1)
            self.assertEqual(
                1, 0,
                "Expecting Error because the backup is missing.\n "
                "Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                "ERROR: Full backup {0} has unfinished merge "
                "with backup {1}".format(full_id, page_2),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        self.del_test_dir(module_name, fname)
    def test_failed_merge_after_delete_3(self):
        """
        check that merging an incremental backup whose parent FULL
        backup directory is gone fails with a missing-parent error
        """
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'autovacuum': 'off'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # add database
        node.safe_psql(
            'postgres',
            'CREATE DATABASE testdb')

        dboid = node.safe_psql(
            "postgres",
            "select oid from pg_database where datname = 'testdb'").rstrip()

        # take FULL backup
        full_id = self.backup_node(
            backup_dir, 'node', node, options=['--stream'])

        # drop database
        node.safe_psql(
            'postgres',
            'DROP DATABASE testdb')

        # take PAGE backup
        page_id = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # create database
        node.safe_psql(
            'postgres',
            'create DATABASE testdb')

        page_id_2 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        gdb = self.merge_backup(
            backup_dir, 'node', page_id,
            gdb=True, options=['--log-level-console=verbose'])

        gdb.set_breakpoint('delete_backup_files')
        gdb.run_until_break()

        gdb.set_breakpoint('pgFileDelete')
        gdb.continue_execution_until_break(20)

        gdb._execute('signal SIGKILL')

        # backup half-merged
        self.assertEqual(
            'MERGED', self.show_pb(backup_dir, 'node')[0]['status'])

        self.assertEqual(
            full_id, self.show_pb(backup_dir, 'node')[0]['id'])

        db_path = os.path.join(
            backup_dir, 'backups', 'node', full_id)

        # FULL backup is missing now
        shutil.rmtree(db_path)
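
        # With the FULL backup directory gone, merging the remaining
        # incremental backup is expected to fail cleanly with a
        # missing-parent error rather than corrupt the catalog.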

        try:
            self.merge_backup(
                backup_dir, 'node', page_id_2,
                options=['--log-level-console=verbose'])
            self.assertEqual(
                1, 0,
                "Expecting Error because of missing parent.\n "
                "Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                "ERROR: Failed to find parent full backup for {0}".format(
                    page_id_2) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
|
|
|
|
def test_merge_backup_from_future(self):
|
|
|
|
"""
|
|
|
|
take FULL backup, table PAGE backup from future,
|
|
|
|
try to merge page with FULL
|
|
|
|
"""
|
|
|
|
fname = self.id().split('.')[3]
|
|
|
|
node = self.make_simple_node(
|
|
|
|
base_dir=os.path.join(module_name, fname, 'node'),
|
|
|
|
set_replication=True,
|
|
|
|
initdb_params=['--data-checksums'],
|
2020-04-19 01:50:49 +02:00
|
|
|
pg_options={'autovacuum': 'off'})
|
2019-03-27 17:16:53 +02:00
|
|
|
|
|
|
|
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
|
|
|
self.init_pb(backup_dir)
|
|
|
|
self.add_instance(backup_dir, 'node', node)
|
|
|
|
self.set_archiving(backup_dir, 'node', node)
|
|
|
|
node.slow_start()
|
|
|
|
|
|
|
|
# Take FULL
|
|
|
|
self.backup_node(backup_dir, 'node', node)
|
|
|
|
|
2020-05-11 20:23:25 +02:00
|
|
|
node.pgbench_init(scale=5)
|
2019-03-27 17:16:53 +02:00
|
|
|
|
|
|
|
# Take PAGE from future
|
2019-05-23 10:29:00 +02:00
|
|
|
backup_id = self.backup_node(
|
|
|
|
backup_dir, 'node', node, backup_type='page')
|
2019-03-27 17:16:53 +02:00
|
|
|
|
|
|
|
with open(
|
|
|
|
os.path.join(
|
|
|
|
backup_dir, 'backups', 'node',
|
|
|
|
backup_id, "backup.control"), "a") as conf:
|
|
|
|
conf.write("start-time='{:%Y-%m-%d %H:%M:%S}'\n".format(
|
|
|
|
datetime.now() + timedelta(days=3)))
|
|
|
|
|
|
|
|
# rename directory
|
|
|
|
new_id = self.show_pb(backup_dir, 'node')[1]['id']
|
|
|
|
|
|
|
|
os.rename(
|
|
|
|
os.path.join(backup_dir, 'backups', 'node', backup_id),
|
|
|
|
os.path.join(backup_dir, 'backups', 'node', new_id))
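
        # Appending a start-time three days ahead presumably makes the
        # catalog derive a "future" id for the PAGE backup; the directory
        # is renamed to match, so the next PAGE backup sorts before it
        # even though it is chronologically newer.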

        pgbench = node.pgbench(options=['-T', '5', '-c', '1', '--no-vacuum'])
        pgbench.wait()

        backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
        pgdata = self.pgdata_content(node.data_dir)

        result = node.safe_psql(
            'postgres',
            'SELECT * from pgbench_accounts')

        node_restored = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node_restored'))
        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node',
            node_restored, backup_id=backup_id)

        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

        # check that merged backup has the same state as the original
        # PAGE backup
        node_restored.cleanup()
        self.merge_backup(backup_dir, 'node', backup_id=backup_id)
        self.restore_node(
            backup_dir, 'node',
            node_restored, backup_id=backup_id)
        pgdata_restored = self.pgdata_content(node_restored.data_dir)

        self.set_auto_conf(
            node_restored,
            {'port': node_restored.port})
        node_restored.slow_start()

        result_new = node_restored.safe_psql(
            'postgres',
            'SELECT * from pgbench_accounts')

        self.assertEqual(result, result_new)

        self.compare_pgdata(pgdata, pgdata_restored)

        # Clean after yourself
        self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
|
|
|
|
def test_merge_multiple_descendants(self):
|
|
|
|
"""
|
|
|
|
PAGEb3
|
|
|
|
| PAGEa3
|
|
|
|
PAGEb2 /
|
|
|
|
| PAGEa2 /
|
|
|
|
PAGEb1 \ /
|
|
|
|
| PAGEa1
|
|
|
|
FULLb |
|
|
|
|
FULLa
|
|
|
|
"""
|
|
|
|
fname = self.id().split('.')[3]
|
|
|
|
node = self.make_simple_node(
|
|
|
|
base_dir=os.path.join(module_name, fname, 'node'),
|
|
|
|
initdb_params=['--data-checksums'])
|
|
|
|
|
|
|
|
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
|
|
|
self.init_pb(backup_dir)
|
|
|
|
self.add_instance(backup_dir, 'node', node)
|
|
|
|
self.set_archiving(backup_dir, 'node', node)
|
|
|
|
node.slow_start()
|
|
|
|
|
|
|
|
# Take FULL BACKUPs
|
|
|
|
backup_id_a = self.backup_node(backup_dir, 'node', node)
|
|
|
|
|
|
|
|
backup_id_b = self.backup_node(backup_dir, 'node', node)
|
|
|
|
|
|
|
|
# Change FULLb backup status to ERROR
|
|
|
|
self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
|
|
|
|
|
|
|
|
page_id_a1 = self.backup_node(
|
|
|
|
backup_dir, 'node', node, backup_type='page')
|
|
|
|
|
|
|
|
# Change FULLb backup status to OK
|
|
|
|
self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
|
|
|
|
|
|
|
|
# Change PAGEa1 backup status to ERROR
|
|
|
|
self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR')
|
|
|
|
|
|
|
|
# PAGEa1 ERROR
|
|
|
|
# FULLb OK
|
|
|
|
# FULLa OK
|
|
|
|
|
|
|
|
page_id_b1 = self.backup_node(
|
|
|
|
backup_dir, 'node', node, backup_type='page')
|
|
|
|
|
|
|
|
# PAGEb1 OK
|
|
|
|
# PAGEa1 ERROR
|
|
|
|
# FULLb OK
|
|
|
|
# FULLa OK
|
|
|
|
|
2019-07-01 15:24:57 +02:00
|
|
|
# Change PAGEa1 to OK
|
2019-03-28 20:58:15 +02:00
|
|
|
self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK')
|
|
|
|
|
2019-07-01 15:24:57 +02:00
|
|
|
# Change PAGEb1 and FULLb to ERROR
|
2019-03-28 20:58:15 +02:00
|
|
|
self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
|
2019-07-01 15:24:57 +02:00
|
|
|
self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
|
2019-03-28 20:58:15 +02:00
|
|
|
|
|
|
|
# PAGEb1 ERROR
|
|
|
|
# PAGEa1 OK
|
2019-07-01 15:24:57 +02:00
|
|
|
# FULLb ERROR
|
2019-03-28 20:58:15 +02:00
|
|
|
# FULLa OK
|
|
|
|
|
|
|
|
page_id_a2 = self.backup_node(
|
|
|
|
backup_dir, 'node', node, backup_type='page')
|
|
|
|
|
|
|
|
# PAGEa2 OK
|
|
|
|
# PAGEb1 ERROR
|
|
|
|
# PAGEa1 OK
|
2019-07-01 15:24:57 +02:00
|
|
|
# FULLb ERROR
|
2019-03-28 20:58:15 +02:00
|
|
|
# FULLa OK
|
|
|
|
|
2019-07-01 15:24:57 +02:00
|
|
|
# Change PAGEb1 and FULLb to OK
|
2019-03-28 20:58:15 +02:00
|
|
|
self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
|
2019-07-01 15:24:57 +02:00
|
|
|
self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
|
2019-03-28 20:58:15 +02:00
|
|
|
|
2019-07-01 15:24:57 +02:00
|
|
|
# Change PAGEa2 and FULL to ERROR
|
2019-03-28 20:58:15 +02:00
|
|
|
self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR')
|
2019-07-01 15:24:57 +02:00
|
|
|
self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR')
|
2019-03-28 20:58:15 +02:00
|
|
|
|
|
|
|
# PAGEa2 ERROR
|
|
|
|
# PAGEb1 OK
|
|
|
|
# PAGEa1 OK
|
|
|
|
# FULLb OK
|
2019-07-01 15:24:57 +02:00
|
|
|
# FULLa ERROR
|
2019-03-28 20:58:15 +02:00
|
|
|
|
|
|
|
page_id_b2 = self.backup_node(
|
|
|
|
backup_dir, 'node', node, backup_type='page')
|
|
|
|
|
|
|
|
# PAGEb2 OK
|
|
|
|
# PAGEa2 ERROR
|
|
|
|
# PAGEb1 OK
|
|
|
|
# PAGEa1 OK
|
|
|
|
# FULLb OK
|
2019-07-01 15:24:57 +02:00
|
|
|
# FULLa ERROR
|
2019-03-28 20:58:15 +02:00
|
|
|
|
2019-07-01 15:24:57 +02:00
|
|
|
# Change PAGEb2, PAGEb1 and FULLb to ERROR
|
2019-03-28 20:58:15 +02:00
|
|
|
self.change_backup_status(backup_dir, 'node', page_id_b2, 'ERROR')
|
|
|
|
self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
|
2019-07-01 15:24:57 +02:00
|
|
|
self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
|
|
|
|
|
|
|
|
# Change FULLa to OK
|
|
|
|
self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK')
|
2019-03-28 20:58:15 +02:00
|
|
|
|
|
|
|
# PAGEb2 ERROR
|
|
|
|
# PAGEa2 ERROR
|
|
|
|
# PAGEb1 ERROR
|
|
|
|
# PAGEa1 OK
|
2019-07-01 15:24:57 +02:00
|
|
|
# FULLb ERROR
|
2019-03-28 20:58:15 +02:00
|
|
|
# FULLa OK
|
|
|
|
|
|
|
|
page_id_a3 = self.backup_node(
|
|
|
|
backup_dir, 'node', node, backup_type='page')
|
|
|
|
|
|
|
|
# PAGEa3 OK
|
|
|
|
# PAGEb2 ERROR
|
|
|
|
# PAGEa2 ERROR
|
|
|
|
# PAGEb1 ERROR
|
|
|
|
# PAGEa1 OK
|
2019-07-01 15:24:57 +02:00
|
|
|
# FULLb ERROR
|
2019-03-28 20:58:15 +02:00
|
|
|
# FULLa OK
|
|
|
|
|
2019-07-01 15:24:57 +02:00
|
|
|
# Change PAGEa3 and FULLa to ERROR
|
2019-03-28 20:58:15 +02:00
|
|
|
self.change_backup_status(backup_dir, 'node', page_id_a3, 'ERROR')
|
|
|
|
|
2019-07-01 15:24:57 +02:00
|
|
|
# Change PAGEb2, PAGEb1 and FULLb to OK
|
2019-03-28 20:58:15 +02:00
|
|
|
self.change_backup_status(backup_dir, 'node', page_id_b2, 'OK')
|
2019-07-01 15:24:57 +02:00
|
|
|
self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
|
|
|
|
self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK')
|
2019-03-28 20:58:15 +02:00
|
|
|
|
|
|
|
page_id_b3 = self.backup_node(
|
|
|
|
backup_dir, 'node', node, backup_type='page')
|
|
|
|
|
|
|
|
# PAGEb3 OK
|
|
|
|
# PAGEa3 ERROR
|
|
|
|
# PAGEb2 OK
|
|
|
|
# PAGEa2 ERROR
|
2019-07-01 15:24:57 +02:00
|
|
|
# PAGEb1 OK
|
2019-03-28 20:58:15 +02:00
|
|
|
# PAGEa1 OK
|
|
|
|
# FULLb OK
|
2019-07-01 15:24:57 +02:00
|
|
|
# FULLa ERROR
|
2019-03-28 20:58:15 +02:00
|
|
|
|
2019-07-01 15:24:57 +02:00
|
|
|
# Change PAGEa3, PAGEa2 and FULLa status to OK
|
2019-03-28 20:58:15 +02:00
|
|
|
self.change_backup_status(backup_dir, 'node', page_id_a3, 'OK')
|
|
|
|
self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK')
|
2019-07-01 15:24:57 +02:00
|
|
|
self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK')
|
2019-03-28 20:58:15 +02:00
|
|
|
|
|
|
|
# PAGEb3 OK
|
|
|
|
# PAGEa3 OK
|
|
|
|
# PAGEb2 OK
|
|
|
|
# PAGEa2 OK
|
|
|
|
# PAGEb1 OK
|
|
|
|
# PAGEa1 OK
|
|
|
|
# FULLb OK
|
|
|
|
# FULLa OK
|
|
|
|
|
|
|
|
# Check that page_id_a3 and page_id_a2 are both direct descendants of page_id_a1
|
|
|
|
self.assertEqual(
|
|
|
|
self.show_pb(backup_dir, 'node', backup_id=page_id_a3)['parent-backup-id'],
|
|
|
|
page_id_a1)
|
|
|
|
|
|
|
|
self.assertEqual(
|
|
|
|
self.show_pb(backup_dir, 'node', backup_id=page_id_a2)['parent-backup-id'],
|
|
|
|
page_id_a1)
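
        # Merging PAGEa2 consumes FULLa and PAGEa1.  PAGEa3, the other
        # direct descendant of PAGEa1, thereby loses its parent chain,
        # so the subsequent merge attempt for it must fail.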

        self.merge_backup(
            backup_dir, 'node', page_id_a2,
            options=['--merge-expired', '--log-level-console=log'])

        try:
            self.merge_backup(
                backup_dir, 'node', page_id_a3,
                options=['--merge-expired', '--log-level-console=log'])
            self.assertEqual(
                1, 0,
                "Expecting Error because parent FULL backup is missing.\n "
                "Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                "ERROR: Failed to find parent full backup for {0}".format(
                    page_id_a3) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        # Clean after yourself
        self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
|
|
|
|
def test_smart_merge(self):
|
|
|
|
"""
|
|
|
|
make node, create database, take full backup, drop database,
|
|
|
|
take PAGE backup and merge it into FULL,
|
|
|
|
make sure that files from dropped database are not
|
|
|
|
copied during restore
|
|
|
|
https://github.com/postgrespro/pg_probackup/issues/63
|
|
|
|
"""
|
|
|
|
fname = self.id().split('.')[3]
|
|
|
|
node = self.make_simple_node(
|
|
|
|
base_dir=os.path.join(module_name, fname, 'node'),
|
|
|
|
set_replication=True,
|
|
|
|
initdb_params=['--data-checksums'])
|
|
|
|
|
|
|
|
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
|
|
|
self.init_pb(backup_dir)
|
|
|
|
self.add_instance(backup_dir, 'node', node)
|
|
|
|
self.set_archiving(backup_dir, 'node', node)
|
|
|
|
node.slow_start()
|
|
|
|
|
|
|
|
# create database
|
|
|
|
node.safe_psql(
|
|
|
|
"postgres",
|
|
|
|
"CREATE DATABASE testdb")
|
|
|
|
|
|
|
|
# take FULL backup
|
|
|
|
full_id = self.backup_node(backup_dir, 'node', node)
|
|
|
|
|
|
|
|
# drop database
|
|
|
|
node.safe_psql(
|
|
|
|
"postgres",
|
|
|
|
"DROP DATABASE testdb")
|
|
|
|
|
|
|
|
# take PAGE backup
|
|
|
|
page_id = self.backup_node(
|
|
|
|
backup_dir, 'node', node, backup_type='page')
|
|
|
|
|
|
|
|
# get delta between FULL and PAGE filelists
|
|
|
|
filelist_full = self.get_backup_filelist(
|
|
|
|
backup_dir, 'node', full_id)
|
|
|
|
|
|
|
|
filelist_page = self.get_backup_filelist(
|
|
|
|
backup_dir, 'node', page_id)
|
|
|
|
|
|
|
|
filelist_diff = self.get_backup_filelist_diff(
|
|
|
|
filelist_full, filelist_page)
|
|
|
|
|
|
|
|
# merge PAGE backup
|
|
|
|
self.merge_backup(
|
|
|
|
backup_dir, 'node', page_id,
|
|
|
|
options=['--log-level-file=VERBOSE'])
|
|
|
|
|
|
|
|
logfile = os.path.join(backup_dir, 'log', 'pg_probackup.log')
|
|
|
|
with open(logfile, 'r') as f:
|
|
|
|
logfile_content = f.read()
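
        # Sketch of the verification this test appears to be missing
        # (hypothetical; filelist_diff holds the files unique to the FULL
        # backup, i.e. those of the dropped database, and none of them
        # should show up in the verbose merge log):
        # for file in filelist_diff:
        #     self.assertNotIn(file, logfile_content)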

        # Clean after yourself
        self.del_test_dir(module_name, fname)
    def test_idempotent_merge(self):
        """
        check that a merge killed between the final renames can be
        retried and still completes correctly
        """
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'autovacuum': 'off'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # add database
        node.safe_psql(
            'postgres',
            'CREATE DATABASE testdb')

        # take FULL backup
        full_id = self.backup_node(
            backup_dir, 'node', node, options=['--stream'])

        # create database
        node.safe_psql(
            'postgres',
            'create DATABASE testdb1')

        # take PAGE backup
        page_id = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # create database
        node.safe_psql(
            'postgres',
            'create DATABASE testdb2')

        page_id_2 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        gdb = self.merge_backup(
            backup_dir, 'node', page_id_2,
            gdb=True, options=['--log-level-console=verbose'])

        gdb.set_breakpoint('delete_backup_files')
        gdb.run_until_break()
        gdb.remove_all_breakpoints()

        gdb.set_breakpoint('rename')
        gdb.continue_execution_until_break()
        gdb.continue_execution_until_break(2)

        gdb._execute('signal SIGKILL')
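
        # Killed between the directory renames, the catalog is expected
        # to show a single half-merged backup: MERGED status, still under
        # the FULL backup's id.  Retrying the merge should finish the job
        # and leave the backup OK under the incremental backup's id.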

        show_backups = self.show_pb(backup_dir, "node")
        self.assertEqual(len(show_backups), 1)

        self.assertEqual(
            'MERGED', self.show_pb(backup_dir, 'node')[0]['status'])

        self.assertEqual(
            full_id, self.show_pb(backup_dir, 'node')[0]['id'])

        self.merge_backup(backup_dir, 'node', page_id_2)

        self.assertEqual(
            'OK', self.show_pb(backup_dir, 'node')[0]['status'])

        self.assertEqual(
            page_id_2, self.show_pb(backup_dir, 'node')[0]['id'])

        self.del_test_dir(module_name, fname)
    def test_merge_correct_inheritance(self):
        """
        Make sure that backup metainformation fields
        'note' and 'expire-time' are correctly inherited
        during merge
        """
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'autovacuum': 'off'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # add database
        node.safe_psql(
            'postgres',
            'CREATE DATABASE testdb')

        # take FULL backup
        self.backup_node(backup_dir, 'node', node, options=['--stream'])

        # create database
        node.safe_psql(
            'postgres',
            'create DATABASE testdb1')

        # take PAGE backup
        page_id = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        self.set_backup(
            backup_dir, 'node', page_id, options=['--note=hello', '--ttl=20d'])

        page_meta = self.show_pb(backup_dir, 'node', page_id)
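
        # 'expire-time' is presumably computed from --ttl when set_backup
        # runs; both it and the note are set on the PAGE backup and should
        # survive the merge unchanged.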

        self.merge_backup(backup_dir, 'node', page_id)

        print(self.show_pb(backup_dir, 'node', page_id))

        self.assertEqual(
            page_meta['note'],
            self.show_pb(backup_dir, 'node', page_id)['note'])

        self.assertEqual(
            page_meta['expire-time'],
            self.show_pb(backup_dir, 'node', page_id)['expire-time'])

        self.del_test_dir(module_name, fname)
    def test_merge_correct_inheritance_1(self):
        """
        Make sure that FULL backup metainformation fields
        'note' and 'expire-time' are not transferred to the
        merged backup
        """
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'autovacuum': 'off'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # add database
        node.safe_psql(
            'postgres',
            'CREATE DATABASE testdb')

        # take FULL backup
        self.backup_node(
            backup_dir, 'node', node,
            options=['--stream', '--note=hello', '--ttl=20d'])

        # create database
        node.safe_psql(
            'postgres',
            'create DATABASE testdb1')

        # take PAGE backup
        page_id = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        self.merge_backup(backup_dir, 'node', page_id)
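
        # note/ttl were set on the FULL backup only; the merged backup
        # keeps the PAGE backup's metadata, so neither field should be
        # present afterwards.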

        self.assertNotIn(
            'note',
            self.show_pb(backup_dir, 'node', page_id))

        self.assertNotIn(
            'expire-time',
            self.show_pb(backup_dir, 'node', page_id))

        self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
|
|
|
|
# @unittest.expectedFailure
|
|
|
|
def test_multi_timeline_merge(self):
|
|
|
|
"""
|
|
|
|
Check that backup in PAGE mode choose
|
|
|
|
parent backup correctly:
|
|
|
|
t12 /---P-->
|
|
|
|
...
|
|
|
|
t3 /---->
|
|
|
|
t2 /---->
|
|
|
|
t1 -F-----D->
|
|
|
|
|
|
|
|
P must have F as parent
|
|
|
|
"""
|
|
|
|
fname = self.id().split('.')[3]
|
|
|
|
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
|
|
|
node = self.make_simple_node(
|
|
|
|
base_dir=os.path.join(module_name, fname, 'node'),
|
|
|
|
set_replication=True,
|
|
|
|
initdb_params=['--data-checksums'],
|
|
|
|
pg_options={'autovacuum': 'off'})
|
|
|
|
|
|
|
|
self.init_pb(backup_dir)
|
|
|
|
self.add_instance(backup_dir, 'node', node)
|
|
|
|
self.set_archiving(backup_dir, 'node', node)
|
|
|
|
node.slow_start()
|
|
|
|
|
|
|
|
node.safe_psql("postgres", "create extension pageinspect")
|
|
|
|
|
|
|
|
try:
|
|
|
|
node.safe_psql(
|
|
|
|
"postgres",
|
|
|
|
"create extension amcheck")
|
|
|
|
except QueryException as e:
|
|
|
|
node.safe_psql(
|
|
|
|
"postgres",
|
|
|
|
"create extension amcheck_next")
|
|
|
|
|
|
|
|
node.pgbench_init(scale=20)
|
|
|
|
full_id = self.backup_node(backup_dir, 'node', node)
|
|
|
|
|
|
|
|
pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum'])
|
|
|
|
pgbench.wait()
|
|
|
|
|
|
|
|
self.backup_node(backup_dir, 'node', node, backup_type='delta')
|
|
|
|
|
|
|
|
node.cleanup()
|
|
|
|
self.restore_node(
|
|
|
|
backup_dir, 'node', node, backup_id=full_id,
|
|
|
|
options=[
|
|
|
|
'--recovery-target=immediate',
|
|
|
|
'--recovery-target-action=promote'])
|
|
|
|
|
|
|
|
node.slow_start()
|
|
|
|
|
|
|
|
pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum'])
|
|
|
|
pgbench.wait()
|
|
|
|
|
|
|
|
# create timelines
|
|
|
|
for i in range(2, 7):
|
|
|
|
node.cleanup()
|
|
|
|
self.restore_node(
|
|
|
|
backup_dir, 'node', node,
|
|
|
|
options=[
|
|
|
|
'--recovery-target=latest',
|
|
|
|
'--recovery-target-action=promote',
|
|
|
|
'--recovery-target-timeline={0}'.format(i)])
|
|
|
|
node.slow_start()
|
|
|
|
|
|
|
|
# at this point there is i+1 timeline
|
|
|
|
pgbench = node.pgbench(options=['-T', '20', '-c', '1', '--no-vacuum'])
|
|
|
|
pgbench.wait()
|
|
|
|
|
|
|
|
# create backup at 2, 4 and 6 timeline
|
|
|
|
if i % 2 == 0:
|
|
|
|
self.backup_node(backup_dir, 'node', node, backup_type='page')
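
        # The final PAGE backup is taken on the last timeline; its parent
        # chain presumably has to be resolved back across all the timeline
        # switches down to the FULL backup on timeline 1 before the merge
        # can run.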

        page_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
        pgdata = self.pgdata_content(node.data_dir)

        self.merge_backup(backup_dir, 'node', page_id)

        result = node.safe_psql(
            "postgres", "select * from pgbench_accounts")

        node_restored = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node_restored'))
        node_restored.cleanup()

        self.restore_node(backup_dir, 'node', node_restored)
        pgdata_restored = self.pgdata_content(node_restored.data_dir)

        self.set_auto_conf(node_restored, {'port': node_restored.port})
        node_restored.slow_start()

        result_new = node_restored.safe_psql(
            "postgres", "select * from pgbench_accounts")

        self.assertEqual(result, result_new)

        self.compare_pgdata(pgdata, pgdata_restored)

        self.checkdb_node(
            backup_dir,
            'node',
            options=[
                '--amcheck',
                '-d', 'postgres', '-p', str(node.port)])

        self.checkdb_node(
            backup_dir,
            'node',
            options=[
                '--amcheck',
                '-d', 'postgres', '-p', str(node_restored.port)])

        # Clean after yourself
        self.del_test_dir(module_name, fname)
# 1. Need new test with corrupted FULL backup
# 2. different compression levels