Merge remote-tracking branch 'origin/master' into pgpro-1457

Commit: 14e1ae264b
README.md (17 lines changed)
@@ -47,18 +47,27 @@ Regardless of the chosen backup type, all backups taken with `pg_probackup` supp
 #DEB Ubuntu|Debian Packages
 echo "deb [arch=amd64] http://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup.list
 wget -O - http://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | apt-key add - && apt-get update
-apt-get install pg-probackup-(10|9.6|9.5)
+apt-get install pg-probackup-{10,9.6,9.5}
+
+#DEB-SRC Packages
+echo "deb-src [arch=amd64] http://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" >>\
+/etc/apt/sources.list.d/pg_probackup.list
+apt-get source pg-probackup-{10,9.6,9.5}
 
 #RPM Centos Packages
 rpm -ivh http://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-centos.noarch.rpm
-yum install pg_probackup-(10|9.6|9.5)
+yum install pg_probackup-{10,9.6,9.5}
 
 #SRPM Centos Packages
-yumdownloader --source pg_probackup-(10|9.6|9.5)
+yumdownloader --source pg_probackup-{10,9.6,9.5}
+
+#RPM RHEL Packages
+rpm -ivh http://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-rhel.noarch.rpm
+yum install pg_probackup-{10,9.6,9.5}
 
 #RPM Oracle Linux Packages
 rpm -ivh http://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-oraclelinux.noarch.rpm
-yum install pg_probackup-(10|9.6|9.5)
+yum install pg_probackup-{10,9.6,9.5}
 
 #SRPM Oracle Linux Packages
-yumdownloader --source pg_probackup-(10|9.6|9.5)
+yumdownloader --source pg_probackup-{10,9.6,9.5}
 ```
 
 To compile `pg_probackup`, you must have a PostgreSQL installation and raw source tree. To install `pg_probackup`, execute this in the module's directory:
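The build-from-source command that follows this sentence in the README falls outside the hunk above. For a PGXS-style module like this one it is typically of the following shape; treat the paths as placeholders rather than the README's literal text:

```shell
# Build and install pg_probackup against an existing PostgreSQL
# installation and its source tree (both paths are placeholders).
make USE_PGXS=1 PG_CONFIG=/usr/local/pgsql/bin/pg_config top_srcdir=/path/to/postgresql-source
make USE_PGXS=1 PG_CONFIG=/usr/local/pgsql/bin/pg_config top_srcdir=/path/to/postgresql-source install
```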
src/backup.c (50 lines changed)
@@ -942,7 +942,7 @@ confirm_block_size(const char *name, int blcksz)
 	char	   *endp;
 	int			block_size;
 
-	res = pgut_execute(backup_conn, "SELECT current_setting($1)", 1, &name, true);
+	res = pgut_execute(backup_conn, "SELECT pg_catalog.current_setting($1)", 1, &name, true);
 	if (PQntuples(res) != 1 || PQnfields(res) != 1)
 		elog(ERROR, "cannot get %s: %s", name, PQerrorMessage(backup_conn));

@@ -976,13 +976,13 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup)
 	params[1] = smooth ? "false" : "true";
 	if (!exclusive_backup)
 		res = pgut_execute(conn,
-						   "SELECT pg_start_backup($1, $2, false)",
+						   "SELECT pg_catalog.pg_start_backup($1, $2, false)",
 						   2,
 						   params,
 						   true);
 	else
 		res = pgut_execute(conn,
-						   "SELECT pg_start_backup($1, $2)",
+						   "SELECT pg_catalog.pg_start_backup($1, $2)",
 						   2,
 						   params,
 						   true);

@@ -1040,9 +1040,9 @@ pg_switch_wal(PGconn *conn)
 	PQclear(res);
 
 	if (server_version >= 100000)
-		res = pgut_execute(conn, "SELECT * FROM pg_switch_wal()", 0, NULL, true);
+		res = pgut_execute(conn, "SELECT * FROM pg_catalog.pg_switch_wal()", 0, NULL, true);
 	else
-		res = pgut_execute(conn, "SELECT * FROM pg_switch_xlog()", 0, NULL, true);
+		res = pgut_execute(conn, "SELECT * FROM pg_catalog.pg_switch_xlog()", 0, NULL, true);
 
 	PQclear(res);
 }

@@ -1067,7 +1067,7 @@ pg_ptrack_support(void)
 	PQclear(res_db);
 
 	res_db = pgut_execute(backup_conn,
-						  "SELECT ptrack_version()",
+						  "SELECT pg_catalog.ptrack_version()",
 						  0, NULL, true);
 	if (PQntuples(res_db) == 0)
 	{

@@ -1127,7 +1127,7 @@ pg_is_in_recovery(void)
 {
 	PGresult   *res_db;
 
-	res_db = pgut_execute(backup_conn, "SELECT pg_is_in_recovery()", 0, NULL, true);
+	res_db = pgut_execute(backup_conn, "SELECT pg_catalog.pg_is_in_recovery()", 0, NULL, true);
 
 	if (PQgetvalue(res_db, 0, 0)[0] == 't')
 	{

@@ -1166,11 +1166,11 @@ pg_ptrack_clear(void)
 		tblspcOid = atoi(PQgetvalue(res_db, i, 2));
 
 		tmp_conn = pgut_connect(dbname);
-		res = pgut_execute(tmp_conn, "SELECT pg_ptrack_clear()", 0, NULL, true);
+		res = pgut_execute(tmp_conn, "SELECT pg_catalog.pg_ptrack_clear()", 0, NULL, true);
 
 		sprintf(params[0], "%i", dbOid);
 		sprintf(params[1], "%i", tblspcOid);
-		res = pgut_execute(tmp_conn, "SELECT pg_ptrack_get_and_clear_db($1, $2)",
+		res = pgut_execute(tmp_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear_db($1, $2)",
						   2, (const char **)params, true);
 		PQclear(res);
 

@@ -1217,7 +1217,7 @@ pg_ptrack_get_and_clear_db(Oid dbOid, Oid tblspcOid)
 
 	sprintf(params[0], "%i", dbOid);
 	sprintf(params[1], "%i", tblspcOid);
-	res = pgut_execute(backup_conn, "SELECT pg_ptrack_get_and_clear_db($1, $2)",
+	res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear_db($1, $2)",
					   2, (const char **)params, true);
 
 	if (PQnfields(res) != 1)

@@ -1278,7 +1278,7 @@ pg_ptrack_get_and_clear(Oid tablespace_oid, Oid db_oid, Oid rel_filenode,
 		tmp_conn = pgut_connect(dbname);
 		sprintf(params[0], "%i", tablespace_oid);
 		sprintf(params[1], "%i", rel_filenode);
-		res = pgut_execute(tmp_conn, "SELECT pg_ptrack_get_and_clear($1, $2)",
+		res = pgut_execute(tmp_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear($1, $2)",
						   2, (const char **)params, true);
 
 		if (PQnfields(res) != 1)

@@ -1296,7 +1296,7 @@ pg_ptrack_get_and_clear(Oid tablespace_oid, Oid db_oid, Oid rel_filenode,
 		 */
 		sprintf(params[0], "%i", tablespace_oid);
 		sprintf(params[1], "%i", rel_filenode);
-		res = pgut_execute(backup_conn, "SELECT pg_ptrack_get_and_clear($1, $2)",
+		res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear($1, $2)",
						   2, (const char **)params, true);
 
 		if (PQnfields(res) != 1)

@@ -1477,10 +1477,10 @@ wait_replica_wal_lsn(XLogRecPtr lsn, bool is_start_backup)
 	if (is_start_backup)
 	{
 		if (server_version >= 100000)
-			res = pgut_execute(backup_conn, "SELECT pg_last_wal_replay_lsn()",
+			res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_last_wal_replay_lsn()",
							   0, NULL, true);
 		else
-			res = pgut_execute(backup_conn, "SELECT pg_last_xlog_replay_location()",
+			res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_last_xlog_replay_location()",
							   0, NULL, true);
 	}
 	/*

@@ -1490,10 +1490,10 @@ wait_replica_wal_lsn(XLogRecPtr lsn, bool is_start_backup)
 	else
 	{
 		if (server_version >= 100000)
-			res = pgut_execute(backup_conn, "SELECT pg_last_wal_receive_lsn()",
+			res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_last_wal_receive_lsn()",
							   0, NULL, true);
 		else
-			res = pgut_execute(backup_conn, "SELECT pg_last_xlog_receive_location()",
+			res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_last_xlog_receive_location()",
							   0, NULL, true);
 	}
 

@@ -1577,7 +1577,7 @@ pg_stop_backup(pgBackup *backup)
 				 base36enc(backup->start_time));
 		params[0] = name;
 
-		res = pgut_execute(conn, "SELECT pg_create_restore_point($1)",
+		res = pgut_execute(conn, "SELECT pg_catalog.pg_create_restore_point($1)",
						   1, params, true);
 		PQclear(res);
 	}

@@ -1602,12 +1602,12 @@ pg_stop_backup(pgBackup *backup)
 		 */
 		sent = pgut_send(conn,
						 "SELECT"
-						 " txid_snapshot_xmax(txid_current_snapshot()),"
+						 " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot()),"
						 " current_timestamp(0)::timestamptz,"
						 " lsn,"
						 " labelfile,"
						 " spcmapfile"
-						 " FROM pg_stop_backup(false)",
+						 " FROM pg_catalog.pg_stop_backup(false)",
						 0, NULL, WARNING);
 	}
 	else

@@ -1615,9 +1615,9 @@ pg_stop_backup(pgBackup *backup)
 
 		sent = pgut_send(conn,
						 "SELECT"
-						 " txid_snapshot_xmax(txid_current_snapshot()),"
+						 " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot()),"
						 " current_timestamp(0)::timestamptz,"
-						 " pg_stop_backup() as lsn",
+						 " pg_catalog.pg_stop_backup() as lsn",
						 0, NULL, WARNING);
 	}
 	pg_stop_backup_is_sent = true;

@@ -2139,8 +2139,8 @@ parse_backup_filelist_filenames(parray *files, const char *root)
 			continue;
 		}
 
-		/* Check files located inside database directories */
-		if (filename[0] != '\0' && file->dbOid != 0)
+		/* Check files located inside database directories including directory 'global' */
+		if (filename[0] != '\0' && file->tblspcOid != 0)
 		{
 			if (strcmp(filename, "pg_internal.init") == 0)
 			{

@@ -2700,7 +2700,7 @@ get_last_ptrack_lsn(void)
 	uint32		xrecoff;
 	XLogRecPtr	lsn;
 
-	res = pgut_execute(backup_conn, "select pg_ptrack_control_lsn()", 0, NULL, true);
+	res = pgut_execute(backup_conn, "select pg_catalog.pg_ptrack_control_lsn()", 0, NULL, true);
 
 	/* Extract timeline and LSN from results of pg_start_backup() */
 	XLogDataFromLSN(PQgetvalue(res, 0, 0), &xlogid, &xrecoff);

@@ -2748,7 +2748,7 @@ pg_ptrack_get_block(backup_files_args *arguments,
 	//elog(LOG, "db %i pg_ptrack_get_block(%i, %i, %u)",dbOid, tblsOid, relOid, blknum);
 	res = pgut_execute_parallel(arguments->thread_backup_conn,
								arguments->thread_cancel_conn,
-					"SELECT pg_ptrack_get_block_2($1, $2, $3, $4)",
+					"SELECT pg_catalog.pg_ptrack_get_block_2($1, $2, $3, $4)",
					4, (const char **)params, true);
 
 	if (PQnfields(res) != 1)
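Every change in the hunks above (and in several files below) applies one pattern: built-in server functions are schema-qualified with `pg_catalog`, so name resolution no longer depends on the connection's `search_path`, where an object in an earlier schema could shadow a built-in. A hypothetical psql session, not taken from this commit, illustrating the difference:

```shell
# Unqualified: resolved through search_path, so a user-defined
# pg_is_in_recovery() in an earlier schema could shadow the built-in.
psql -d postgres -c "SELECT pg_is_in_recovery()"

# Qualified: always resolves to the built-in function in pg_catalog.
psql -d postgres -c "SELECT pg_catalog.pg_is_in_recovery()"
```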
@@ -95,7 +95,7 @@ fetchFile(PGconn *conn, const char *filename, size_t *filesize)
 	int			len;
 
 	params[0] = filename;
-	res = pgut_execute(conn, "SELECT pg_read_binary_file($1)",
+	res = pgut_execute(conn, "SELECT pg_catalog.pg_read_binary_file($1)",
					   1, params, false);
 
 	/* sanity check the result set */
@@ -17,7 +17,7 @@
 #include <sys/stat.h>
 #include <unistd.h>
 
-const char *PROGRAM_VERSION = "2.0.15";
+const char *PROGRAM_VERSION = "2.0.16";
 const char *PROGRAM_URL = "https://github.com/postgrespro/pg_probackup";
 const char *PROGRAM_EMAIL = "https://github.com/postgrespro/pg_probackup/issues";
@@ -437,7 +437,7 @@ main(int argc, char *argv[])
 
 		elog(INFO, "Backup start, pg_probackup version: %s, backup ID: %s, backup mode: %s, instance: %s, stream: %s, remote: %s",
 			 PROGRAM_VERSION, base36enc(start_time), backup_mode, instance_name,
-			 current.stream ? "true" : "false", is_remote_backup ? "true" : "false");
+			 stream_wal ? "true" : "false", is_remote_backup ? "true" : "false");
 
 		return do_backup(start_time);
 	}
@@ -148,7 +148,7 @@ get_remote_system_identifier(PGconn *conn)
 	char	   *val;
 
 	res = pgut_execute(conn,
-					   "SELECT system_identifier FROM pg_control_system()",
+					   "SELECT system_identifier FROM pg_catalog.pg_control_system()",
					   0, NULL, true);
 	val = PQgetvalue(res, 0, 0);
 	if (!parse_uint64(val, &system_id_conn, 0))
@@ -198,7 +198,7 @@ class AuthTest(unittest.TestCase):
 def run_pb_with_auth(cmd, password=None, kill=False):
     try:
         with spawn(" ".join(cmd), encoding='utf-8', timeout=10) as probackup:
-            result = probackup.expect("Password for user .*:", 5)
+            result = probackup.expect(u"Password for user .*:", 5)
             if kill:
                 probackup.kill(signal.SIGINT)
             elif result == 0:
@@ -292,7 +292,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
         self.del_test_dir(module_name, fname)

     # @unittest.skip("skip")
-    def test_checksumm_fail_heal_via_ptrack(self):
+    def test_page_corruption_heal_via_ptrack_1(self):
         """make node, corrupt some page, check that backup failed"""
         fname = self.id().split('.')[3]
         node = self.make_simple_node(

@@ -338,7 +338,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
         with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:
             log_content = f.read()
             self.assertIn('block 1, try to fetch via SQL', log_content)
-            self.assertIn('SELECT pg_ptrack_get_block', log_content)
+            self.assertIn('SELECT pg_catalog.pg_ptrack_get_block', log_content)
             f.close

         self.assertTrue(

@@ -349,7 +349,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
         self.del_test_dir(module_name, fname)

     # @unittest.skip("skip")
-    def test_checksumm_fail_heal_via_ptrack_fail(self):
+    def test_page_corruption_heal_via_ptrack_2(self):
         """make node, corrupt some page, check that backup failed"""
         fname = self.id().split('.')[3]
         node = self.make_simple_node(

@@ -410,7 +410,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
                 "calculated checksum" in e.message and
                 "ERROR: query failed: "
                 "ERROR: invalid page in block" in e.message and
-                "query was: SELECT pg_ptrack_get_block_2" in e.message,
+                "query was: SELECT pg_catalog.pg_ptrack_get_block_2" in e.message,
                 "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                     repr(e.message), self.cmd))
@@ -1 +1 @@
-pg_probackup 2.0.15
+pg_probackup 2.0.16
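This one-line file tracks the expected version banner, bumped here in step with `PROGRAM_VERSION`. Assuming the usual `--version` flag of pgut-based tools, a quick check after the bump would look like this hypothetical session:

```shell
$ pg_probackup --version
pg_probackup 2.0.16
```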
@@ -96,7 +96,7 @@ def is_enterprise():
         stdout=subprocess.PIPE,
         stderr=subprocess.PIPE
     )
-    if 'postgrespro.ru' in p.communicate()[0]:
+    if b'postgrespro.ru' in p.communicate()[0]:
         return True
     else:
         return False

@@ -884,7 +884,7 @@ class ProbackupTest(object):
         except:
             pass

-    def pgdata_content(self, directory):
+    def pgdata_content(self, directory, ignore_ptrack=True):
         """ return dict with directory content. "
         " TAKE IT AFTER CHECKPOINT or BACKUP"""
         dirs_to_ignore = [

@@ -897,9 +897,9 @@ class ProbackupTest(object):
             'backup_label', 'tablespace_map', 'recovery.conf',
             'ptrack_control', 'ptrack_init', 'pg_control'
         ]
-        suffixes_to_ignore = (
-            '_ptrack'
-        )
+        # suffixes_to_ignore = (
+        #     '_ptrack'
+        # )
         directory_dict = {}
         directory_dict['pgdata'] = directory
         directory_dict['files'] = {}

@@ -908,7 +908,7 @@ class ProbackupTest(object):
             for file in files:
                 if (
                     file in files_to_ignore or
-                    file.endswith(suffixes_to_ignore)
+                    (ignore_ptrack and file.endswith('_ptrack'))
                 ):
                     continue
@@ -26,7 +26,8 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
             pg_options={
                 'wal_level': 'replica',
                 'max_wal_senders': '2',
-                'checkpoint_timeout': '300s'
+                'checkpoint_timeout': '300s',
+                'autovacuum': 'off'
             }
         )
         node_restored = self.make_simple_node(

@@ -70,8 +71,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
         )

         self.backup_node(
-            backup_dir, 'node', node, backup_type='page',
-            options=['--log-level-file=verbose']
+            backup_dir, 'node', node, backup_type='page'
         )

         if self.paranoia:

@@ -95,6 +95,21 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
             "postgresql.auto.conf", "port = {0}".format(node_restored.port))
         node_restored.start()

+        while node_restored.safe_psql(
+                "postgres", "select pg_is_in_recovery()") == 't\n':
+            time.sleep(1)
+
+        # Logical comparison
+        result1 = node.safe_psql(
+            "postgres",
+            "select * from t_heap"
+        )
+        result2 = node_restored.safe_psql(
+            "postgres",
+            "select * from t_heap"
+        )
+        self.assertEqual(result1, result2)
+
         # Clean after yourself
         self.del_test_dir(module_name, fname)

@@ -235,12 +250,20 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
         """Make node, create table with multiple segments, write some data to it, check page and data correctness"""
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
-        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
+        node = self.make_simple_node(
+            base_dir="{0}/{1}/node".format(module_name, fname),
             set_replication=True,
             initdb_params=['--data-checksums'],
-            pg_options={'wal_level': 'replica', 'max_wal_senders': '2',
-                'ptrack_enable': 'on', 'fsync': 'off', 'shared_buffers': '1GB',
-                'maintenance_work_mem': '1GB', 'autovacuum': 'off', 'full_page_writes': 'off'}
+            pg_options={
+                'wal_level': 'replica',
+                'max_wal_senders': '2',
+                'ptrack_enable': 'on',
+                'fsync': 'off',
+                'shared_buffers': '1GB',
+                'maintenance_work_mem': '1GB',
+                'autovacuum': 'off',
+                'full_page_writes': 'off'
+            }
         )

         self.init_pb(backup_dir)

@@ -256,7 +279,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
         self.backup_node(backup_dir, 'node', node)

         # PGBENCH STUFF
-        pgbench = node.pgbench(options=['-T', '150', '-c', '2', '--no-vacuum'])
+        pgbench = node.pgbench(options=['-T', '50', '-c', '1', '--no-vacuum'])
         pgbench.wait()
         node.safe_psql("postgres", "checkpoint")

@@ -280,12 +303,15 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
         pgdata_restored = self.pgdata_content(restored_node.data_dir)

         # START RESTORED NODE
-        restored_node.append_conf("postgresql.auto.conf", "port = {0}".format(restored_node.port))
+        restored_node.append_conf(
+            "postgresql.auto.conf", "port = {0}".format(restored_node.port))
         restored_node.start()
-        while restored_node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
+        while restored_node.safe_psql(
+                "postgres", "select pg_is_in_recovery()") == 't\n':
             time.sleep(1)

-        result_new = restored_node.safe_psql("postgres", "select * from pgbench_accounts")
+        result_new = restored_node.safe_psql(
+            "postgres", "select * from pgbench_accounts")

         # COMPARE RESTORED FILES
         self.assertEqual(result, result_new, 'data is lost')
@@ -172,7 +172,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

         # Physical comparison
         if self.paranoia:
-            pgdata_restored = self.pgdata_content(node_restored.data_dir)
+            pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False)
             self.compare_pgdata(pgdata, pgdata_restored)

         node_restored.append_conf(

@@ -268,7 +268,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

         # Physical comparison
         if self.paranoia:
-            pgdata_restored = self.pgdata_content(node_restored.data_dir)
+            pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False)
             self.compare_pgdata(pgdata, pgdata_restored)

         node_restored.append_conf(

@@ -295,7 +295,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
                 'wal_level': 'replica',
                 'max_wal_senders': '2',
                 'checkpoint_timeout': '300s',
-                'ptrack_enable': 'on'
+                'ptrack_enable': 'on',
+                'autovacuum': 'off'
             }
         )
         node_restored = self.make_simple_node(

@@ -357,7 +358,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

         # Physical comparison
         if self.paranoia:
-            pgdata_restored = self.pgdata_content(node_restored.data_dir)
+            pgdata_restored = self.pgdata_content(
+                node_restored.data_dir,
+                ignore_ptrack=False
+            )
             self.compare_pgdata(pgdata, pgdata_restored)

         node_restored.append_conf(

@@ -426,7 +430,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

         # Physical comparison
         if self.paranoia:
-            pgdata_restored = self.pgdata_content(node_restored.data_dir)
+            pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False)
             self.compare_pgdata(pgdata, pgdata_restored)

         node_restored.append_conf(

@@ -499,7 +503,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

         # Physical comparison
         if self.paranoia:
-            pgdata_restored = self.pgdata_content(node.data_dir)
+            pgdata_restored = self.pgdata_content(node.data_dir, ignore_ptrack=False)
             self.compare_pgdata(pgdata, pgdata_restored)

         node.start()

@@ -513,6 +517,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
         self.del_test_dir(module_name, fname)

     # @unittest.skip("skip")
+    @unittest.expectedFailure
     def test_ptrack_concurrent_get_and_clear_1(self):
         """make node, make full and ptrack stream backups,"
         " restore them and check data correctness"""

@@ -582,7 +587,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

         # Physical comparison
         if self.paranoia:
-            pgdata_restored = self.pgdata_content(node.data_dir)
+            pgdata_restored = self.pgdata_content(node.data_dir, ignore_ptrack=False)
             self.compare_pgdata(pgdata, pgdata_restored)

         node.start()

@@ -596,6 +601,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
         self.del_test_dir(module_name, fname)

     # @unittest.skip("skip")
+    @unittest.expectedFailure
     def test_ptrack_concurrent_get_and_clear_2(self):
         """make node, make full and ptrack stream backups,"
         " restore them and check data correctness"""

@@ -679,7 +685,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

         # Physical comparison
         if self.paranoia:
-            pgdata_restored = self.pgdata_content(node.data_dir)
+            pgdata_restored = self.pgdata_content(node.data_dir, ignore_ptrack=False)
             self.compare_pgdata(pgdata, pgdata_restored)

         node.start()

@@ -780,7 +786,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
         )

         if self.paranoia:
-            pgdata_restored = self.pgdata_content(node.data_dir)
+            pgdata_restored = self.pgdata_content(node.data_dir, ignore_ptrack=False)
             self.compare_pgdata(pgdata, pgdata_restored)

         node.start()

@@ -882,7 +888,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
         )

         if self.paranoia:
-            pgdata_restored = self.pgdata_content(node.data_dir)
+            pgdata_restored = self.pgdata_content(node.data_dir, ignore_ptrack=False)
             self.compare_pgdata(pgdata, pgdata_restored)

         node.start()

@@ -1188,7 +1194,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

         # COMPARE PHYSICAL CONTENT
         if self.paranoia:
-            pgdata_restored = self.pgdata_content(node_restored.data_dir)
+            pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False)
             self.compare_pgdata(pgdata, pgdata_restored)

         # START RESTORED NODE

@@ -1217,7 +1223,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

         # COMPARE PHYSICAL CONTENT
         if self.paranoia:
-            pgdata_restored = self.pgdata_content(node_restored.data_dir)
+            pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False)
             self.compare_pgdata(pgdata, pgdata_restored)

         # START RESTORED NODE

@@ -1321,7 +1327,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

         # GET RESTORED PGDATA AND COMPARE
         if self.paranoia:
-            pgdata_restored = self.pgdata_content(node_restored.data_dir)
+            pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False)
             self.compare_pgdata(pgdata, pgdata_restored)

         # START RESTORED NODE

@@ -1399,7 +1405,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

         # GET PHYSICAL CONTENT and COMPARE PHYSICAL CONTENT
         if self.paranoia:
-            pgdata_restored = self.pgdata_content(node_restored.data_dir)
+            pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False)
             self.compare_pgdata(pgdata, pgdata_restored)

         # START RESTORED NODE

@@ -1548,7 +1554,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

         # GET PHYSICAL CONTENT FROM RESTORED NODE and COMPARE PHYSICAL CONTENT
         if self.paranoia:
-            pgdata_restored = self.pgdata_content(restored_node.data_dir)
+            pgdata_restored = self.pgdata_content(restored_node.data_dir, ignore_ptrack=False)
             self.compare_pgdata(pgdata, pgdata_restored)

         # START RESTORED NODE

@@ -1584,7 +1590,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

         # GET PHYSICAL CONTENT FROM RESTORED NODE and COMPARE PHYSICAL CONTENT
         if self.paranoia:
-            pgdata_restored = self.pgdata_content(restored_node.data_dir)
+            pgdata_restored = self.pgdata_content(restored_node.data_dir, ignore_ptrack=False)
             self.compare_pgdata(pgdata, pgdata_restored)

         # START RESTORED NODE

@@ -1681,7 +1687,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

         # GET PHYSICAL CONTENT FROM NODE_RESTORED
         if self.paranoia:
-            pgdata_restored = self.pgdata_content(restored_node.data_dir)
+            pgdata_restored = self.pgdata_content(restored_node.data_dir, ignore_ptrack=False)
             self.compare_pgdata(pgdata, pgdata_restored)

         # START RESTORED NODE
@@ -837,6 +837,9 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
         node.start()

         backup_id = self.backup_node(backup_dir, 'node', node)
+        if self.paranoia:
+            pgdata = self.pgdata_content(node.data_dir)
+
         node.safe_psql("postgres", "create table t_heap(a int)")
         node.stop()
         node.cleanup()

@@ -848,6 +851,10 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
             options=["-j", "4", '--time={0}'.format(recovery_time)]),
             '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd))

+        if self.paranoia:
+            pgdata_restored = self.pgdata_content(node.data_dir)
+            self.compare_pgdata(pgdata, pgdata_restored)
+
         node.start()
         while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
             time.sleep(1)
@@ -968,7 +968,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
         wals_dir = os.path.join(backup_dir, 'wal', 'node')
         wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')]
         wals.sort()
-        file = os.path.join(backup_dir, 'wal', 'node', wals[3])
+        file = os.path.join(backup_dir, 'wal', 'node', wals[-1])
         os.remove(file)

         # cut out '.gz'

@@ -1237,7 +1237,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
         self.del_test_dir(module_name, fname)

     # @unittest.skip("skip")
-    # @unittest.expectedFailure
+    @unittest.expectedFailure
     def test_pgpro561(self):
         """make node with archiving, make stream backup, restore it to node1, check that archiving is not successful on node1"""
         fname = self.id().split('.')[3]

@@ -1290,60 +1290,4 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
         self.assertFalse('pg_probackup archive-push completed successfully' in log_content)

         # Clean after yourself
-        # self.del_test_dir(module_name, fname)
-
-    # @unittest.skip("skip")
-    # @unittest.expectedFailure
-    def test_pgpro561(self):
-        """make node with archiving, make stream backup, restore it to node1, check that archiving is not successful on node1"""
-        fname = self.id().split('.')[3]
-        node1 = self.make_simple_node(base_dir="{0}/{1}/node1".format(module_name, fname),
-            set_replication=True,
-            initdb_params=['--data-checksums'],
-            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
-        )
-        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
-        self.init_pb(backup_dir)
-        self.add_instance(backup_dir, 'node1', node1)
-        self.set_archiving(backup_dir, 'node1', node1)
-        node1.start()
-
-        backup_id = self.backup_node(backup_dir, 'node1', node1, options=["--stream"])
-
-        node2 = self.make_simple_node(base_dir="{0}/{1}/node2".format(module_name, fname))
-        node2.cleanup()
-
-        node1.psql(
-            "postgres",
-            "create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
-
-        self.backup_node(backup_dir, 'node1', node1, backup_type='page', options=["--stream"])
-        self.restore_node(backup_dir, 'node1', data_dir=node2.data_dir)
-        node2.append_conf('postgresql.auto.conf', 'port = {0}'.format(node2.port))
-        node2.start()
-
-        timeline_node1 = node1.get_control_data()["Latest checkpoint's TimeLineID"]
-        timeline_node2 = node2.get_control_data()["Latest checkpoint's TimeLineID"]
-        self.assertEqual(timeline_node1, timeline_node2, "Timelines on Master and Node1 should be equal. This is unexpected")
-
-        archive_command_node1 = node1.safe_psql("postgres", "show archive_command")
-        archive_command_node2 = node2.safe_psql("postgres", "show archive_command")
-        self.assertEqual(archive_command_node1, archive_command_node2, "Archive command on Master and Node should be equal. This is unexpected")
-
-        #result = node2.safe_psql("postgres", "select last_failed_wal from pg_stat_get_archiver() where last_failed_wal is not NULL")
-        ## self.assertEqual(res, six.b(""), 'Restored Node1 failed to archive segment {0} due to having the same archive command as Master'.format(res.rstrip()))
-        #if result == "":
-        #    self.assertEqual(1, 0, 'Error is expected due to Master and Node1 having the common archive and archive_command')
-
-        log_file = os.path.join(node2.logs_dir, 'postgresql.log')
-        with open(log_file, 'r') as f:
-            log_content = f.read()
-            self.assertTrue('LOG: archive command failed with exit code 1' in log_content
-                and 'DETAIL: The failed archive command was:' in log_content
-                and 'INFO: pg_probackup archive-push from' in log_content,
-                'Expecting error messages about failed archive_command'
-            )
-            self.assertFalse('pg_probackup archive-push completed successfully' in log_content)
-
-        # Clean after yourself
-        # self.del_test_dir(module_name, fname)
+        self.del_test_dir(module_name, fname)