Mirror of https://github.com/postgrespro/pg_probackup.git (synced 2025-03-17 21:18:00 +02:00)

commit 29540c2caa
Merge remote-tracking branch 'origin/PGPRO11_DEV' into PGPRO11-1121_copy

Makefile (6 lines changed)
@@ -1,7 +1,7 @@
PROGRAM = pg_probackup
OBJS = src/backup.o src/catalog.o src/configure.o src/data.o \
src/delete.o src/dir.o src/fetch.o src/help.o src/init.o \
src/pg_probackup.o src/restore.o src/show.o src/status.o \
src/pg_probackup.o src/restore.o src/show.o \
src/util.o src/validate.o src/datapagemap.o src/parsexlog.o \
src/xlogreader.o src/streamutil.o src/receivelog.o \
src/archive.o src/utils/parray.o src/utils/pgut.o src/utils/logger.o \
@@ -32,7 +32,7 @@ else
srchome=$(top_srcdir)
endif

ifneq (,$(filter 10 11 12,$(MAJORVERSION)))
ifeq (,$(filter 9.5 9.6,$(MAJORVERSION)))
OBJS += src/walmethods.o
EXTRA_CLEAN += src/walmethods.c src/walmethods.h
INCLUDES += src/walmethods.h
@@ -64,7 +64,7 @@ src/streamutil.h: $(top_srcdir)/src/bin/pg_basebackup/streamutil.h
rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_basebackup/streamutil.h $@


ifneq (,$(filter 10 11 12,$(MAJORVERSION)))
ifeq (,$(filter 9.5 9.6,$(MAJORVERSION)))
src/walmethods.c: $(top_srcdir)/src/bin/pg_basebackup/walmethods.c
rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_basebackup/walmethods.c $@
src/walmethods.h: $(top_srcdir)/src/bin/pg_basebackup/walmethods.h

README.md
@@ -3,12 +3,12 @@
`pg_probackup` is a utility to manage backup and recovery of PostgreSQL database clusters. It is designed to perform periodic backups of the PostgreSQL instance that enable you to restore the server in case of a failure.

The utility is compatible with:
* PostgreSQL 9.5, 9.6, 10;
* PostgreSQL 9.5, 9.6, 10, 11;

`PTRACK` backup support provided via following options:
* vanilla PostgreSQL compiled with ptrack patch. Currently there are patches for [PostgreSQL 9.6](https://gist.githubusercontent.com/gsmol/5b615c971dfd461c76ef41a118ff4d97/raw/e471251983f14e980041f43bea7709b8246f4178/ptrack_9.6.6_v1.5.patch) and [PostgreSQL 10](https://gist.githubusercontent.com/gsmol/be8ee2a132b88463821021fd910d960e/raw/de24f9499f4f314a4a3e5fae5ed4edb945964df8/ptrack_10.1_v1.5.patch)
* Postgres Pro Standard 9.5, 9.6
* Postgres Pro Enterprise
* Postgres Pro Standard 9.5, 9.6, 10
* Postgres Pro Enterprise 9.5, 9.6, 10

As compared to other backup solutions, `pg_probackup` offers the following benefits that can help you implement different backup strategies and deal with large amounts of data:
* Choosing between full and page-level incremental backups to speed up backup and recovery
@@ -97,4 +97,4 @@ Postgres Professional, Moscow, Russia.

## Credits

`pg_probackup` utility is based on `pg_arman`, that was originally written by NTT and then developed and maintained by Michael Paquier.
`pg_probackup` utility is based on `pg_arman`, that was originally written by NTT and then developed and maintained by Michael Paquier.

@@ -127,6 +127,7 @@ sub build_pgprobackup
#vvs test
my $probackup =
$solution->AddProject('pg_probackup', 'exe', 'pg_probackup'); #, 'contrib/pg_probackup'
$probackup->AddDefine('FRONTEND');
$probackup->AddFiles(
'contrib/pg_probackup/src',
'archive.c',
@@ -139,11 +140,11 @@ sub build_pgprobackup
'fetch.c',
'help.c',
'init.c',
'merge.c',
'parsexlog.c',
'pg_probackup.c',
'restore.c',
'show.c',
'status.c',
'util.c',
'validate.c'
);

src/archive.c
@@ -7,10 +7,10 @@
*
*-------------------------------------------------------------------------
*/

#include "pg_probackup.h"

#include <unistd.h>
#include <sys/stat.h>

/*
* pg_probackup specific archive command for archive backups
@@ -52,7 +52,7 @@ do_archive_push(char *wal_file_path, char *wal_file_name, bool overwrite)

if(system_id != config->system_identifier)
elog(ERROR, "Refuse to push WAL segment %s into archive. Instance parameters mismatch."
"Instance '%s' should have SYSTEM_ID = " INT64_FORMAT " instead of " INT64_FORMAT,
"Instance '%s' should have SYSTEM_ID = %ld instead of %ld",
wal_file_name, instance_name, config->system_identifier, system_id);

/* Create 'archlog_path' directory. Do nothing if it already exists. */

src/backup.c (250 lines changed)
@ -10,25 +10,28 @@
|
||||
|
||||
#include "pg_probackup.h"
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/time.h>
|
||||
#include <unistd.h>
|
||||
#include <dirent.h>
|
||||
#include <time.h>
|
||||
|
||||
#if PG_VERSION_NUM < 110000
|
||||
#include "catalog/catalog.h"
|
||||
#endif
|
||||
#include "catalog/pg_tablespace.h"
|
||||
#include "datapagemap.h"
|
||||
#include "libpq/pqsignal.h"
|
||||
#include "pgtar.h"
|
||||
#include "receivelog.h"
|
||||
#include "storage/bufpage.h"
|
||||
#include "streamutil.h"
|
||||
|
||||
#include <sys/stat.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include "utils/thread.h"
|
||||
|
||||
#define PG_STOP_BACKUP_TIMEOUT 300
|
||||
|
||||
/*
|
||||
* Macro needed to parse ptrack.
|
||||
* NOTE Keep those values syncronised with definitions in ptrack.h
|
||||
*/
|
||||
#define PTRACK_BITS_PER_HEAPBLOCK 1
|
||||
#define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / PTRACK_BITS_PER_HEAPBLOCK)
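
A note on the two macros above: with PTRACK_BITS_PER_HEAPBLOCK = 1 and BITS_PER_BYTE = 8, HEAPBLOCKS_PER_BYTE works out to 8, i.e. one byte of the ptrack map describes eight heap blocks. A standalone sketch of reading such a one-bit-per-block map follows; the byte/bit layout shown here is the usual bitmap convention and is an assumption, not taken from this patch.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE				8
#define PTRACK_BITS_PER_HEAPBLOCK	1
#define HEAPBLOCKS_PER_BYTE			(BITS_PER_BYTE / PTRACK_BITS_PER_HEAPBLOCK)

/* Assumed layout: block N is bit (N % 8) of byte (N / 8) of the map. */
static bool
block_is_changed(const uint8_t *map, uint32_t blknum)
{
	return (map[blknum / HEAPBLOCKS_PER_BYTE] &
			(1 << (blknum % HEAPBLOCKS_PER_BYTE))) != 0;
}

int
main(void)
{
	uint8_t		map[2] = {0x05, 0x80};	/* blocks 0, 2 and 15 marked changed */

	printf("block 2 changed: %d\n", block_is_changed(map, 2));
	printf("block 3 changed: %d\n", block_is_changed(map, 3));
	return 0;
}
```
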
|
||||
|
||||
static int standby_message_timeout = 10 * 1000; /* 10 sec = default */
|
||||
static XLogRecPtr stop_backup_lsn = InvalidXLogRecPtr;
|
||||
static XLogRecPtr stop_stream_lsn = InvalidXLogRecPtr;
|
||||
@ -104,7 +107,8 @@ static int checkpoint_timeout(void);
|
||||
|
||||
//static void backup_list_file(parray *files, const char *root, )
|
||||
static void parse_backup_filelist_filenames(parray *files, const char *root);
|
||||
static void wait_wal_lsn(XLogRecPtr lsn, bool wait_prev_segment);
|
||||
static void wait_wal_lsn(XLogRecPtr lsn, bool is_start_lsn,
|
||||
bool wait_prev_segment);
|
||||
static void wait_replica_wal_lsn(XLogRecPtr lsn, bool is_start_backup);
|
||||
static void make_pagemap_from_ptrack(parray *files);
|
||||
static void *StreamLog(void *arg);
|
||||
@ -530,7 +534,7 @@ do_backup_instance(void)
|
||||
prev_backup_start_lsn = prev_backup->start_lsn;
|
||||
current.parent_backup = prev_backup->start_time;
|
||||
|
||||
pgBackupWriteBackupControlFile(¤t);
|
||||
write_backup(¤t);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -543,10 +547,11 @@ do_backup_instance(void)
|
||||
|
||||
if (ptrack_lsn > prev_backup->stop_lsn || ptrack_lsn == InvalidXLogRecPtr)
|
||||
{
|
||||
elog(ERROR, "LSN from ptrack_control " UINT64_FORMAT " differs from STOP LSN of previous backup "
|
||||
UINT64_FORMAT ".\n"
|
||||
elog(ERROR, "LSN from ptrack_control %X/%X differs from STOP LSN of previous backup %X/%X.\n"
|
||||
"Create new full backup before an incremental one.",
|
||||
ptrack_lsn, prev_backup->stop_lsn);
|
||||
(uint32) (ptrack_lsn >> 32), (uint32) (ptrack_lsn),
|
||||
(uint32) (prev_backup->stop_lsn >> 32),
|
||||
(uint32) (prev_backup->stop_lsn));
|
||||
}
|
||||
}
|
||||
|
||||
@ -650,11 +655,7 @@ do_backup_instance(void)
|
||||
*/
|
||||
extractPageMap(arclog_path, current.tli, xlog_seg_size,
|
||||
prev_backup->start_lsn, current.start_lsn,
|
||||
/*
|
||||
* For backup from master wait for previous segment.
|
||||
* For backup from replica wait for current segment.
|
||||
*/
|
||||
!current.from_replica, backup_files_list);
|
||||
backup_files_list);
|
||||
}
|
||||
else if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
|
||||
{
|
||||
@ -721,7 +722,7 @@ do_backup_instance(void)
|
||||
}
|
||||
|
||||
/* Run threads */
|
||||
elog(LOG, "Start transfering data files");
|
||||
elog(INFO, "Start transfering data files");
|
||||
for (i = 0; i < num_threads; i++)
|
||||
{
|
||||
backup_files_arg *arg = &(threads_args[i]);
|
||||
@ -742,7 +743,7 @@ do_backup_instance(void)
|
||||
backup_isok = false;
|
||||
}
|
||||
if (backup_isok)
|
||||
elog(LOG, "Data files are transfered");
|
||||
elog(INFO, "Data files are transfered");
|
||||
else
|
||||
elog(ERROR, "Data files transferring failed");
|
||||
|
||||
@ -908,11 +909,13 @@ do_backup(time_t start_time)
|
||||
/* Start backup. Update backup status. */
|
||||
current.status = BACKUP_STATUS_RUNNING;
|
||||
current.start_time = start_time;
|
||||
StrNCpy(current.program_version, PROGRAM_VERSION,
|
||||
sizeof(current.program_version));
|
||||
|
||||
/* Create backup directory and BACKUP_CONTROL_FILE */
|
||||
if (pgBackupCreateDir(¤t))
|
||||
elog(ERROR, "cannot create backup directory");
|
||||
pgBackupWriteBackupControlFile(¤t);
|
||||
write_backup(¤t);
|
||||
|
||||
elog(LOG, "Backup destination is initialized");
|
||||
|
||||
@ -934,7 +937,7 @@ do_backup(time_t start_time)
|
||||
/* Backup is done. Update backup status */
|
||||
current.end_time = time(NULL);
|
||||
current.status = BACKUP_STATUS_DONE;
|
||||
pgBackupWriteBackupControlFile(¤t);
|
||||
write_backup(¤t);
|
||||
|
||||
//elog(LOG, "Backup completed. Total bytes : " INT64_FORMAT "",
|
||||
// current.data_bytes);
|
||||
@ -1039,13 +1042,13 @@ check_system_identifiers(void)
|
||||
system_id_conn = get_remote_system_identifier(backup_conn);
|
||||
|
||||
if (system_id_conn != system_identifier)
|
||||
elog(ERROR, "Backup data directory was initialized for system id " UINT64_FORMAT
|
||||
", but connected instance system id is " UINT64_FORMAT,
|
||||
system_identifier, system_id_conn);
|
||||
elog(ERROR, "Backup data directory was initialized for system id " UINT64_FORMAT ", "
|
||||
"but connected instance system id is " UINT64_FORMAT,
|
||||
system_identifier, system_id_conn);
|
||||
if (system_id_pgdata != system_identifier)
|
||||
elog(ERROR, "Backup data directory was initialized for system id " UINT64_FORMAT
|
||||
", but target backup directory system id is " UINT64_FORMAT,
|
||||
system_identifier, system_id_pgdata);
|
||||
elog(ERROR, "Backup data directory was initialized for system id " UINT64_FORMAT ", "
|
||||
"but target backup directory system id is " UINT64_FORMAT,
|
||||
system_identifier, system_id_pgdata);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1080,8 +1083,8 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup)
|
||||
{
|
||||
PGresult *res;
|
||||
const char *params[2];
|
||||
uint32 xlogid;
|
||||
uint32 xrecoff;
|
||||
uint32 lsn_hi;
|
||||
uint32 lsn_lo;
|
||||
PGconn *conn;
|
||||
|
||||
params[0] = label;
|
||||
@ -1109,9 +1112,9 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup)
|
||||
backup_in_progress = true;
|
||||
|
||||
/* Extract timeline and LSN from results of pg_start_backup() */
|
||||
XLogDataFromLSN(PQgetvalue(res, 0, 0), &xlogid, &xrecoff);
|
||||
XLogDataFromLSN(PQgetvalue(res, 0, 0), &lsn_hi, &lsn_lo);
|
||||
/* Calculate LSN */
|
||||
backup->start_lsn = (XLogRecPtr) ((uint64) xlogid << 32) | xrecoff;
|
||||
backup->start_lsn = ((uint64) lsn_hi )<< 32 | lsn_lo;
|
||||
|
||||
PQclear(res);
|
||||
|
||||
@ -1122,20 +1125,17 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup)
|
||||
*/
|
||||
pg_switch_wal(conn);
|
||||
|
||||
if (!stream_wal)
|
||||
{
|
||||
/*
|
||||
* Do not wait start_lsn for stream backup.
|
||||
* Because WAL streaming will start after pg_start_backup() in stream
|
||||
* mode.
|
||||
*/
|
||||
if (current.backup_mode == BACKUP_MODE_DIFF_PAGE)
|
||||
/* In PAGE mode wait for current segment... */
|
||||
if (current.backup_mode == BACKUP_MODE_DIFF_PAGE)
|
||||
wait_wal_lsn(backup->start_lsn, false);
|
||||
wait_wal_lsn(backup->start_lsn, true, false);
|
||||
/*
|
||||
* Do not wait start_lsn for stream backup.
|
||||
* Because WAL streaming will start after pg_start_backup() in stream
|
||||
* mode.
|
||||
*/
|
||||
else if (!stream_wal)
|
||||
/* ...for others wait for previous segment */
|
||||
else
|
||||
wait_wal_lsn(backup->start_lsn, true);
|
||||
}
|
||||
wait_wal_lsn(backup->start_lsn, true, true);
|
||||
|
||||
/* Wait for start_lsn to be replayed by replica */
|
||||
if (backup->from_replica)
|
||||
@ -1154,10 +1154,11 @@ pg_switch_wal(PGconn *conn)
|
||||
res = pgut_execute(conn, "SET client_min_messages = warning;", 0, NULL);
|
||||
PQclear(res);
|
||||
|
||||
if (server_version >= 100000)
|
||||
res = pgut_execute(conn, "SELECT * FROM pg_catalog.pg_switch_wal()", 0, NULL);
|
||||
else
|
||||
res = pgut_execute(conn, "SELECT * FROM pg_catalog.pg_switch_xlog()", 0, NULL);
|
||||
#if PG_VERSION_NUM >= 100000
|
||||
res = pgut_execute(conn, "SELECT * FROM pg_catalog.pg_switch_wal()", 0, NULL);
|
||||
#else
|
||||
res = pgut_execute(conn, "SELECT * FROM pg_catalog.pg_switch_xlog()", 0, NULL);
|
||||
#endif
|
||||
|
||||
PQclear(res);
|
||||
}
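
The pg_switch_wal() hunk above replaces a run-time server_version check with a compile-time PG_VERSION_NUM guard, so the query text is fixed when pg_probackup is built against a particular set of server headers. A minimal standalone sketch of the pattern; PG_VERSION_NUM normally comes from the PostgreSQL headers, and the fallback define exists only so this sketch compiles on its own:

```c
#include <stdio.h>

#ifndef PG_VERSION_NUM
#define PG_VERSION_NUM 110000	/* stand-in for the value from the PostgreSQL headers */
#endif

int
main(void)
{
#if PG_VERSION_NUM >= 100000
	/* PostgreSQL 10 renamed the "xlog" admin functions to "wal" */
	puts("SELECT * FROM pg_catalog.pg_switch_wal()");
#else
	puts("SELECT * FROM pg_catalog.pg_switch_xlog()");
#endif
	return 0;
}
```
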
|
||||
@ -1453,16 +1454,20 @@ pg_ptrack_get_and_clear(Oid tablespace_oid, Oid db_oid, Oid rel_filenode,
|
||||
* If current backup started in stream mode wait for 'lsn' to be streamed in
|
||||
* 'pg_wal' directory.
|
||||
*
|
||||
* If 'is_start_lsn' is true and backup mode is PAGE then we wait for 'lsn' to
|
||||
* be archived in archive 'wal' directory regardless stream mode.
|
||||
*
|
||||
* If 'wait_prev_segment' wait for previous segment.
|
||||
*/
|
||||
static void
|
||||
wait_wal_lsn(XLogRecPtr lsn, bool wait_prev_segment)
|
||||
wait_wal_lsn(XLogRecPtr lsn, bool is_start_lsn, bool wait_prev_segment)
|
||||
{
|
||||
TimeLineID tli;
|
||||
XLogSegNo targetSegNo;
|
||||
char wal_dir[MAXPGPATH],
|
||||
wal_segment_path[MAXPGPATH];
|
||||
char wal_segment[MAXFNAMELEN];
|
||||
char pg_wal_dir[MAXPGPATH];
|
||||
char wal_segment_path[MAXPGPATH],
|
||||
*wal_segment_dir,
|
||||
wal_segment[MAXFNAMELEN];
|
||||
bool file_exists = false;
|
||||
uint32 try_count = 0,
|
||||
timeout;
|
||||
@ -1479,11 +1484,20 @@ wait_wal_lsn(XLogRecPtr lsn, bool wait_prev_segment)
|
||||
targetSegNo--;
|
||||
GetXLogFileName(wal_segment, tli, targetSegNo, xlog_seg_size);
|
||||
|
||||
if (stream_wal)
|
||||
/*
|
||||
* In pg_start_backup we wait for 'lsn' in 'pg_wal' directory iff it is
|
||||
* stream and non-page backup. Page backup needs archived WAL files, so we
|
||||
* wait for 'lsn' in archive 'wal' directory for page backups.
|
||||
*
|
||||
* In pg_stop_backup it depends only on stream_wal.
|
||||
*/
|
||||
if (stream_wal &&
|
||||
(current.backup_mode != BACKUP_MODE_DIFF_PAGE || !is_start_lsn))
|
||||
{
|
||||
pgBackupGetPath2(¤t, wal_dir, lengthof(wal_dir),
|
||||
pgBackupGetPath2(¤t, pg_wal_dir, lengthof(pg_wal_dir),
|
||||
DATABASE_DIR, PG_XLOG_DIR);
|
||||
join_path_components(wal_segment_path, wal_dir, wal_segment);
|
||||
join_path_components(wal_segment_path, pg_wal_dir, wal_segment);
|
||||
wal_segment_dir = pg_wal_dir;
|
||||
|
||||
timeout = (uint32) checkpoint_timeout();
|
||||
timeout = timeout + timeout * 0.1;
|
||||
@ -1491,6 +1505,7 @@ wait_wal_lsn(XLogRecPtr lsn, bool wait_prev_segment)
|
||||
else
|
||||
{
|
||||
join_path_components(wal_segment_path, arclog_path, wal_segment);
|
||||
wal_segment_dir = arclog_path;
|
||||
timeout = archive_timeout;
|
||||
}
|
||||
|
||||
@ -1533,10 +1548,7 @@ wait_wal_lsn(XLogRecPtr lsn, bool wait_prev_segment)
|
||||
/*
|
||||
* A WAL segment found. Check LSN on it.
|
||||
*/
|
||||
if ((stream_wal && wal_contains_lsn(wal_dir, lsn, tli,
|
||||
xlog_seg_size)) ||
|
||||
(!stream_wal && wal_contains_lsn(arclog_path, lsn, tli,
|
||||
xlog_seg_size)))
|
||||
if (wal_contains_lsn(wal_segment_dir, lsn, tli, xlog_seg_size))
|
||||
/* Target LSN was found */
|
||||
{
|
||||
elog(LOG, "Found LSN: %X/%X", (uint32) (lsn >> 32), (uint32) lsn);
|
||||
@ -1585,9 +1597,6 @@ wait_replica_wal_lsn(XLogRecPtr lsn, bool is_start_backup)
|
||||
|
||||
while (true)
|
||||
{
|
||||
PGresult *res;
|
||||
uint32 xlogid;
|
||||
uint32 xrecoff;
|
||||
XLogRecPtr replica_lsn;
|
||||
|
||||
/*
|
||||
@ -1596,12 +1605,7 @@ wait_replica_wal_lsn(XLogRecPtr lsn, bool is_start_backup)
|
||||
*/
|
||||
if (is_start_backup)
|
||||
{
|
||||
if (server_version >= 100000)
|
||||
res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_last_wal_replay_lsn()",
|
||||
0, NULL);
|
||||
else
|
||||
res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_last_xlog_replay_location()",
|
||||
0, NULL);
|
||||
replica_lsn = get_checkpoint_location(backup_conn);
|
||||
}
|
||||
/*
|
||||
* For lsn from pg_stop_backup() we need it only to be received by
|
||||
@ -1609,19 +1613,24 @@ wait_replica_wal_lsn(XLogRecPtr lsn, bool is_start_backup)
|
||||
*/
|
||||
else
|
||||
{
|
||||
if (server_version >= 100000)
|
||||
res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_last_wal_receive_lsn()",
|
||||
0, NULL);
|
||||
else
|
||||
res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_last_xlog_receive_location()",
|
||||
0, NULL);
|
||||
}
|
||||
PGresult *res;
|
||||
uint32 lsn_hi;
|
||||
uint32 lsn_lo;
|
||||
|
||||
/* Extract timeline and LSN from result */
|
||||
XLogDataFromLSN(PQgetvalue(res, 0, 0), &xlogid, &xrecoff);
|
||||
/* Calculate LSN */
|
||||
replica_lsn = (XLogRecPtr) ((uint64) xlogid << 32) | xrecoff;
|
||||
PQclear(res);
|
||||
#if PG_VERSION_NUM >= 100000
|
||||
res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_last_wal_receive_lsn()",
|
||||
0, NULL);
|
||||
#else
|
||||
res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_last_xlog_receive_location()",
|
||||
0, NULL);
|
||||
#endif
|
||||
|
||||
/* Extract LSN from result */
|
||||
XLogDataFromLSN(PQgetvalue(res, 0, 0), &lsn_hi, &lsn_lo);
|
||||
/* Calculate LSN */
|
||||
replica_lsn = ((uint64) lsn_hi) << 32 | lsn_lo;
|
||||
PQclear(res);
|
||||
}
|
||||
|
||||
/* target lsn was replicated */
|
||||
if (replica_lsn >= lsn)
|
||||
@ -1654,10 +1663,10 @@ pg_stop_backup(pgBackup *backup)
|
||||
PGconn *conn;
|
||||
PGresult *res;
|
||||
PGresult *tablespace_map_content = NULL;
|
||||
uint32 xlogid;
|
||||
uint32 xrecoff;
|
||||
uint32 lsn_hi;
|
||||
uint32 lsn_lo;
|
||||
XLogRecPtr restore_lsn = InvalidXLogRecPtr;
|
||||
int pg_stop_backup_timeout = 0;
|
||||
int pg_stop_backup_timeout = 0;
|
||||
char path[MAXPGPATH];
|
||||
char backup_label[MAXPGPATH];
|
||||
FILE *fp;
|
||||
@ -1700,6 +1709,10 @@ pg_stop_backup(pgBackup *backup)
|
||||
|
||||
res = pgut_execute(conn, "SELECT pg_catalog.pg_create_restore_point($1)",
|
||||
1, params);
|
||||
/* Extract timeline and LSN from the result */
|
||||
XLogDataFromLSN(PQgetvalue(res, 0, 0), &lsn_hi, &lsn_lo);
|
||||
/* Calculate LSN */
|
||||
restore_lsn = ((uint64) lsn_hi) << 32 | lsn_lo;
|
||||
PQclear(res);
|
||||
}
|
||||
|
||||
@ -1732,7 +1745,6 @@ pg_stop_backup(pgBackup *backup)
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
stop_backup_query = "SELECT"
|
||||
" pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot()),"
|
||||
" current_timestamp(0)::timestamptz,"
|
||||
@ -1751,6 +1763,8 @@ pg_stop_backup(pgBackup *backup)
|
||||
*/
|
||||
if (pg_stop_backup_is_sent && !in_cleanup)
|
||||
{
|
||||
res = NULL;
|
||||
|
||||
while (1)
|
||||
{
|
||||
if (!PQconsumeInput(conn) || PQisBusy(conn))
|
||||
@ -1792,8 +1806,11 @@ pg_stop_backup(pgBackup *backup)
|
||||
{
|
||||
switch (PQresultStatus(res))
|
||||
{
|
||||
/*
|
||||
* We should expect only PGRES_TUPLES_OK since pg_stop_backup
|
||||
* returns tuples.
|
||||
*/
|
||||
case PGRES_TUPLES_OK:
|
||||
case PGRES_COMMAND_OK:
|
||||
break;
|
||||
default:
|
||||
elog(ERROR, "query failed: %s query was: %s",
|
||||
@ -1805,9 +1822,9 @@ pg_stop_backup(pgBackup *backup)
|
||||
backup_in_progress = false;
|
||||
|
||||
/* Extract timeline and LSN from results of pg_stop_backup() */
|
||||
XLogDataFromLSN(PQgetvalue(res, 0, 2), &xlogid, &xrecoff);
|
||||
XLogDataFromLSN(PQgetvalue(res, 0, 2), &lsn_hi, &lsn_lo);
|
||||
/* Calculate LSN */
|
||||
stop_backup_lsn = (XLogRecPtr) ((uint64) xlogid << 32) | xrecoff;
|
||||
stop_backup_lsn = ((uint64) lsn_hi) << 32 | lsn_lo;
|
||||
|
||||
if (!XRecOffIsValid(stop_backup_lsn))
|
||||
{
|
||||
@ -1924,7 +1941,7 @@ pg_stop_backup(pgBackup *backup)
|
||||
* Wait for stop_lsn to be archived or streamed.
|
||||
* We wait for stop_lsn in stream mode just in case.
|
||||
*/
|
||||
wait_wal_lsn(stop_backup_lsn, false);
|
||||
wait_wal_lsn(stop_backup_lsn, false, false);
|
||||
|
||||
if (stream_wal)
|
||||
{
|
||||
@ -1998,7 +2015,7 @@ backup_cleanup(bool fatal, void *userdata)
|
||||
base36enc(current.start_time));
|
||||
current.end_time = time(NULL);
|
||||
current.status = BACKUP_STATUS_ERROR;
|
||||
pgBackupWriteBackupControlFile(¤t);
|
||||
write_backup(¤t);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2084,12 +2101,13 @@ backup_files(void *arg)
|
||||
|
||||
if (S_ISREG(buf.st_mode))
|
||||
{
|
||||
pgFile **prev_file;
|
||||
|
||||
/* Check that file exist in previous backup */
|
||||
if (current.backup_mode != BACKUP_MODE_FULL)
|
||||
{
|
||||
char *relative;
|
||||
pgFile key;
|
||||
pgFile **prev_file;
|
||||
|
||||
relative = GetRelativePath(file->path, arguments->from_root);
|
||||
key.path = relative;
|
||||
@ -2119,20 +2137,27 @@ backup_files(void *arg)
|
||||
continue;
|
||||
}
|
||||
}
|
||||
/* TODO:
|
||||
* Check if file exists in previous backup
|
||||
* If exists:
|
||||
* if mtime > start_backup_time of parent backup,
|
||||
* copy file to backup
|
||||
* if mtime < start_backup_time
|
||||
* calculate crc, compare crc to old file
|
||||
* if crc is the same -> skip file
|
||||
*/
|
||||
else if (!copy_file(arguments->from_root, arguments->to_root, file))
|
||||
else
|
||||
{
|
||||
file->write_size = BYTES_INVALID;
|
||||
elog(VERBOSE, "File \"%s\" was not copied to backup", file->path);
|
||||
continue;
|
||||
bool skip = false;
|
||||
|
||||
/* If non-data file has not changed since last backup... */
|
||||
if (file->exists_in_prev &&
|
||||
buf.st_mtime < current.parent_backup)
|
||||
{
|
||||
calc_file_checksum(file);
|
||||
/* ...and checksum is the same... */
|
||||
if (EQ_CRC32C(file->crc, (*prev_file)->crc))
|
||||
skip = true; /* ...skip copying file. */
|
||||
}
|
||||
if (skip ||
|
||||
!copy_file(arguments->from_root, arguments->to_root, file))
|
||||
{
|
||||
file->write_size = BYTES_INVALID;
|
||||
elog(VERBOSE, "File \"%s\" was not copied to backup",
|
||||
file->path);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
elog(VERBOSE, "File \"%s\". Copied "INT64_FORMAT " bytes",
|
||||
@ -2194,7 +2219,8 @@ parse_backup_filelist_filenames(parray *files, const char *root)
|
||||
|
||||
/* Yes, it is */
|
||||
if (sscanf_result == 2 &&
|
||||
strcmp(tmp_rel_path, TABLESPACE_VERSION_DIRECTORY) == 0)
|
||||
strncmp(tmp_rel_path, TABLESPACE_VERSION_DIRECTORY,
|
||||
strlen(TABLESPACE_VERSION_DIRECTORY)) == 0)
|
||||
set_cfs_datafiles(files, root, relative, i);
|
||||
}
|
||||
}
|
||||
@ -2618,16 +2644,16 @@ get_last_ptrack_lsn(void)
|
||||
|
||||
{
|
||||
PGresult *res;
|
||||
uint32 xlogid;
|
||||
uint32 xrecoff;
|
||||
uint32 lsn_hi;
|
||||
uint32 lsn_lo;
|
||||
XLogRecPtr lsn;
|
||||
|
||||
res = pgut_execute(backup_conn, "select pg_catalog.pg_ptrack_control_lsn()", 0, NULL);
|
||||
|
||||
/* Extract timeline and LSN from results of pg_start_backup() */
|
||||
XLogDataFromLSN(PQgetvalue(res, 0, 0), &xlogid, &xrecoff);
|
||||
XLogDataFromLSN(PQgetvalue(res, 0, 0), &lsn_hi, &lsn_lo);
|
||||
/* Calculate LSN */
|
||||
lsn = (XLogRecPtr) ((uint64) xlogid << 32) | xrecoff;
|
||||
lsn = ((uint64) lsn_hi) << 32 | lsn_lo;
|
||||
|
||||
PQclear(res);
|
||||
return lsn;
|
||||
|

src/catalog.c (224 lines changed)
@ -11,13 +11,8 @@
|
||||
#include "pg_probackup.h"
|
||||
|
||||
#include <dirent.h>
|
||||
#include <fcntl.h>
|
||||
#include <signal.h>
|
||||
#include <sys/file.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/types.h>
|
||||
#include <stdlib.h>
|
||||
#include <time.h>
|
||||
#include <unistd.h>
|
||||
|
||||
static const char *backupModes[] = {"", "PAGE", "PTRACK", "DELTA", "FULL"};
|
||||
@ -221,6 +216,25 @@ read_backup(time_t timestamp)
|
||||
return readBackupControlFile(conf_path);
|
||||
}
|
||||
|
||||
/*
|
||||
* Save the backup status into BACKUP_CONTROL_FILE.
|
||||
*
|
||||
* We need to reread the backup using its ID and save it changing only its
|
||||
* status.
|
||||
*/
|
||||
void
|
||||
write_backup_status(pgBackup *backup)
|
||||
{
|
||||
pgBackup *tmp;
|
||||
|
||||
tmp = read_backup(backup->start_time);
|
||||
|
||||
tmp->status = backup->status;
|
||||
write_backup(tmp);
|
||||
|
||||
pgBackupFree(tmp);
|
||||
}
|
||||
|
||||
/*
|
||||
* Get backup_mode in string representation.
|
||||
*/
|
||||
@ -284,20 +298,27 @@ catalog_get_backup_list(time_t requested_backup_id)
|
||||
snprintf(backup_conf_path, MAXPGPATH, "%s/%s", data_path, BACKUP_CONTROL_FILE);
|
||||
backup = readBackupControlFile(backup_conf_path);
|
||||
|
||||
/* ignore corrupted backups */
|
||||
if (backup)
|
||||
if (!backup)
|
||||
{
|
||||
backup->backup_id = backup->start_time;
|
||||
|
||||
if (requested_backup_id != INVALID_BACKUP_ID
|
||||
&& requested_backup_id != backup->start_time)
|
||||
{
|
||||
pgBackupFree(backup);
|
||||
continue;
|
||||
}
|
||||
parray_append(backups, backup);
|
||||
backup = NULL;
|
||||
backup = pgut_new(pgBackup);
|
||||
pgBackupInit(backup);
|
||||
backup->start_time = base36dec(data_ent->d_name);
|
||||
}
|
||||
else if (strcmp(base36enc(backup->start_time), data_ent->d_name) != 0)
|
||||
{
|
||||
elog(WARNING, "backup ID in control file \"%s\" doesn't match name of the backup folder \"%s\"",
|
||||
base36enc(backup->start_time), backup_conf_path);
|
||||
}
|
||||
|
||||
backup->backup_id = backup->start_time;
|
||||
if (requested_backup_id != INVALID_BACKUP_ID
|
||||
&& requested_backup_id != backup->start_time)
|
||||
{
|
||||
pgBackupFree(backup);
|
||||
continue;
|
||||
}
|
||||
parray_append(backups, backup);
|
||||
backup = NULL;
|
||||
|
||||
if (errno && errno != ENOENT)
|
||||
{
|
||||
@ -424,7 +445,8 @@ pgBackupWriteControl(FILE *out, pgBackup *backup)
|
||||
fprintf(out, "block-size = %u\n", backup->block_size);
|
||||
fprintf(out, "xlog-block-size = %u\n", backup->wal_block_size);
|
||||
fprintf(out, "checksum-version = %u\n", backup->checksum_version);
|
||||
fprintf(out, "program-version = %s\n", PROGRAM_VERSION);
|
||||
if (backup->program_version[0] != '\0')
|
||||
fprintf(out, "program-version = %s\n", backup->program_version);
|
||||
if (backup->server_version[0] != '\0')
|
||||
fprintf(out, "server-version = %s\n", backup->server_version);
|
||||
|
||||
@ -474,17 +496,19 @@ pgBackupWriteControl(FILE *out, pgBackup *backup)
|
||||
fprintf(out, "primary_conninfo = '%s'\n", backup->primary_conninfo);
|
||||
}
|
||||
|
||||
/* create BACKUP_CONTROL_FILE */
|
||||
/*
|
||||
* Save the backup content into BACKUP_CONTROL_FILE.
|
||||
*/
|
||||
void
|
||||
pgBackupWriteBackupControlFile(pgBackup *backup)
|
||||
write_backup(pgBackup *backup)
|
||||
{
|
||||
FILE *fp = NULL;
|
||||
char ini_path[MAXPGPATH];
|
||||
char conf_path[MAXPGPATH];
|
||||
|
||||
pgBackupGetPath(backup, ini_path, lengthof(ini_path), BACKUP_CONTROL_FILE);
|
||||
fp = fopen(ini_path, "wt");
|
||||
pgBackupGetPath(backup, conf_path, lengthof(conf_path), BACKUP_CONTROL_FILE);
|
||||
fp = fopen(conf_path, "wt");
|
||||
if (fp == NULL)
|
||||
elog(ERROR, "cannot open configuration file \"%s\": %s", ini_path,
|
||||
elog(ERROR, "Cannot open configuration file \"%s\": %s", conf_path,
|
||||
strerror(errno));
|
||||
|
||||
pgBackupWriteControl(fp, backup);
|
||||
@ -562,6 +586,7 @@ readBackupControlFile(const char *path)
|
||||
{0}
|
||||
};
|
||||
|
||||
pgBackupInit(backup);
|
||||
if (access(path, F_OK) != 0)
|
||||
{
|
||||
elog(WARNING, "Control file \"%s\" doesn't exist", path);
|
||||
@ -569,7 +594,6 @@ readBackupControlFile(const char *path)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
pgBackupInit(backup);
|
||||
parsed_options = pgut_readopt(path, options, WARNING, true);
|
||||
|
||||
if (parsed_options == 0)
|
||||
@ -869,47 +893,133 @@ pgBackupGetPath2(const pgBackup *backup, char *path, size_t len,
|
||||
make_native_path(path);
|
||||
}
|
||||
|
||||
/* Find parent base FULL backup for current backup using parent_backup_link,
|
||||
* return NULL if not found
|
||||
/*
|
||||
* Find parent base FULL backup for current backup using parent_backup_link
|
||||
*/
|
||||
pgBackup*
|
||||
find_parent_backup(pgBackup *current_backup)
|
||||
find_parent_full_backup(pgBackup *current_backup)
|
||||
{
|
||||
pgBackup *base_full_backup = NULL;
|
||||
base_full_backup = current_backup;
|
||||
|
||||
while (base_full_backup->backup_mode != BACKUP_MODE_FULL)
|
||||
if (!current_backup)
|
||||
elog(ERROR, "Target backup cannot be NULL");
|
||||
|
||||
while (base_full_backup->parent_backup_link != NULL)
|
||||
{
|
||||
/*
|
||||
* If we haven't found parent for incremental backup,
|
||||
* mark it and all depending backups as orphaned
|
||||
*/
|
||||
if (base_full_backup->parent_backup_link == NULL
|
||||
|| (base_full_backup->status != BACKUP_STATUS_OK
|
||||
&& base_full_backup->status != BACKUP_STATUS_DONE))
|
||||
{
|
||||
pgBackup *orphaned_backup = current_backup;
|
||||
|
||||
while (orphaned_backup != NULL)
|
||||
{
|
||||
orphaned_backup->status = BACKUP_STATUS_ORPHAN;
|
||||
pgBackupWriteBackupControlFile(orphaned_backup);
|
||||
if (base_full_backup->parent_backup_link == NULL)
|
||||
elog(WARNING, "Backup %s is orphaned because its parent backup is not found",
|
||||
base36enc(orphaned_backup->start_time));
|
||||
else
|
||||
elog(WARNING, "Backup %s is orphaned because its parent backup is corrupted",
|
||||
base36enc(orphaned_backup->start_time));
|
||||
|
||||
orphaned_backup = orphaned_backup->parent_backup_link;
|
||||
}
|
||||
|
||||
base_full_backup = NULL;
|
||||
break;
|
||||
}
|
||||
|
||||
base_full_backup = base_full_backup->parent_backup_link;
|
||||
}
|
||||
|
||||
if (base_full_backup->backup_mode != BACKUP_MODE_FULL)
|
||||
elog(ERROR, "Failed to find FULL backup parent for %s",
|
||||
base36enc(current_backup->start_time));
|
||||
|
||||
return base_full_backup;
|
||||
}
|
||||
|
||||
/*
|
||||
* Interate over parent chain and look for any problems.
|
||||
* Return 0 if chain is broken.
|
||||
* result_backup must contain oldest existing backup after missing backup.
|
||||
* we have no way to know if there are multiple missing backups.
|
||||
* Return 1 if chain is intact, but at least one backup is !OK.
|
||||
* result_backup must contain oldest !OK backup.
|
||||
* Return 2 if chain is intact and all backups are OK.
|
||||
* result_backup must contain FULL backup on which chain is based.
|
||||
*/
|
||||
int
|
||||
scan_parent_chain(pgBackup *current_backup, pgBackup **result_backup)
|
||||
{
|
||||
pgBackup *target_backup = NULL;
|
||||
pgBackup *invalid_backup = NULL;
|
||||
|
||||
if (!current_backup)
|
||||
elog(ERROR, "Target backup cannot be NULL");
|
||||
|
||||
target_backup = current_backup;
|
||||
|
||||
while (target_backup->parent_backup_link)
|
||||
{
|
||||
if (target_backup->status != BACKUP_STATUS_OK &&
|
||||
target_backup->status != BACKUP_STATUS_DONE)
|
||||
/* oldest invalid backup in parent chain */
|
||||
invalid_backup = target_backup;
|
||||
|
||||
|
||||
target_backup = target_backup->parent_backup_link;
|
||||
}
|
||||
|
||||
/* Prevous loop will skip FULL backup because his parent_backup_link is NULL */
|
||||
if (target_backup->backup_mode == BACKUP_MODE_FULL &&
|
||||
(target_backup->status != BACKUP_STATUS_OK &&
|
||||
target_backup->status != BACKUP_STATUS_DONE))
|
||||
{
|
||||
invalid_backup = target_backup;
|
||||
}
|
||||
|
||||
/* found chain end and oldest backup is not FULL */
|
||||
if (target_backup->backup_mode != BACKUP_MODE_FULL)
|
||||
{
|
||||
/* Set oldest child backup in chain */
|
||||
*result_backup = target_backup;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* chain is ok, but some backups are invalid */
|
||||
if (invalid_backup)
|
||||
{
|
||||
*result_backup = invalid_backup;
|
||||
return 1;
|
||||
}
|
||||
|
||||
*result_backup = target_backup;
|
||||
return 2;
|
||||
}
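
A usage sketch for scan_parent_chain(), written only against the return-value contract documented in the comment above (0 = chain broken, 1 = intact but some backup invalid, 2 = intact and all OK). The caller name, dest_backup and the messages are illustrative, not from the patch:

```c
static pgBackup *
resolve_chain_or_die(pgBackup *dest_backup)
{
	pgBackup   *tmp_backup = NULL;
	int			result = scan_parent_chain(dest_backup, &tmp_backup);

	if (result == 0)
		/* chain is broken: tmp_backup is the oldest backup left after the missing one */
		elog(ERROR, "Parent of backup %s is missing",
			 base36enc(tmp_backup->start_time));

	if (result == 1)
		/* chain is intact, but tmp_backup is the oldest !OK backup in it */
		elog(ERROR, "Backup %s has an invalid parent %s",
			 base36enc(dest_backup->start_time),
			 base36enc(tmp_backup->start_time));

	/* result == 2: tmp_backup is the FULL backup the chain is based on */
	return tmp_backup;
}
```
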
|
||||
|
||||
/*
|
||||
* Determine if child_backup descend from parent_backup
|
||||
* This check DO NOT(!!!) guarantee that parent chain is intact,
|
||||
* because parent_backup can be missing.
|
||||
* If inclusive is true, then child_backup counts as a child of himself
|
||||
* if parent_backup_time is start_time of child_backup.
|
||||
*/
|
||||
bool
|
||||
is_parent(time_t parent_backup_time, pgBackup *child_backup, bool inclusive)
|
||||
{
|
||||
if (!child_backup)
|
||||
elog(ERROR, "Target backup cannot be NULL");
|
||||
|
||||
while (child_backup->parent_backup_link &&
|
||||
child_backup->parent_backup != parent_backup_time)
|
||||
{
|
||||
child_backup = child_backup->parent_backup_link;
|
||||
}
|
||||
|
||||
if (child_backup->parent_backup == parent_backup_time)
|
||||
return true;
|
||||
|
||||
if (inclusive && child_backup->start_time == parent_backup_time)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Return backup index number.
|
||||
* Note: this index number holds true until new sorting of backup list
|
||||
*/
|
||||
int
|
||||
get_backup_index_number(parray *backup_list, pgBackup *backup)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < parray_num(backup_list); i++)
|
||||
{
|
||||
pgBackup *tmp_backup = (pgBackup *) parray_get(backup_list, i);
|
||||
|
||||
if (tmp_backup->start_time == backup->start_time)
|
||||
return i;
|
||||
}
|
||||
elog(ERROR, "Failed to find backup %s", base36enc(backup->start_time));
|
||||
return 0;
|
||||
}
|
||||
|
@ -8,9 +8,6 @@
|
||||
*/
|
||||
|
||||
#include "pg_probackup.h"
|
||||
#include "utils/logger.h"
|
||||
|
||||
#include "pqexpbuffer.h"
|
||||
|
||||
#include "utils/json.h"
|
||||
|
||||
@ -171,12 +168,12 @@ writeBackupCatalogConfig(FILE *out, pgBackupConfig *config)
|
||||
if (config->master_user)
|
||||
fprintf(out, "master-user = %s\n", config->master_user);
|
||||
|
||||
convert_from_base_unit_u(config->replica_timeout, OPTION_UNIT_S,
|
||||
convert_from_base_unit_u(config->replica_timeout, OPTION_UNIT_MS,
|
||||
&res, &unit);
|
||||
fprintf(out, "replica-timeout = " UINT64_FORMAT "%s\n", res, unit);
|
||||
|
||||
fprintf(out, "#Archive parameters:\n");
|
||||
convert_from_base_unit_u(config->archive_timeout, OPTION_UNIT_S,
|
||||
convert_from_base_unit_u(config->archive_timeout, OPTION_UNIT_MS,
|
||||
&res, &unit);
|
||||
fprintf(out, "archive-timeout = " UINT64_FORMAT "%s\n", res, unit);
|
||||
|
||||
@ -193,11 +190,11 @@ writeBackupCatalogConfig(FILE *out, pgBackupConfig *config)
|
||||
fprintf(out, "log-directory = %s\n", config->log_directory);
|
||||
/* Convert values from base unit */
|
||||
convert_from_base_unit_u(config->log_rotation_size, OPTION_UNIT_KB,
|
||||
&res, &unit);
|
||||
&res, &unit);
|
||||
fprintf(out, "log-rotation-size = " UINT64_FORMAT "%s\n", res, (res)?unit:"KB");
|
||||
|
||||
convert_from_base_unit_u(config->log_rotation_age, OPTION_UNIT_S,
|
||||
&res, &unit);
|
||||
convert_from_base_unit_u(config->log_rotation_age, OPTION_UNIT_MS,
|
||||
&res, &unit);
|
||||
fprintf(out, "log-rotation-age = " UINT64_FORMAT "%s\n", res, (res)?unit:"min");
|
||||
|
||||
fprintf(out, "#Retention parameters:\n");
|
||||
@ -247,8 +244,8 @@ readBackupCatalogConfigFile(void)
|
||||
{ 's', 0, "log-filename", &(config->log_filename), SOURCE_CMDLINE },
|
||||
{ 's', 0, "error-log-filename", &(config->error_log_filename), SOURCE_CMDLINE },
|
||||
{ 's', 0, "log-directory", &(config->log_directory), SOURCE_CMDLINE },
|
||||
{ 'u', 0, "log-rotation-size", &(config->log_rotation_size), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_KB },
|
||||
{ 'u', 0, "log-rotation-age", &(config->log_rotation_age), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_S },
|
||||
{ 'U', 0, "log-rotation-size", &(config->log_rotation_size), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_KB },
|
||||
{ 'U', 0, "log-rotation-age", &(config->log_rotation_age), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_MS },
|
||||
/* connection options */
|
||||
{ 's', 0, "pgdata", &(config->pgdata), SOURCE_FILE_STRICT },
|
||||
{ 's', 0, "pgdatabase", &(config->pgdatabase), SOURCE_FILE_STRICT },
|
||||
@ -260,14 +257,14 @@ readBackupCatalogConfigFile(void)
|
||||
{ 's', 0, "master-port", &(config->master_port), SOURCE_FILE_STRICT },
|
||||
{ 's', 0, "master-db", &(config->master_db), SOURCE_FILE_STRICT },
|
||||
{ 's', 0, "master-user", &(config->master_user), SOURCE_FILE_STRICT },
|
||||
{ 'u', 0, "replica-timeout", &(config->replica_timeout), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_S },
|
||||
{ 'u', 0, "replica-timeout", &(config->replica_timeout), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_MS },
|
||||
/* other options */
|
||||
{ 'U', 0, "system-identifier", &(config->system_identifier), SOURCE_FILE_STRICT },
|
||||
#if PG_VERSION_NUM >= 110000
|
||||
{'u', 0, "xlog-seg-size", &config->xlog_seg_size, SOURCE_FILE_STRICT},
|
||||
#endif
|
||||
/* archive options */
|
||||
{ 'u', 0, "archive-timeout", &(config->archive_timeout), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_S },
|
||||
{ 'u', 0, "archive-timeout", &(config->archive_timeout), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_MS },
|
||||
{0}
|
||||
};
|
||||
|
||||
@ -424,13 +421,13 @@ show_configure_json(pgBackupConfig *config)
|
||||
true);
|
||||
|
||||
json_add_key(buf, "replica-timeout", json_level, true);
|
||||
convert_from_base_unit_u(config->replica_timeout, OPTION_UNIT_S,
|
||||
convert_from_base_unit_u(config->replica_timeout, OPTION_UNIT_MS,
|
||||
&res, &unit);
|
||||
appendPQExpBuffer(buf, UINT64_FORMAT "%s", res, unit);
|
||||
|
||||
/* Archive parameters */
|
||||
json_add_key(buf, "archive-timeout", json_level, true);
|
||||
convert_from_base_unit_u(config->archive_timeout, OPTION_UNIT_S,
|
||||
convert_from_base_unit_u(config->archive_timeout, OPTION_UNIT_MS,
|
||||
&res, &unit);
|
||||
appendPQExpBuffer(buf, UINT64_FORMAT "%s", res, unit);
|
||||
|
||||
@ -467,7 +464,7 @@ show_configure_json(pgBackupConfig *config)
|
||||
appendPQExpBuffer(buf, UINT64_FORMAT "%s", res, (res)?unit:"KB");
|
||||
|
||||
json_add_key(buf, "log-rotation-age", json_level, true);
|
||||
convert_from_base_unit_u(config->log_rotation_age, OPTION_UNIT_S,
|
||||
convert_from_base_unit_u(config->log_rotation_age, OPTION_UNIT_MS,
|
||||
&res, &unit);
|
||||
appendPQExpBuffer(buf, UINT64_FORMAT "%s", res, (res)?unit:"min");
|
||||
|
||||
|

src/data.c (237 lines changed)
@ -10,21 +10,23 @@
|
||||
|
||||
#include "pg_probackup.h"
|
||||
|
||||
#include <unistd.h>
|
||||
#include <time.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
|
||||
#include "libpq/pqsignal.h"
|
||||
#include "storage/block.h"
|
||||
#include "storage/bufpage.h"
|
||||
#include "storage/checksum.h"
|
||||
#include "storage/checksum_impl.h"
|
||||
#include <common/pg_lzcompress.h>
|
||||
|
||||
#include <sys/stat.h>
|
||||
|
||||
#ifdef HAVE_LIBZ
|
||||
#include <zlib.h>
|
||||
#endif
|
||||
|
||||
/* Union to ease operations on relation pages */
|
||||
typedef union DataPage
|
||||
{
|
||||
PageHeaderData page_data;
|
||||
char data[BLCKSZ];
|
||||
} DataPage;
|
||||
|
||||
#ifdef HAVE_LIBZ
|
||||
/* Implementation of zlib compression method */
|
||||
static int32
|
||||
@ -339,9 +341,11 @@ prepare_page(backup_files_arg *arguments,
|
||||
|
||||
}
|
||||
|
||||
/* Nullified pages must be copied by DELTA backup, just to be safe */
|
||||
if (backup_mode == BACKUP_MODE_DIFF_DELTA &&
|
||||
file->exists_in_prev &&
|
||||
!page_is_truncated &&
|
||||
page_lsn &&
|
||||
page_lsn < prev_backup_start_lsn)
|
||||
{
|
||||
elog(VERBOSE, "Skipping blknum: %u in file: %s", blknum, file->path);
|
||||
@ -719,7 +723,7 @@ restore_data_file(const char *to_path, pgFile *file, bool allow_truncate,
|
||||
|
||||
uncompressed_size = do_decompress(page.data, BLCKSZ,
|
||||
compressed_page.data,
|
||||
MAXALIGN(header.compressed_size),
|
||||
header.compressed_size,
|
||||
file->compress_alg);
|
||||
|
||||
if (uncompressed_size != BLCKSZ)
|
||||
@ -794,7 +798,7 @@ restore_data_file(const char *to_path, pgFile *file, bool allow_truncate,
|
||||
if (ftruncate(fileno(out), write_pos) != 0)
|
||||
elog(ERROR, "cannot truncate \"%s\": %s",
|
||||
file->path, strerror(errno));
|
||||
elog(INFO, "Delta truncate file %s to block %u",
|
||||
elog(VERBOSE, "Delta truncate file %s to block %u",
|
||||
file->path, truncate_from);
|
||||
}
|
||||
|
||||
@ -1405,3 +1409,216 @@ calc_file_checksum(pgFile *file)
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Validate given page.
|
||||
*
|
||||
* Returns value:
|
||||
* 0 - if the page is not found
|
||||
* 1 - if the page is found and valid
|
||||
* -1 - if the page is found but invalid
|
||||
*/
|
||||
#define PAGE_IS_NOT_FOUND 0
|
||||
#define PAGE_IS_FOUND_AND_VALID 1
|
||||
#define PAGE_IS_FOUND_AND_NOT_VALID -1
|
||||
static int
|
||||
validate_one_page(Page page, pgFile *file,
|
||||
BlockNumber blknum, XLogRecPtr stop_lsn,
|
||||
uint32 checksum_version)
|
||||
{
|
||||
PageHeader phdr;
|
||||
XLogRecPtr lsn;
|
||||
bool page_header_is_sane = false;
|
||||
bool checksum_is_ok = false;
|
||||
|
||||
/* new level of paranoia */
|
||||
if (page == NULL)
|
||||
{
|
||||
elog(LOG, "File \"%s\", block %u, page is NULL", file->path, blknum);
|
||||
return PAGE_IS_NOT_FOUND;
|
||||
}
|
||||
|
||||
phdr = (PageHeader) page;
|
||||
|
||||
if (PageIsNew(page))
|
||||
{
|
||||
int i;
|
||||
|
||||
/* Check if the page is zeroed. */
|
||||
for(i = 0; i < BLCKSZ && page[i] == 0; i++);
|
||||
|
||||
if (i == BLCKSZ)
|
||||
{
|
||||
elog(LOG, "File: %s blknum %u, page is New, empty zeroed page",
|
||||
file->path, blknum);
|
||||
return PAGE_IS_FOUND_AND_VALID;
|
||||
}
|
||||
else
|
||||
{
|
||||
elog(WARNING, "File: %s blknum %u, page is New, but not zeroed",
|
||||
file->path, blknum);
|
||||
}
|
||||
|
||||
/* Page is zeroed. No sense to check header and checksum. */
|
||||
page_header_is_sane = false;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (PageGetPageSize(phdr) == BLCKSZ &&
|
||||
PageGetPageLayoutVersion(phdr) == PG_PAGE_LAYOUT_VERSION &&
|
||||
(phdr->pd_flags & ~PD_VALID_FLAG_BITS) == 0 &&
|
||||
phdr->pd_lower >= SizeOfPageHeaderData &&
|
||||
phdr->pd_lower <= phdr->pd_upper &&
|
||||
phdr->pd_upper <= phdr->pd_special &&
|
||||
phdr->pd_special <= BLCKSZ &&
|
||||
phdr->pd_special == MAXALIGN(phdr->pd_special))
|
||||
page_header_is_sane = true;
|
||||
}
|
||||
|
||||
if (page_header_is_sane)
|
||||
{
|
||||
/* Verify checksum */
|
||||
if (checksum_version)
|
||||
{
|
||||
/*
|
||||
* If checksum is wrong, sleep a bit and then try again
|
||||
* several times. If it didn't help, throw error
|
||||
*/
|
||||
if (pg_checksum_page(page, file->segno * RELSEG_SIZE + blknum)
|
||||
== ((PageHeader) page)->pd_checksum)
|
||||
{
|
||||
checksum_is_ok = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
elog(WARNING, "File: %s blknum %u have wrong checksum",
|
||||
file->path, blknum);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Get lsn from page header. Ensure that page is from our time */
|
||||
lsn = PageXLogRecPtrGet(phdr->pd_lsn);
|
||||
|
||||
if (lsn > stop_lsn)
|
||||
elog(WARNING, "File: %s, block %u, checksum is not enabled."
|
||||
"page is from future: pageLSN %X/%X stopLSN %X/%X",
|
||||
file->path, blknum, (uint32) (lsn >> 32), (uint32) lsn,
|
||||
(uint32) (stop_lsn >> 32), (uint32) stop_lsn);
|
||||
else
|
||||
return PAGE_IS_FOUND_AND_VALID;
|
||||
}
|
||||
|
||||
if (checksum_is_ok)
|
||||
{
|
||||
/* Get lsn from page header. Ensure that page is from our time */
|
||||
lsn = PageXLogRecPtrGet(phdr->pd_lsn);
|
||||
|
||||
if (lsn > stop_lsn)
|
||||
elog(WARNING, "File: %s, block %u, checksum is correct."
|
||||
"page is from future: pageLSN %X/%X stopLSN %X/%X",
|
||||
file->path, blknum, (uint32) (lsn >> 32), (uint32) lsn,
|
||||
(uint32) (stop_lsn >> 32), (uint32) stop_lsn);
|
||||
else
|
||||
return PAGE_IS_FOUND_AND_VALID;
|
||||
}
|
||||
}
|
||||
|
||||
return PAGE_IS_FOUND_AND_NOT_VALID;
|
||||
}
|
||||
|
||||
/* Valiate pages of datafile in backup one by one */
|
||||
bool
|
||||
check_file_pages(pgFile *file, XLogRecPtr stop_lsn, uint32 checksum_version)
|
||||
{
|
||||
size_t read_len = 0;
|
||||
bool is_valid = true;
|
||||
FILE *in;
|
||||
|
||||
elog(VERBOSE, "validate relation blocks for file %s", file->name);
|
||||
|
||||
in = fopen(file->path, PG_BINARY_R);
|
||||
if (in == NULL)
|
||||
{
|
||||
if (errno == ENOENT)
|
||||
{
|
||||
elog(WARNING, "File \"%s\" is not found", file->path);
|
||||
return false;
|
||||
}
|
||||
|
||||
elog(ERROR, "cannot open file \"%s\": %s",
|
||||
file->path, strerror(errno));
|
||||
}
|
||||
|
||||
/* read and validate pages one by one */
|
||||
while (true)
|
||||
{
|
||||
DataPage compressed_page; /* used as read buffer */
|
||||
DataPage page;
|
||||
BackupPageHeader header;
|
||||
BlockNumber blknum = 0;
|
||||
|
||||
/* read BackupPageHeader */
|
||||
read_len = fread(&header, 1, sizeof(header), in);
|
||||
if (read_len != sizeof(header))
|
||||
{
|
||||
int errno_tmp = errno;
|
||||
if (read_len == 0 && feof(in))
|
||||
break; /* EOF found */
|
||||
else if (read_len != 0 && feof(in))
|
||||
elog(ERROR,
|
||||
"odd size page found at block %u of \"%s\"",
|
||||
blknum, file->path);
|
||||
else
|
||||
elog(ERROR, "cannot read header of block %u of \"%s\": %s",
|
||||
blknum, file->path, strerror(errno_tmp));
|
||||
}
|
||||
|
||||
if (header.block < blknum)
|
||||
elog(ERROR, "backup is broken at file->path %s block %u",
|
||||
file->path, blknum);
|
||||
|
||||
blknum = header.block;
|
||||
|
||||
if (header.compressed_size == PageIsTruncated)
|
||||
{
|
||||
elog(LOG, "File %s, block %u is truncated",
|
||||
file->path, blknum);
|
||||
continue;
|
||||
}
|
||||
|
||||
Assert(header.compressed_size <= BLCKSZ);
|
||||
|
||||
read_len = fread(compressed_page.data, 1,
|
||||
MAXALIGN(header.compressed_size), in);
|
||||
if (read_len != MAXALIGN(header.compressed_size))
|
||||
elog(ERROR, "cannot read block %u of \"%s\" read %lu of %d",
|
||||
blknum, file->path, read_len, header.compressed_size);
|
||||
|
||||
if (header.compressed_size != BLCKSZ)
|
||||
{
|
||||
int32 uncompressed_size = 0;
|
||||
|
||||
uncompressed_size = do_decompress(page.data, BLCKSZ,
|
||||
compressed_page.data,
|
||||
header.compressed_size,
|
||||
file->compress_alg);
|
||||
|
||||
if (uncompressed_size != BLCKSZ)
|
||||
elog(ERROR, "page of file \"%s\" uncompressed to %d bytes. != BLCKSZ",
|
||||
file->path, uncompressed_size);
|
||||
|
||||
if (validate_one_page(page.data, file, blknum,
|
||||
stop_lsn, checksum_version) == PAGE_IS_FOUND_AND_NOT_VALID)
|
||||
is_valid = false;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (validate_one_page(compressed_page.data, file, blknum,
|
||||
stop_lsn, checksum_version) == PAGE_IS_FOUND_AND_NOT_VALID)
|
||||
is_valid = false;
|
||||
}
|
||||
}
|
||||
|
||||
return is_valid;
|
||||
}
|
||||
|

src/delete.c (16 lines changed)
@@ -14,11 +14,11 @@
#include <time.h>
#include <unistd.h>

static int pgBackupDeleteFiles(pgBackup *backup);
static int delete_backup_files(pgBackup *backup);
static void delete_walfiles(XLogRecPtr oldest_lsn, TimeLineID oldest_tli,
uint32 xlog_seg_size);

int
void
do_delete(time_t backup_id)
{
int i;
@@ -85,7 +85,7 @@ do_delete(time_t backup_id)
if (interrupted)
elog(ERROR, "interrupted during delete backup");

pgBackupDeleteFiles(backup);
delete_backup_files(backup);
}

parray_free(delete_list);
@@ -115,8 +115,6 @@ do_delete(time_t backup_id)
/* cleanup */
parray_walk(backup_list, pgBackupFree);
parray_free(backup_list);

return 0;
}

/*
@@ -205,7 +203,7 @@ do_retention_purge(void)
}

/* Delete backup and update status to DELETED */
pgBackupDeleteFiles(backup);
delete_backup_files(backup);
backup_deleted = true;
}
}
@@ -248,7 +246,7 @@ do_retention_purge(void)
* BACKUP_STATUS_DELETED.
*/
static int
pgBackupDeleteFiles(pgBackup *backup)
delete_backup_files(pgBackup *backup)
{
size_t i;
char path[MAXPGPATH];
@@ -271,7 +269,7 @@ pgBackupDeleteFiles(pgBackup *backup)
* the error occurs before deleting all backup files.
*/
backup->status = BACKUP_STATUS_DELETING;
pgBackupWriteBackupControlFile(backup);
write_backup_status(backup);

/* list files to be deleted */
files = parray_new();
@@ -433,7 +431,7 @@ do_delete_instance(void)
for (i = 0; i < parray_num(backup_list); i++)
{
pgBackup *backup = (pgBackup *) parray_get(backup_list, i);
pgBackupDeleteFiles(backup);
delete_backup_files(backup);
}

/* Cleanup */

src/dir.c (37 lines changed)
@ -10,15 +10,14 @@
|
||||
|
||||
#include "pg_probackup.h"
|
||||
|
||||
#if PG_VERSION_NUM < 110000
|
||||
#include "catalog/catalog.h"
|
||||
#endif
|
||||
#include "catalog/pg_tablespace.h"
|
||||
|
||||
#include <unistd.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/types.h>
|
||||
#include <dirent.h>
|
||||
#include <time.h>
|
||||
|
||||
#include "catalog/catalog.h"
|
||||
#include "catalog/pg_tablespace.h"
|
||||
#include "datapagemap.h"
|
||||
|
||||
/*
|
||||
* The contents of these directories are removed or recreated during server
|
||||
@ -368,7 +367,7 @@ BlackListCompare(const void *str1, const void *str2)
|
||||
* pgFile objects to "files". We add "root" to "files" if add_root is true.
|
||||
*
|
||||
* When omit_symlink is true, symbolic link is ignored and only file or
|
||||
* directory llnked to will be listed.
|
||||
* directory linked to will be listed.
|
||||
*/
|
||||
void
|
||||
dir_list_file(parray *files, const char *root, bool exclude, bool omit_symlink,
|
||||
@ -918,6 +917,8 @@ create_data_directories(const char *data_dir, const char *backup_dir,
|
||||
{
|
||||
links = parray_new();
|
||||
read_tablespace_map(links, backup_dir);
|
||||
/* Sort links by a link name*/
|
||||
parray_qsort(links, pgFileComparePath);
|
||||
}
|
||||
|
||||
join_path_components(backup_database_dir, backup_dir, DATABASE_DIR);
|
||||
@ -994,14 +995,6 @@ create_data_directories(const char *data_dir, const char *backup_dir,
|
||||
linked_path, dir_created, link_name);
|
||||
}
|
||||
|
||||
/*
|
||||
* This check was done in check_tablespace_mapping(). But do
|
||||
* it again.
|
||||
*/
|
||||
if (!dir_is_empty(linked_path))
|
||||
elog(ERROR, "restore tablespace destination is not empty: \"%s\"",
|
||||
linked_path);
|
||||
|
||||
if (link_sep)
|
||||
elog(LOG, "create directory \"%s\" and symbolic link \"%.*s\"",
|
||||
linked_path,
|
||||
@ -1102,7 +1095,6 @@ read_tablespace_map(parray *files, const char *backup_dir)
|
||||
parray_append(files, file);
|
||||
}
|
||||
|
||||
parray_qsort(files, pgFileCompareLinked);
|
||||
fclose(fp);
|
||||
}
|
||||
|
||||
@ -1126,6 +1118,8 @@ check_tablespace_mapping(pgBackup *backup)
|
||||
|
||||
pgBackupGetPath(backup, this_backup_path, lengthof(this_backup_path), NULL);
|
||||
read_tablespace_map(links, this_backup_path);
|
||||
/* Sort links by the path of a linked file*/
|
||||
parray_qsort(links, pgFileCompareLinked);
|
||||
|
||||
if (log_level_console <= LOG || log_level_file <= LOG)
|
||||
elog(LOG, "check tablespace directories of backup %s",
|
||||
@ -1303,7 +1297,13 @@ get_control_value(const char *str, const char *name,
|
||||
|
||||
*buf_int64_ptr = '\0';
|
||||
if (!parse_int64(buf_int64, value_int64, 0))
|
||||
goto bad_format;
|
||||
{
|
||||
/* We assume that too big value is -1 */
|
||||
if (errno == ERANGE)
|
||||
*value_int64 = BYTES_INVALID;
|
||||
else
|
||||
goto bad_format;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
@ -1363,8 +1363,7 @@ dir_read_file_list(const char *root, const char *file_txt)
|
||||
|
||||
fp = fopen(file_txt, "rt");
|
||||
if (fp == NULL)
|
||||
elog(errno == ENOENT ? ERROR : ERROR,
|
||||
"cannot open \"%s\": %s", file_txt, strerror(errno));
|
||||
elog(ERROR, "cannot open \"%s\": %s", file_txt, strerror(errno));
|
||||
|
||||
files = parray_new();
|
||||
|
||||
|

src/fetch.c (14 lines changed)
@@ -8,19 +8,11 @@
*-------------------------------------------------------------------------
*/

#include "postgres_fe.h"

#include "catalog/catalog.h"

#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>

#include "pg_probackup.h"

#include <sys/stat.h>
#include <unistd.h>

/*
* Read a file into memory. The file to be read is <datadir>/<path>.
* The file contents are returned in a malloc'd buffer, and *filesize

src/help.c
@@ -6,6 +6,7 @@
*
*-------------------------------------------------------------------------
*/

#include "pg_probackup.h"

static void help_init(void);

src/init.c
@@ -11,7 +11,6 @@
#include "pg_probackup.h"

#include <unistd.h>
#include <dirent.h>
#include <sys/stat.h>

/*

src/merge.c (32 lines changed)
@ -75,12 +75,14 @@ do_merge(time_t backup_id)
|
||||
continue;
|
||||
else if (backup->start_time == backup_id && !dest_backup)
|
||||
{
|
||||
if (backup->status != BACKUP_STATUS_OK)
|
||||
if (backup->status != BACKUP_STATUS_OK &&
|
||||
/* It is possible that previous merging was interrupted */
|
||||
backup->status != BACKUP_STATUS_MERGING)
|
||||
elog(ERROR, "Backup %s has status: %s",
|
||||
base36enc(backup->start_time), status2str(backup->status));
|
||||
|
||||
if (backup->backup_mode == BACKUP_MODE_FULL)
|
||||
elog(ERROR, "Backup %s if full backup",
|
||||
elog(ERROR, "Backup %s is full backup",
|
||||
base36enc(backup->start_time));
|
||||
|
||||
dest_backup = backup;
|
||||
@ -93,19 +95,15 @@ do_merge(time_t backup_id)
|
||||
if (backup->start_time != prev_parent)
|
||||
continue;
|
||||
|
||||
if (backup->status != BACKUP_STATUS_OK)
|
||||
elog(ERROR, "Skipping backup %s, because it has non-valid status: %s",
|
||||
if (backup->status != BACKUP_STATUS_OK &&
|
||||
/* It is possible that previous merging was interrupted */
|
||||
backup->status != BACKUP_STATUS_MERGING)
|
||||
elog(ERROR, "Backup %s has status: %s",
|
||||
base36enc(backup->start_time), status2str(backup->status));
|
||||
|
||||
/* If we already found dest_backup, look for full backup */
|
||||
if (dest_backup && backup->backup_mode == BACKUP_MODE_FULL)
|
||||
{
|
||||
if (backup->status != BACKUP_STATUS_OK)
|
||||
elog(ERROR, "Parent full backup %s for the given backup %s has status: %s",
|
||||
base36enc_dup(backup->start_time),
|
||||
base36enc_dup(dest_backup->start_time),
|
||||
status2str(backup->status));
|
||||
|
||||
full_backup = backup;
|
||||
full_backup_idx = i;
|
||||
|
||||
@ -169,10 +167,10 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup)
|
||||
elog(LOG, "Merging backup %s with backup %s", from_backup_id, to_backup_id);
|
||||
|
||||
to_backup->status = BACKUP_STATUS_MERGING;
|
||||
pgBackupWriteBackupControlFile(to_backup);
|
||||
write_backup_status(to_backup);
|
||||
|
||||
from_backup->status = BACKUP_STATUS_MERGING;
|
||||
pgBackupWriteBackupControlFile(from_backup);
|
||||
write_backup_status(from_backup);
|
||||
|
||||
/*
|
||||
* Make backup paths.
|
||||
@ -328,7 +326,7 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup)
|
||||
to_backup->wal_bytes = BYTES_INVALID;
|
||||
|
||||
pgBackupWriteFileList(to_backup, files, from_database_path);
|
||||
pgBackupWriteBackupControlFile(to_backup);
|
||||
write_backup_status(to_backup);
|
||||
|
||||
/* Cleanup */
|
||||
pfree(threads_args);
|
||||
@ -353,15 +351,10 @@ merge_files(void *arg)
|
||||
merge_files_arg *argument = (merge_files_arg *) arg;
|
||||
pgBackup *to_backup = argument->to_backup;
|
||||
pgBackup *from_backup = argument->from_backup;
|
||||
char tmp_file_path[MAXPGPATH];
|
||||
int i,
|
||||
num_files = parray_num(argument->files);
|
||||
int to_root_len = strlen(argument->to_root);
|
||||
|
||||
if (to_backup->compress_alg == PGLZ_COMPRESS ||
|
||||
to_backup->compress_alg == ZLIB_COMPRESS)
|
||||
join_path_components(tmp_file_path, argument->to_root, "tmp");
|
||||
|
||||
for (i = 0; i < num_files; i++)
|
||||
{
|
||||
pgFile *file = (pgFile *) parray_get(argument->files, i);
|
||||
@ -434,8 +427,11 @@ merge_files(void *arg)
|
||||
if (to_backup->compress_alg == PGLZ_COMPRESS ||
|
||||
to_backup->compress_alg == ZLIB_COMPRESS)
|
||||
{
|
||||
char tmp_file_path[MAXPGPATH];
|
||||
char *prev_path;
|
||||
|
||||
snprintf(tmp_file_path, MAXPGPATH, "%s_tmp", to_path_tmp);
|
||||
|
||||
/* Start the magic */
|
||||
|
||||
/*
|
||||
|
src/parsexlog.c (263 lines changed)
@ -12,15 +12,15 @@
|
||||
|
||||
#include "pg_probackup.h"
|
||||
|
||||
#include <time.h>
|
||||
#include <unistd.h>
|
||||
#include "access/transam.h"
|
||||
#include "catalog/pg_control.h"
|
||||
#include "commands/dbcommands_xlog.h"
|
||||
#include "catalog/storage_xlog.h"
|
||||
|
||||
#ifdef HAVE_LIBZ
|
||||
#include <zlib.h>
|
||||
#endif
|
||||
|
||||
#include "commands/dbcommands_xlog.h"
|
||||
#include "catalog/storage_xlog.h"
|
||||
#include "access/transam.h"
|
||||
#include "utils/thread.h"
|
||||
|
||||
/*
|
||||
@ -86,6 +86,7 @@ static bool getRecordTimestamp(XLogReaderState *record, TimestampTz *recordXtime
|
||||
|
||||
typedef struct XLogPageReadPrivate
|
||||
{
|
||||
int thread_num;
|
||||
const char *archivedir;
|
||||
TimeLineID tli;
|
||||
uint32 xlog_seg_size;
|
||||
@ -107,7 +108,6 @@ typedef struct XLogPageReadPrivate
|
||||
/* An argument for a thread function */
|
||||
typedef struct
|
||||
{
|
||||
int thread_num;
|
||||
XLogPageReadPrivate private_data;
|
||||
|
||||
XLogRecPtr startpoint;
|
||||
@ -136,6 +136,75 @@ static void PrintXLogCorruptionMsg(XLogPageReadPrivate *private_data,
|
||||
static XLogSegNo nextSegNoToRead = 0;
|
||||
static pthread_mutex_t wal_segment_mutex = PTHREAD_MUTEX_INITIALIZER;
|
||||
|
||||
/* copied from timestamp.c */
|
||||
static pg_time_t
|
||||
timestamptz_to_time_t(TimestampTz t)
|
||||
{
|
||||
pg_time_t result;
|
||||
|
||||
#ifdef HAVE_INT64_TIMESTAMP
|
||||
result = (pg_time_t) (t / USECS_PER_SEC +
|
||||
((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY));
|
||||
#else
|
||||
result = (pg_time_t) (t +
|
||||
((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY));
|
||||
#endif
|
||||
return result;
|
||||
}
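
The offset added above is the distance between the PostgreSQL epoch (2000-01-01) and the Unix epoch (1970-01-01): 10957 days, i.e. 946684800 seconds. A small self-contained check of that arithmetic (a sketch using the epoch constants PostgreSQL defines):

    #include <stdio.h>
    #include <stdint.h>

    /* Julian day numbers for 2000-01-01 and 1970-01-01 */
    #define POSTGRES_EPOCH_JDATE 2451545
    #define UNIX_EPOCH_JDATE     2440588
    #define SECS_PER_DAY         86400
    #define USECS_PER_SEC        1000000LL

    int main(void)
    {
        /* A TimestampTz on int64 builds is microseconds since 2000-01-01. */
        int64_t ts = 0;                       /* 2000-01-01 00:00:00 UTC */
        int64_t unix_secs = ts / USECS_PER_SEC +
            (int64_t) (POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY;

        /* (2451545 - 2440588) * 86400 = 10957 * 86400 = 946684800 */
        printf("%lld\n", (long long) unix_secs);  /* prints 946684800 */
        return 0;
    }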
|
||||
|
||||
/*
|
||||
* Do manual switch to the next WAL segment.
|
||||
*
|
||||
* Returns false if the reader reaches the end of a WAL segment list.
|
||||
*/
|
||||
static bool
|
||||
switchToNextWal(XLogReaderState *xlogreader, xlog_thread_arg *arg)
|
||||
{
|
||||
XLogPageReadPrivate *private_data;
|
||||
XLogRecPtr found;
|
||||
|
||||
private_data = (XLogPageReadPrivate *) xlogreader->private_data;
|
||||
private_data->need_switch = false;
|
||||
|
||||
/* Critical section */
|
||||
pthread_lock(&wal_segment_mutex);
|
||||
Assert(nextSegNoToRead);
|
||||
private_data->xlogsegno = nextSegNoToRead;
|
||||
nextSegNoToRead++;
|
||||
pthread_mutex_unlock(&wal_segment_mutex);
|
||||
|
||||
/* We've reached the end */
|
||||
if (private_data->xlogsegno > arg->endSegNo)
|
||||
return false;
|
||||
|
||||
/* Adjust next record position */
|
||||
GetXLogRecPtr(private_data->xlogsegno, 0,
|
||||
private_data->xlog_seg_size, arg->startpoint);
|
||||
/* We need to close previously opened file if it wasn't closed earlier */
|
||||
CleanupXLogPageRead(xlogreader);
|
||||
/* Skip over the page header and contrecord if any */
|
||||
found = XLogFindNextRecord(xlogreader, arg->startpoint);
|
||||
|
||||
/*
|
||||
* We get invalid WAL record pointer usually when WAL segment is
|
||||
* absent or is corrupted.
|
||||
*/
|
||||
if (XLogRecPtrIsInvalid(found))
|
||||
{
|
||||
elog(WARNING, "Thread [%d]: could not read WAL record at %X/%X",
|
||||
private_data->thread_num,
|
||||
(uint32) (arg->startpoint >> 32), (uint32) (arg->startpoint));
|
||||
PrintXLogCorruptionMsg(private_data, ERROR);
|
||||
}
|
||||
arg->startpoint = found;
|
||||
|
||||
elog(VERBOSE, "Thread [%d]: switched to LSN %X/%X",
|
||||
private_data->thread_num,
|
||||
(uint32) (arg->startpoint >> 32), (uint32) (arg->startpoint));
|
||||
|
||||
return true;
|
||||
}
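
The function above is the heart of the per-thread WAL reading scheme: each worker claims the next unread segment number from a shared counter under wal_segment_mutex. A minimal sketch of that claiming pattern in isolation (names and counts here are illustrative, not pg_probackup's real types):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t next_seg_no = 1;      /* shared: next segment to hand out */
    static uint64_t end_seg_no  = 8;      /* last segment that needs reading */
    static pthread_mutex_t seg_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void *reader_thread(void *arg)
    {
        int thread_num = *(int *) arg;

        for (;;)
        {
            uint64_t seg;

            /* Critical section: atomically claim one segment number */
            pthread_mutex_lock(&seg_mutex);
            seg = next_seg_no++;
            pthread_mutex_unlock(&seg_mutex);

            if (seg > end_seg_no)
                break;                    /* reached the end of the segment list */

            printf("Thread [%d]: reading segment %llu\n",
                   thread_num, (unsigned long long) seg);
            /* ... open the segment, find the first record, extract the page map ... */
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t threads[4];
        int       nums[4];

        for (int i = 0; i < 4; i++)
        {
            nums[i] = i + 1;
            pthread_create(&threads[i], NULL, reader_thread, &nums[i]);
        }
        for (int i = 0; i < 4; i++)
            pthread_join(threads[i], NULL);
        return 0;
    }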
|
||||
|
||||
/*
|
||||
* extractPageMap() worker.
|
||||
*/
|
||||
@ -146,6 +215,7 @@ doExtractPageMap(void *arg)
|
||||
XLogPageReadPrivate *private_data;
|
||||
XLogReaderState *xlogreader;
|
||||
XLogSegNo nextSegNo = 0;
|
||||
XLogRecPtr found;
|
||||
char *errormsg;
|
||||
|
||||
private_data = &extract_arg->private_data;
|
||||
@ -156,13 +226,27 @@ doExtractPageMap(void *arg)
|
||||
xlogreader = XLogReaderAllocate(&SimpleXLogPageRead, private_data);
|
||||
#endif
|
||||
if (xlogreader == NULL)
|
||||
elog(ERROR, "out of memory");
|
||||
elog(ERROR, "Thread [%d]: out of memory", private_data->thread_num);
|
||||
xlogreader->system_identifier = system_identifier;
|
||||
|
||||
extract_arg->startpoint = XLogFindNextRecord(xlogreader,
|
||||
extract_arg->startpoint);
|
||||
found = XLogFindNextRecord(xlogreader, extract_arg->startpoint);
|
||||
|
||||
elog(VERBOSE, "Start LSN of thread %d: %X/%X",
|
||||
extract_arg->thread_num,
|
||||
/*
|
||||
* We get invalid WAL record pointer usually when WAL segment is absent or
|
||||
* is corrupted.
|
||||
*/
|
||||
if (XLogRecPtrIsInvalid(found))
|
||||
{
|
||||
elog(WARNING, "Thread [%d]: could not read WAL record at %X/%X",
|
||||
private_data->thread_num,
|
||||
(uint32) (extract_arg->startpoint >> 32),
|
||||
(uint32) (extract_arg->startpoint));
|
||||
PrintXLogCorruptionMsg(private_data, ERROR);
|
||||
}
|
||||
extract_arg->startpoint = found;
|
||||
|
||||
elog(VERBOSE, "Thread [%d]: Starting LSN: %X/%X",
|
||||
private_data->thread_num,
|
||||
(uint32) (extract_arg->startpoint >> 32),
|
||||
(uint32) (extract_arg->startpoint));
|
||||
|
||||
@ -174,7 +258,18 @@ doExtractPageMap(void *arg)
|
||||
XLogRecord *record;
|
||||
|
||||
if (interrupted)
|
||||
elog(ERROR, "Interrupted during WAL reading");
|
||||
elog(ERROR, "Thread [%d]: Interrupted during WAL reading",
|
||||
private_data->thread_num);
|
||||
|
||||
/*
|
||||
* We need to switch to the next WAL segment after reading previous
|
||||
* record. It may happen if we read contrecord.
|
||||
*/
|
||||
if (private_data->need_switch)
|
||||
{
|
||||
if (!switchToNextWal(xlogreader, extract_arg))
|
||||
break;
|
||||
}
|
||||
|
||||
record = XLogReadRecord(xlogreader, extract_arg->startpoint, &errormsg);
|
||||
|
||||
@ -183,50 +278,29 @@ doExtractPageMap(void *arg)
|
||||
XLogRecPtr errptr;
|
||||
|
||||
/*
|
||||
* Try to switch to the next WAL segment. Usually
|
||||
* SimpleXLogPageRead() does it by itself. But here we need to do it
|
||||
* manually to support threads.
|
||||
* There is no record, try to switch to the next WAL segment.
|
||||
* Usually SimpleXLogPageRead() does it by itself. But here we need
|
||||
* to do it manually to support threads.
|
||||
*/
|
||||
if (private_data->need_switch)
|
||||
if (private_data->need_switch && errormsg == NULL)
|
||||
{
|
||||
private_data->need_switch = false;
|
||||
|
||||
/* Critical section */
|
||||
pthread_lock(&wal_segment_mutex);
|
||||
Assert(nextSegNoToRead);
|
||||
private_data->xlogsegno = nextSegNoToRead;
|
||||
nextSegNoToRead++;
|
||||
pthread_mutex_unlock(&wal_segment_mutex);
|
||||
|
||||
/* We reach the end */
|
||||
if (private_data->xlogsegno > extract_arg->endSegNo)
|
||||
if (switchToNextWal(xlogreader, extract_arg))
|
||||
continue;
|
||||
else
|
||||
break;
|
||||
|
||||
/* Adjust next record position */
|
||||
GetXLogRecPtr(private_data->xlogsegno, 0,
|
||||
private_data->xlog_seg_size,
|
||||
extract_arg->startpoint);
|
||||
/* Skip over the page header */
|
||||
extract_arg->startpoint = XLogFindNextRecord(xlogreader,
|
||||
extract_arg->startpoint);
|
||||
|
||||
elog(VERBOSE, "Thread %d switched to LSN %X/%X",
|
||||
extract_arg->thread_num,
|
||||
(uint32) (extract_arg->startpoint >> 32),
|
||||
(uint32) (extract_arg->startpoint));
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
errptr = extract_arg->startpoint ?
|
||||
extract_arg->startpoint : xlogreader->EndRecPtr;
|
||||
|
||||
if (errormsg)
|
||||
elog(WARNING, "could not read WAL record at %X/%X: %s",
|
||||
elog(WARNING, "Thread [%d]: could not read WAL record at %X/%X: %s",
|
||||
private_data->thread_num,
|
||||
(uint32) (errptr >> 32), (uint32) (errptr),
|
||||
errormsg);
|
||||
else
|
||||
elog(WARNING, "could not read WAL record at %X/%X",
|
||||
elog(WARNING, "Thread [%d]: could not read WAL record at %X/%X",
|
||||
private_data->thread_num,
|
||||
(uint32) (errptr >> 32), (uint32) (errptr));
|
||||
|
||||
/*
|
||||
@ -245,7 +319,7 @@ doExtractPageMap(void *arg)
|
||||
GetXLogSegNo(xlogreader->EndRecPtr, nextSegNo,
|
||||
private_data->xlog_seg_size);
|
||||
} while (nextSegNo <= extract_arg->endSegNo &&
|
||||
xlogreader->EndRecPtr < extract_arg->endpoint);
|
||||
xlogreader->ReadRecPtr < extract_arg->endpoint);
|
||||
|
||||
CleanupXLogPageRead(xlogreader);
|
||||
XLogReaderFree(xlogreader);
|
||||
@ -259,16 +333,12 @@ doExtractPageMap(void *arg)
|
||||
* Read WAL from the archive directory, from 'startpoint' to 'endpoint' on the
|
||||
* given timeline. Collect data blocks touched by the WAL records into a page map.
|
||||
*
|
||||
* If **prev_segno** is true then read all segments up to **endpoint** segment
|
||||
* minus one. Else read all segments up to **endpoint** segment.
|
||||
*
|
||||
* Pagemap extracting is processed using threads. Eeach thread reads single WAL
|
||||
* file.
|
||||
*/
|
||||
void
|
||||
extractPageMap(const char *archivedir, TimeLineID tli, uint32 seg_size,
|
||||
XLogRecPtr startpoint, XLogRecPtr endpoint, bool prev_seg,
|
||||
parray *files)
|
||||
XLogRecPtr startpoint, XLogRecPtr endpoint, parray *files)
|
||||
{
|
||||
int i;
|
||||
int threads_need = 0;
|
||||
@ -289,8 +359,6 @@ extractPageMap(const char *archivedir, TimeLineID tli, uint32 seg_size,
|
||||
(uint32) (endpoint >> 32), (uint32) (endpoint));
|
||||
|
||||
GetXLogSegNo(endpoint, endSegNo, seg_size);
|
||||
if (prev_seg)
|
||||
endSegNo--;
|
||||
|
||||
nextSegNoToRead = 0;
|
||||
time(&start_time);
|
||||
@ -308,7 +376,7 @@ extractPageMap(const char *archivedir, TimeLineID tli, uint32 seg_size,
|
||||
{
|
||||
InitXLogPageRead(&thread_args[i].private_data, archivedir, tli,
|
||||
seg_size, false);
|
||||
thread_args[i].thread_num = i;
|
||||
thread_args[i].private_data.thread_num = i + 1;
|
||||
|
||||
thread_args[i].startpoint = startpoint;
|
||||
thread_args[i].endpoint = endpoint;
|
||||
@ -316,6 +384,8 @@ extractPageMap(const char *archivedir, TimeLineID tli, uint32 seg_size,
|
||||
/* By default there is some error */
|
||||
thread_args[i].ret = 1;
|
||||
|
||||
threads_need++;
|
||||
|
||||
/* Adjust startpoint to the next thread */
|
||||
if (nextSegNoToRead == 0)
|
||||
GetXLogSegNo(startpoint, nextSegNoToRead, seg_size);
|
||||
@ -328,16 +398,12 @@ extractPageMap(const char *archivedir, TimeLineID tli, uint32 seg_size,
|
||||
if (nextSegNoToRead > endSegNo)
|
||||
break;
|
||||
GetXLogRecPtr(nextSegNoToRead, 0, seg_size, startpoint);
|
||||
/* Skip over the page header */
|
||||
startpoint += SizeOfXLogLongPHD;
|
||||
|
||||
threads_need++;
|
||||
}
|
||||
|
||||
/* Run threads */
|
||||
for (i = 0; i < threads_need; i++)
|
||||
{
|
||||
elog(VERBOSE, "Start WAL reader thread: %d", i);
|
||||
elog(VERBOSE, "Start WAL reader thread: %d", i + 1);
|
||||
pthread_create(&threads[i], NULL, doExtractPageMap, &thread_args[i]);
|
||||
}
|
||||
|
||||
@ -408,7 +474,7 @@ validate_backup_wal_from_start_to_stop(pgBackup *backup,
|
||||
* the backup is definitely corrupted. Update its status.
|
||||
*/
|
||||
backup->status = BACKUP_STATUS_CORRUPT;
|
||||
pgBackupWriteBackupControlFile(backup);
|
||||
write_backup_status(backup);
|
||||
|
||||
elog(WARNING, "There are not enough WAL records to consistenly restore "
|
||||
"backup %s from START LSN: %X/%X to STOP LSN: %X/%X",
|
||||
@ -734,15 +800,39 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
|
||||
if (!IsInXLogSeg(targetPagePtr, private_data->xlogsegno,
|
||||
private_data->xlog_seg_size))
|
||||
{
|
||||
CleanupXLogPageRead(xlogreader);
|
||||
elog(VERBOSE, "Thread [%d]: Need to switch to segno next to %X/%X, current LSN %X/%X",
|
||||
private_data->thread_num,
|
||||
(uint32) (targetPagePtr >> 32), (uint32) (targetPagePtr),
|
||||
(uint32) (xlogreader->currRecPtr >> 32),
|
||||
(uint32) (xlogreader->currRecPtr ));
|
||||
|
||||
/*
|
||||
* Do not switch to next WAL segment in this function. Currently it is
|
||||
* manually switched only in doExtractPageMap().
|
||||
* if the last record on the page is not complete,
|
||||
* we must continue reading pages in the same thread
|
||||
*/
|
||||
if (private_data->manual_switch)
|
||||
if (!XLogRecPtrIsInvalid(xlogreader->currRecPtr) &&
|
||||
xlogreader->currRecPtr < targetPagePtr)
|
||||
{
|
||||
private_data->need_switch = true;
|
||||
return -1;
|
||||
CleanupXLogPageRead(xlogreader);
|
||||
|
||||
/*
|
||||
* Switch to the next WAL segment after reading contrecord.
|
||||
*/
|
||||
if (private_data->manual_switch)
|
||||
private_data->need_switch = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
CleanupXLogPageRead(xlogreader);
|
||||
/*
|
||||
* Do not switch to next WAL segment in this function. Currently it is
|
||||
* manually switched only in doExtractPageMap().
|
||||
*/
|
||||
if (private_data->manual_switch)
|
||||
{
|
||||
private_data->need_switch = true;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -761,7 +851,9 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
|
||||
|
||||
if (fileExists(private_data->xlogpath))
|
||||
{
|
||||
elog(LOG, "Opening WAL segment \"%s\"", private_data->xlogpath);
|
||||
elog(LOG, "Thread [%d]: Opening WAL segment \"%s\"",
|
||||
private_data->thread_num,
|
||||
private_data->xlogpath);
|
||||
|
||||
private_data->xlogexists = true;
|
||||
private_data->xlogfile = open(private_data->xlogpath,
|
||||
@ -769,8 +861,10 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
|
||||
|
||||
if (private_data->xlogfile < 0)
|
||||
{
|
||||
elog(WARNING, "Could not open WAL segment \"%s\": %s",
|
||||
private_data->xlogpath, strerror(errno));
|
||||
elog(WARNING, "Thread [%d]: Could not open WAL segment \"%s\": %s",
|
||||
private_data->thread_num,
|
||||
private_data->xlogpath,
|
||||
strerror(errno));
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
@ -783,16 +877,16 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
|
||||
private_data->xlogpath);
|
||||
if (fileExists(private_data->gz_xlogpath))
|
||||
{
|
||||
elog(LOG, "Opening compressed WAL segment \"%s\"",
|
||||
private_data->gz_xlogpath);
|
||||
elog(LOG, "Thread [%d]: Opening compressed WAL segment \"%s\"",
|
||||
private_data->thread_num, private_data->gz_xlogpath);
|
||||
|
||||
private_data->xlogexists = true;
|
||||
private_data->gz_xlogfile = gzopen(private_data->gz_xlogpath,
|
||||
"rb");
|
||||
if (private_data->gz_xlogfile == NULL)
|
||||
{
|
||||
elog(WARNING, "Could not open compressed WAL segment \"%s\": %s",
|
||||
private_data->gz_xlogpath, strerror(errno));
|
||||
elog(WARNING, "Thread [%d]: Could not open compressed WAL segment \"%s\": %s",
|
||||
private_data->thread_num, private_data->gz_xlogpath, strerror(errno));
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
@ -814,15 +908,15 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
|
||||
{
|
||||
if (lseek(private_data->xlogfile, (off_t) targetPageOff, SEEK_SET) < 0)
|
||||
{
|
||||
elog(WARNING, "Could not seek in WAL segment \"%s\": %s",
|
||||
private_data->xlogpath, strerror(errno));
|
||||
elog(WARNING, "Thread [%d]: Could not seek in WAL segment \"%s\": %s",
|
||||
private_data->thread_num, private_data->xlogpath, strerror(errno));
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (read(private_data->xlogfile, readBuf, XLOG_BLCKSZ) != XLOG_BLCKSZ)
|
||||
{
|
||||
elog(WARNING, "Could not read from WAL segment \"%s\": %s",
|
||||
private_data->xlogpath, strerror(errno));
|
||||
elog(WARNING, "Thread [%d]: Could not read from WAL segment \"%s\": %s",
|
||||
private_data->thread_num, private_data->xlogpath, strerror(errno));
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
@ -831,7 +925,8 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
|
||||
{
|
||||
if (gzseek(private_data->gz_xlogfile, (z_off_t) targetPageOff, SEEK_SET) == -1)
|
||||
{
|
||||
elog(WARNING, "Could not seek in compressed WAL segment \"%s\": %s",
|
||||
elog(WARNING, "Thread [%d]: Could not seek in compressed WAL segment \"%s\": %s",
|
||||
private_data->thread_num,
|
||||
private_data->gz_xlogpath,
|
||||
get_gz_error(private_data->gz_xlogfile));
|
||||
return -1;
|
||||
@ -839,7 +934,8 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
|
||||
|
||||
if (gzread(private_data->gz_xlogfile, readBuf, XLOG_BLCKSZ) != XLOG_BLCKSZ)
|
||||
{
|
||||
elog(WARNING, "Could not read from compressed WAL segment \"%s\": %s",
|
||||
elog(WARNING, "Thread [%d]: Could not read from compressed WAL segment \"%s\": %s",
|
||||
private_data->thread_num,
|
||||
private_data->gz_xlogpath,
|
||||
get_gz_error(private_data->gz_xlogfile));
|
||||
return -1;
|
||||
@ -876,6 +972,7 @@ InitXLogPageRead(XLogPageReadPrivate *private_data, const char *archivedir,
|
||||
#endif
|
||||
if (xlogreader == NULL)
|
||||
elog(ERROR, "out of memory");
|
||||
xlogreader->system_identifier = system_identifier;
|
||||
}
|
||||
|
||||
return xlogreader;
|
||||
@ -915,15 +1012,19 @@ PrintXLogCorruptionMsg(XLogPageReadPrivate *private_data, int elevel)
|
||||
* We throw a WARNING here to be able to update backup status.
|
||||
*/
|
||||
if (!private_data->xlogexists)
|
||||
elog(elevel, "WAL segment \"%s\" is absent", private_data->xlogpath);
|
||||
elog(elevel, "Thread [%d]: WAL segment \"%s\" is absent",
|
||||
private_data->thread_num,
|
||||
private_data->xlogpath);
|
||||
else if (private_data->xlogfile != -1)
|
||||
elog(elevel, "Possible WAL corruption. "
|
||||
elog(elevel, "Thread [%d]: Possible WAL corruption. "
|
||||
"Error has occured during reading WAL segment \"%s\"",
|
||||
private_data->thread_num,
|
||||
private_data->xlogpath);
|
||||
#ifdef HAVE_LIBZ
|
||||
else if (private_data->gz_xlogfile != NULL)
|
||||
elog(elevel, "Possible WAL corruption. "
|
||||
elog(elevel, "Thread [%d]: Possible WAL corruption. "
|
||||
"Error has occured during reading WAL segment \"%s\"",
|
||||
private_data->thread_num,
|
||||
private_data->gz_xlogpath);
|
||||
#endif
|
||||
}
|
||||
src/pg_probackup.c
@ -9,20 +9,36 @@
|
||||
*/
|
||||
|
||||
#include "pg_probackup.h"
|
||||
|
||||
#include "pg_getopt.h"
|
||||
#include "streamutil.h"
|
||||
|
||||
#include <sys/stat.h>
|
||||
|
||||
#include "utils/thread.h"
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <time.h>
|
||||
#include <sys/stat.h>
|
||||
#include <unistd.h>
|
||||
#include "pg_getopt.h"
|
||||
|
||||
const char *PROGRAM_VERSION = "2.0.18";
|
||||
const char *PROGRAM_VERSION = "2.0.21";
|
||||
const char *PROGRAM_URL = "https://github.com/postgrespro/pg_probackup";
|
||||
const char *PROGRAM_EMAIL = "https://github.com/postgrespro/pg_probackup/issues";
|
||||
|
||||
typedef enum ProbackupSubcmd
|
||||
{
|
||||
NO_CMD = 0,
|
||||
INIT_CMD,
|
||||
ADD_INSTANCE_CMD,
|
||||
DELETE_INSTANCE_CMD,
|
||||
ARCHIVE_PUSH_CMD,
|
||||
ARCHIVE_GET_CMD,
|
||||
BACKUP_CMD,
|
||||
RESTORE_CMD,
|
||||
VALIDATE_CMD,
|
||||
DELETE_CMD,
|
||||
MERGE_CMD,
|
||||
SHOW_CMD,
|
||||
SET_CONFIG_CMD,
|
||||
SHOW_CONFIG_CMD
|
||||
} ProbackupSubcmd;
|
||||
|
||||
/* directory options */
|
||||
char *backup_path = NULL;
|
||||
char *pgdata = NULL;
|
||||
@ -113,7 +129,7 @@ ShowFormat show_format = SHOW_PLAIN;
|
||||
|
||||
/* current settings */
|
||||
pgBackup current;
|
||||
ProbackupSubcmd backup_subcmd = NO_CMD;
|
||||
static ProbackupSubcmd backup_subcmd = NO_CMD;
|
||||
|
||||
static bool help_opt = false;
|
||||
|
||||
@ -182,8 +198,8 @@ static pgut_option options[] =
|
||||
{ 's', 142, "log-filename", &log_filename, SOURCE_CMDLINE },
|
||||
{ 's', 143, "error-log-filename", &error_log_filename, SOURCE_CMDLINE },
|
||||
{ 's', 144, "log-directory", &log_directory, SOURCE_CMDLINE },
|
||||
{ 'u', 145, "log-rotation-size", &log_rotation_size, SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_KB },
|
||||
{ 'u', 146, "log-rotation-age", &log_rotation_age, SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_MIN },
|
||||
{ 'U', 145, "log-rotation-size", &log_rotation_size, SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_KB },
|
||||
{ 'U', 146, "log-rotation-age", &log_rotation_age, SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_MS },
|
||||
/* connection options */
|
||||
{ 's', 'd', "pgdatabase", &pgut_dbname, SOURCE_CMDLINE },
|
||||
{ 's', 'h', "pghost", &host, SOURCE_CMDLINE },
|
||||
@ -534,7 +550,8 @@ main(int argc, char *argv[])
|
||||
if (delete_expired)
|
||||
return do_retention_purge();
|
||||
else
|
||||
return do_delete(current.backup_id);
|
||||
do_delete(current.backup_id);
|
||||
break;
|
||||
case MERGE_CMD:
|
||||
do_merge(current.backup_id);
|
||||
break;
|
||||
src/pg_probackup.h
@ -11,40 +11,25 @@
|
||||
#define PG_PROBACKUP_H
|
||||
|
||||
#include "postgres_fe.h"
|
||||
#include "libpq-fe.h"
|
||||
|
||||
#include <limits.h>
|
||||
#include <libpq-fe.h>
|
||||
|
||||
#include "access/timeline.h"
|
||||
#include "access/xlogdefs.h"
|
||||
#include "access/xlog_internal.h"
|
||||
#include "catalog/pg_control.h"
|
||||
#include "storage/block.h"
|
||||
#include "storage/bufpage.h"
|
||||
#include "storage/checksum.h"
|
||||
#include "utils/pg_crc.h"
|
||||
#include "common/relpath.h"
|
||||
#include "port.h"
|
||||
|
||||
#ifdef FRONTEND
|
||||
#undef FRONTEND
|
||||
#include "port/atomics.h"
|
||||
#include "port/atomics.h"
|
||||
#define FRONTEND
|
||||
#else
|
||||
#include "port/atomics.h"
|
||||
#endif
|
||||
|
||||
#include "utils/logger.h"
|
||||
#include "utils/parray.h"
|
||||
#include "utils/pgut.h"
|
||||
|
||||
#include "datapagemap.h"
|
||||
|
||||
# define PG_STOP_BACKUP_TIMEOUT 300
|
||||
/*
|
||||
* Macro needed to parse ptrack.
|
||||
* NOTE Keep those values syncronised with definitions in ptrack.h
|
||||
*/
|
||||
#define PTRACK_BITS_PER_HEAPBLOCK 1
|
||||
#define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / PTRACK_BITS_PER_HEAPBLOCK)
|
||||
|
||||
/* Directory/File names */
|
||||
#define DATABASE_DIR "database"
|
||||
#define BACKUPS_DIR "backups"
|
||||
@ -63,8 +48,6 @@
|
||||
#define PG_BLACK_LIST "black_list"
|
||||
#define PG_TABLESPACE_MAP_FILE "tablespace_map"
|
||||
|
||||
#define LOG_FILENAME_DEFAULT "pg_probackup.log"
|
||||
#define LOG_DIRECTORY_DEFAULT "log"
|
||||
/* Direcotry/File permission */
|
||||
#define DIR_PERMISSION (0700)
|
||||
#define FILE_PERMISSION (0600)
|
||||
@ -141,24 +124,6 @@ typedef enum BackupMode
|
||||
BACKUP_MODE_FULL /* full backup */
|
||||
} BackupMode;
|
||||
|
||||
typedef enum ProbackupSubcmd
|
||||
{
|
||||
NO_CMD = 0,
|
||||
INIT_CMD,
|
||||
ADD_INSTANCE_CMD,
|
||||
DELETE_INSTANCE_CMD,
|
||||
ARCHIVE_PUSH_CMD,
|
||||
ARCHIVE_GET_CMD,
|
||||
BACKUP_CMD,
|
||||
RESTORE_CMD,
|
||||
VALIDATE_CMD,
|
||||
DELETE_CMD,
|
||||
MERGE_CMD,
|
||||
SHOW_CMD,
|
||||
SET_CONFIG_CMD,
|
||||
SHOW_CONFIG_CMD
|
||||
} ProbackupSubcmd;
|
||||
|
||||
typedef enum ShowFormat
|
||||
{
|
||||
SHOW_PLAIN,
|
||||
@ -195,8 +160,8 @@ typedef struct pgBackupConfig
|
||||
char *log_filename;
|
||||
char *error_log_filename;
|
||||
char *log_directory;
|
||||
int log_rotation_size;
|
||||
int log_rotation_age;
|
||||
uint64 log_rotation_size;
|
||||
uint64 log_rotation_age;
|
||||
|
||||
uint32 retention_redundancy;
|
||||
uint32 retention_window;
|
||||
@ -205,12 +170,9 @@ typedef struct pgBackupConfig
|
||||
int compress_level;
|
||||
} pgBackupConfig;
|
||||
|
||||
|
||||
/* Information about single backup stored in backup.conf */
|
||||
|
||||
|
||||
typedef struct pgBackup pgBackup;
|
||||
|
||||
/* Information about single backup stored in backup.conf */
|
||||
struct pgBackup
|
||||
{
|
||||
BackupMode backup_mode; /* Mode - one of BACKUP_MODE_xxx above*/
|
||||
@ -286,13 +248,6 @@ typedef struct pgRecoveryTarget
|
||||
bool restore_no_validate;
|
||||
} pgRecoveryTarget;
|
||||
|
||||
/* Union to ease operations on relation pages */
|
||||
typedef union DataPage
|
||||
{
|
||||
PageHeaderData page_data;
|
||||
char data[BLCKSZ];
|
||||
} DataPage;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
const char *from_root;
|
||||
@ -403,11 +358,6 @@ extern CompressAlg compress_alg;
|
||||
extern int compress_level;
|
||||
extern bool compress_shortcut;
|
||||
|
||||
#define COMPRESS_ALG_DEFAULT NOT_DEFINED_COMPRESS
|
||||
#define COMPRESS_LEVEL_DEFAULT 1
|
||||
|
||||
extern CompressAlg parse_compress_alg(const char *arg);
|
||||
extern const char* deparse_compress_alg(int alg);
|
||||
/* other options */
|
||||
extern char *instance_name;
|
||||
extern uint64 system_identifier;
|
||||
@ -418,7 +368,6 @@ extern ShowFormat show_format;
|
||||
|
||||
/* current settings */
|
||||
extern pgBackup current;
|
||||
extern ProbackupSubcmd backup_subcmd;
|
||||
|
||||
/* in dir.c */
|
||||
/* exclude directory list for $PGDATA file listing */
|
||||
@ -475,7 +424,7 @@ extern uint32 get_config_xlog_seg_size(void);
|
||||
extern int do_show(time_t requested_backup_id);
|
||||
|
||||
/* in delete.c */
|
||||
extern int do_delete(time_t backup_id);
|
||||
extern void do_delete(time_t backup_id);
|
||||
extern int do_retention_purge(void);
|
||||
extern int do_delete_instance(void);
|
||||
|
||||
@ -496,6 +445,9 @@ extern int do_validate_all(void);
|
||||
|
||||
/* in catalog.c */
|
||||
extern pgBackup *read_backup(time_t timestamp);
|
||||
extern void write_backup(pgBackup *backup);
|
||||
extern void write_backup_status(pgBackup *backup);
|
||||
|
||||
extern const char *pgBackupGetBackupMode(pgBackup *backup);
|
||||
|
||||
extern parray *catalog_get_backup_list(time_t requested_backup_id);
|
||||
@ -503,7 +455,6 @@ extern pgBackup *catalog_get_last_data_backup(parray *backup_list,
|
||||
TimeLineID tli);
|
||||
extern void catalog_lock(void);
|
||||
extern void pgBackupWriteControl(FILE *out, pgBackup *backup);
|
||||
extern void pgBackupWriteBackupControlFile(pgBackup *backup);
|
||||
extern void pgBackupWriteFileList(pgBackup *backup, parray *files,
|
||||
const char *root);
|
||||
|
||||
@ -517,7 +468,16 @@ extern void pgBackupFree(void *backup);
|
||||
extern int pgBackupCompareId(const void *f1, const void *f2);
|
||||
extern int pgBackupCompareIdDesc(const void *f1, const void *f2);
|
||||
|
||||
extern pgBackup* find_parent_backup(pgBackup *current_backup);
|
||||
extern pgBackup* find_parent_full_backup(pgBackup *current_backup);
|
||||
extern int scan_parent_chain(pgBackup *current_backup, pgBackup **result_backup);
|
||||
extern bool is_parent(time_t parent_backup_time, pgBackup *child_backup, bool inclusive);
|
||||
extern int get_backup_index_number(parray *backup_list, pgBackup *backup);
|
||||
|
||||
#define COMPRESS_ALG_DEFAULT NOT_DEFINED_COMPRESS
|
||||
#define COMPRESS_LEVEL_DEFAULT 1
|
||||
|
||||
extern CompressAlg parse_compress_alg(const char *arg);
|
||||
extern const char* deparse_compress_alg(int alg);
|
||||
|
||||
/* in dir.c */
|
||||
extern void dir_list_file(parray *files, const char *root, bool exclude,
|
||||
@ -566,11 +526,14 @@ extern void get_wal_file(const char *from_path, const char *to_path);
|
||||
|
||||
extern bool calc_file_checksum(pgFile *file);
|
||||
|
||||
extern bool check_file_pages(pgFile* file,
|
||||
XLogRecPtr stop_lsn, uint32 checksum_version);
|
||||
|
||||
/* parsexlog.c */
|
||||
extern void extractPageMap(const char *datadir,
|
||||
extern void extractPageMap(const char *archivedir,
|
||||
TimeLineID tli, uint32 seg_size,
|
||||
XLogRecPtr startpoint, XLogRecPtr endpoint,
|
||||
bool prev_seg, parray *backup_files_list);
|
||||
parray *files);
|
||||
extern void validate_wal(pgBackup *backup,
|
||||
const char *archivedir,
|
||||
time_t target_time,
|
||||
@ -587,24 +550,22 @@ extern bool wal_contains_lsn(const char *archivedir, XLogRecPtr target_lsn,
|
||||
|
||||
/* in util.c */
|
||||
extern TimeLineID get_current_timeline(bool safe);
|
||||
extern XLogRecPtr get_checkpoint_location(PGconn *conn);
|
||||
extern uint64 get_system_identifier(char *pgdata);
|
||||
extern uint64 get_remote_system_identifier(PGconn *conn);
|
||||
extern uint32 get_data_checksum_version(bool safe);
|
||||
extern uint32 get_xlog_seg_size(char *pgdata_path);
|
||||
|
||||
extern void sanityChecks(void);
|
||||
extern void time2iso(char *buf, size_t len, time_t time);
|
||||
extern const char *status2str(BackupStatus status);
|
||||
extern void remove_trailing_space(char *buf, int comment_mark);
|
||||
extern void remove_not_digit(char *buf, size_t len, const char *str);
|
||||
extern uint32 get_data_checksum_version(bool safe);
|
||||
extern const char *base36enc(long unsigned int value);
|
||||
extern char *base36enc_dup(long unsigned int value);
|
||||
extern long unsigned int base36dec(const char *text);
|
||||
extern uint64 get_system_identifier(char *pgdata);
|
||||
extern uint64 get_remote_system_identifier(PGconn *conn);
|
||||
extern uint32 get_xlog_seg_size(char *pgdata_path);
|
||||
extern pg_time_t timestamptz_to_time_t(TimestampTz t);
|
||||
extern int parse_server_version(char *server_version_str);
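
The base36enc/base36dec declarations above are what turn a backup's start_time into the short backup ID used throughout the tool. A rough standalone sketch of base-36 encoding and decoding (a hypothetical helper, not the project's implementation):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Encode an unsigned value using the digits 0-9A-Z (base 36). */
    static void to_base36(unsigned long value, char *buf, size_t buflen)
    {
        const char digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
        char tmp[32];
        int  i = 0;
        int  n = 0;

        do
        {
            tmp[i++] = digits[value % 36];
            value /= 36;
        } while (value > 0 && i < (int) sizeof(tmp));

        /* digits were produced least-significant first, so reverse them */
        while (i > 0 && n < (int) buflen - 1)
            buf[n++] = tmp[--i];
        buf[n] = '\0';
    }

    int main(void)
    {
        char id[32];
        unsigned long start_time = 1533306000UL;   /* some time_t value */

        to_base36(start_time, id, sizeof(id));
        printf("backup ID: %s\n", id);

        /* decoding is just strtoul with base 36 */
        printf("round trip: %lu\n", strtoul(id, NULL, 36));
        return 0;
    }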
|
||||
|
||||
/* in status.c */
|
||||
extern bool is_pg_running(void);
|
||||
|
||||
#ifdef WIN32
|
||||
#ifdef _DEBUG
|
||||
#define lseek _lseek
|
||||
|
src/restore.c (200 lines changed)
@ -10,13 +10,11 @@
|
||||
|
||||
#include "pg_probackup.h"
|
||||
|
||||
#include <fcntl.h>
|
||||
#include "access/timeline.h"
|
||||
|
||||
#include <sys/stat.h>
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include "catalog/pg_control.h"
|
||||
#include "utils/logger.h"
|
||||
#include "utils/thread.h"
|
||||
|
||||
typedef struct
|
||||
@ -47,7 +45,9 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
|
||||
bool is_restore)
|
||||
{
|
||||
int i = 0;
|
||||
int j = 0;
|
||||
parray *backups;
|
||||
pgBackup *tmp_backup = NULL;
|
||||
pgBackup *current_backup = NULL;
|
||||
pgBackup *dest_backup = NULL;
|
||||
pgBackup *base_full_backup = NULL;
|
||||
@ -110,14 +110,21 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
|
||||
{
|
||||
|
||||
/* backup is not ok,
|
||||
* but in case of CORRUPT, ORPHAN or DONE revalidation can be done,
|
||||
* but in case of CORRUPT, ORPHAN or DONE revalidation is possible
|
||||
* unless --no-validate is used,
|
||||
* in other cases throw an error.
|
||||
*/
|
||||
// 1. validate
|
||||
// 2. validate -i INVALID_ID <- allowed revalidate
|
||||
// 3. restore -i INVALID_ID <- allowed revalidate and restore
|
||||
// 4. restore <- impossible
|
||||
// 5. restore --no-validate <- forbidden
|
||||
if (current_backup->status != BACKUP_STATUS_OK)
|
||||
{
|
||||
if (current_backup->status == BACKUP_STATUS_DONE ||
|
||||
if ((current_backup->status == BACKUP_STATUS_DONE ||
|
||||
current_backup->status == BACKUP_STATUS_ORPHAN ||
|
||||
current_backup->status == BACKUP_STATUS_CORRUPT)
|
||||
&& !rt->restore_no_validate)
|
||||
elog(WARNING, "Backup %s has status: %s",
|
||||
base36enc(current_backup->start_time), status2str(current_backup->status));
|
||||
else
|
||||
@ -159,25 +166,96 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
|
||||
* Save it as dest_backup
|
||||
*/
|
||||
dest_backup = current_backup;
|
||||
dest_backup_index = i-1;
|
||||
}
|
||||
}
|
||||
|
||||
if (dest_backup == NULL)
|
||||
elog(ERROR, "Backup satisfying target options is not found.");
|
||||
|
||||
dest_backup_index = get_backup_index_number(backups, dest_backup);
|
||||
|
||||
/* If we already found dest_backup, look for full backup. */
|
||||
if (dest_backup)
|
||||
if (dest_backup->backup_mode == BACKUP_MODE_FULL)
|
||||
base_full_backup = dest_backup;
|
||||
else
|
||||
{
|
||||
base_full_backup = current_backup;
|
||||
int result;
|
||||
|
||||
if (current_backup->backup_mode != BACKUP_MODE_FULL)
|
||||
result = scan_parent_chain(dest_backup, &tmp_backup);
|
||||
|
||||
if (result == 0)
|
||||
{
|
||||
base_full_backup = find_parent_backup(current_backup);
|
||||
/* chain is broken, determine missing backup ID
|
||||
* and orphinize all his descendants
|
||||
*/
|
||||
char *missing_backup_id;
|
||||
time_t missing_backup_start_time;
|
||||
|
||||
if (base_full_backup == NULL)
|
||||
elog(ERROR, "Valid full backup for backup %s is not found.",
|
||||
base36enc(current_backup->start_time));
|
||||
missing_backup_start_time = tmp_backup->parent_backup;
|
||||
missing_backup_id = base36enc_dup(tmp_backup->parent_backup);
|
||||
|
||||
for (j = get_backup_index_number(backups, tmp_backup); j >= 0; j--)
|
||||
{
|
||||
pgBackup *backup = (pgBackup *) parray_get(backups, j);
|
||||
|
||||
/* use parent backup start_time because he is missing
|
||||
* and we must orphinize his descendants
|
||||
*/
|
||||
if (is_parent(missing_backup_start_time, backup, false))
|
||||
{
|
||||
if (backup->status == BACKUP_STATUS_OK)
|
||||
{
|
||||
backup->status = BACKUP_STATUS_ORPHAN;
|
||||
write_backup_status(backup);
|
||||
|
||||
elog(WARNING, "Backup %s is orphaned because his parent %s is missing",
|
||||
base36enc(backup->start_time), missing_backup_id);
|
||||
}
|
||||
else
|
||||
{
|
||||
elog(WARNING, "Backup %s has missing parent %s",
|
||||
base36enc(backup->start_time), missing_backup_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
/* No point in doing futher */
|
||||
elog(ERROR, "%s of backup %s failed.", action, base36enc(dest_backup->start_time));
|
||||
}
|
||||
else if (result == 1)
|
||||
{
|
||||
/* chain is intact, but at least one parent is invalid */
|
||||
char *parent_backup_id;
|
||||
|
||||
/* parent_backup_id contain human-readable backup ID of oldest invalid backup */
|
||||
parent_backup_id = base36enc_dup(tmp_backup->start_time);
|
||||
|
||||
for (j = get_backup_index_number(backups, tmp_backup) - 1; j >= 0; j--)
|
||||
{
|
||||
|
||||
pgBackup *backup = (pgBackup *) parray_get(backups, j);
|
||||
|
||||
if (is_parent(tmp_backup->start_time, backup, false))
|
||||
{
|
||||
if (backup->status == BACKUP_STATUS_OK)
|
||||
{
|
||||
backup->status = BACKUP_STATUS_ORPHAN;
|
||||
write_backup_status(backup);
|
||||
|
||||
elog(WARNING,
|
||||
"Backup %s is orphaned because his parent %s has status: %s",
|
||||
base36enc(backup->start_time),
|
||||
parent_backup_id,
|
||||
status2str(tmp_backup->status));
|
||||
}
|
||||
else
|
||||
{
|
||||
elog(WARNING, "Backup %s has parent %s with status: %s",
|
||||
base36enc(backup->start_time), parent_backup_id,
|
||||
status2str(tmp_backup->status));
|
||||
}
|
||||
}
|
||||
}
|
||||
tmp_backup = find_parent_full_backup(dest_backup);
|
||||
}
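
The two branches above depend on what scan_parent_chain reports while walking from the destination backup towards its FULL ancestor; from this diff, 0 means some parent is missing from the catalog and 1 means the chain is complete but contains an invalid ancestor (the success value used below is assumed). A small illustrative model of such a walk:

    #include <stdio.h>
    #include <stddef.h>

    typedef struct ChainBackup
    {
        const char         *id;
        int                 is_full;
        int                 is_ok;
        struct ChainBackup *parent;     /* NULL models a parent missing from the catalog */
    } ChainBackup;

    /* 0 = broken chain, 1 = intact but contains an invalid ancestor, 2 = intact and valid */
    static int scan_chain(ChainBackup *dest, ChainBackup **oldest_bad)
    {
        ChainBackup *cur = dest;

        while (!cur->is_full)
        {
            if (cur->parent == NULL)
            {
                *oldest_bad = cur;      /* its parent is the missing backup */
                return 0;
            }
            cur = cur->parent;
        }

        /* chain reaches a FULL backup; find the oldest invalid ancestor, if any */
        *oldest_bad = NULL;
        for (ChainBackup *b = dest; b != NULL; b = b->parent)
            if (!b->is_ok)
                *oldest_bad = b;
        return *oldest_bad ? 1 : 2;
    }

    int main(void)
    {
        ChainBackup full = {"FULL", 1, 1, NULL};
        ChainBackup page = {"PAGE", 0, 0, &full};
        ChainBackup dest = {"DEST", 0, 1, &page};
        ChainBackup *bad;

        printf("result=%d oldest invalid=%s\n",
               scan_chain(&dest, &bad), bad ? bad->id : "none");
        return 0;
    }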
|
||||
|
||||
/*
|
||||
@ -187,20 +265,14 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
|
||||
* TODO I think we should rewrite it someday to use double linked list
|
||||
* and avoid relying on sort order anymore.
|
||||
*/
|
||||
for (i = dest_backup_index; i < parray_num(backups); i++)
|
||||
{
|
||||
pgBackup * temp_backup = (pgBackup *) parray_get(backups, i);
|
||||
if (temp_backup->start_time == base_full_backup->start_time)
|
||||
{
|
||||
base_full_backup_index = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
base_full_backup = tmp_backup;
|
||||
}
|
||||
|
||||
if (base_full_backup == NULL)
|
||||
elog(ERROR, "Full backup satisfying target options is not found.");
|
||||
|
||||
base_full_backup_index = get_backup_index_number(backups, base_full_backup);
|
||||
|
||||
/*
|
||||
* Ensure that directories provided in tablespace mapping are valid
|
||||
* i.e. empty or not exist.
|
||||
@ -215,27 +287,32 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
|
||||
|
||||
/*
|
||||
* Validate backups from base_full_backup to dest_backup.
|
||||
* At this point we are sure that parent chain is intact.
|
||||
*/
|
||||
for (i = base_full_backup_index; i >= dest_backup_index; i--)
|
||||
{
|
||||
pgBackup *backup = (pgBackup *) parray_get(backups, i);
|
||||
tmp_backup = (pgBackup *) parray_get(backups, i);
|
||||
|
||||
pgBackupValidate(backup);
|
||||
/* Maybe we should be more paranoid and check for !BACKUP_STATUS_OK? */
|
||||
if (backup->status == BACKUP_STATUS_CORRUPT)
|
||||
if (is_parent(base_full_backup->start_time, tmp_backup, true))
|
||||
{
|
||||
corrupted_backup = backup;
|
||||
corrupted_backup_index = i;
|
||||
break;
|
||||
|
||||
pgBackupValidate(tmp_backup);
|
||||
/* Maybe we should be more paranoid and check for !BACKUP_STATUS_OK? */
|
||||
if (tmp_backup->status == BACKUP_STATUS_CORRUPT)
|
||||
{
|
||||
corrupted_backup = tmp_backup;
|
||||
corrupted_backup_index = i;
|
||||
break;
|
||||
}
|
||||
/* We do not validate WAL files of intermediate backups
|
||||
* It`s done to speed up restore
|
||||
*/
|
||||
}
|
||||
/* We do not validate WAL files of intermediate backups
|
||||
* It`s done to speed up restore
|
||||
*/
|
||||
}
|
||||
/* There is no point in wal validation
|
||||
* if there is corrupted backup between base_backup and dest_backup
|
||||
*/
|
||||
|
||||
/* There is no point in wal validation of corrupted backups */
|
||||
if (!corrupted_backup)
|
||||
{
|
||||
/*
|
||||
* Validate corresponding WAL files.
|
||||
* We pass base_full_backup timeline as last argument to this function,
|
||||
@ -244,39 +321,36 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
|
||||
validate_wal(dest_backup, arclog_path, rt->recovery_target_time,
|
||||
rt->recovery_target_xid, rt->recovery_target_lsn,
|
||||
base_full_backup->tli, xlog_seg_size);
|
||||
|
||||
/* Set every incremental backup between corrupted backup and nearest FULL backup as orphans */
|
||||
if (corrupted_backup)
|
||||
}
|
||||
/* Orphinize every OK descendant of corrupted backup */
|
||||
else
|
||||
{
|
||||
for (i = corrupted_backup_index - 1; i >= 0; i--)
|
||||
char *corrupted_backup_id;
|
||||
corrupted_backup_id = base36enc_dup(corrupted_backup->start_time);
|
||||
|
||||
for (j = corrupted_backup_index - 1; j >= 0; j--)
|
||||
{
|
||||
pgBackup *backup = (pgBackup *) parray_get(backups, i);
|
||||
/* Mark incremental OK backup as orphan */
|
||||
if (backup->backup_mode == BACKUP_MODE_FULL)
|
||||
break;
|
||||
if (backup->status != BACKUP_STATUS_OK)
|
||||
continue;
|
||||
else
|
||||
pgBackup *backup = (pgBackup *) parray_get(backups, j);
|
||||
|
||||
if (is_parent(corrupted_backup->start_time, backup, false))
|
||||
{
|
||||
char *backup_id,
|
||||
*corrupted_backup_id;
|
||||
if (backup->status == BACKUP_STATUS_OK)
|
||||
{
|
||||
backup->status = BACKUP_STATUS_ORPHAN;
|
||||
write_backup_status(backup);
|
||||
|
||||
backup->status = BACKUP_STATUS_ORPHAN;
|
||||
pgBackupWriteBackupControlFile(backup);
|
||||
|
||||
backup_id = base36enc_dup(backup->start_time);
|
||||
corrupted_backup_id = base36enc_dup(corrupted_backup->start_time);
|
||||
|
||||
elog(WARNING, "Backup %s is orphaned because his parent %s is corrupted",
|
||||
backup_id, corrupted_backup_id);
|
||||
|
||||
free(backup_id);
|
||||
free(corrupted_backup_id);
|
||||
elog(WARNING, "Backup %s is orphaned because his parent %s has status: %s",
|
||||
base36enc(backup->start_time),
|
||||
corrupted_backup_id,
|
||||
status2str(corrupted_backup->status));
|
||||
}
|
||||
}
|
||||
}
|
||||
free(corrupted_backup_id);
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: rewrite restore to use parent_chain
|
||||
/*
|
||||
* If dest backup is corrupted or was orphaned in previous check
|
||||
* produce corresponding error message
|
||||
@ -296,7 +370,9 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
|
||||
elog(ERROR, "Backup %s has status: %s",
|
||||
base36enc(dest_backup->start_time), status2str(dest_backup->status));
|
||||
|
||||
/* We ensured that all backups are valid, now restore if required */
|
||||
/* We ensured that all backups are valid, now restore if required
|
||||
* TODO: use parent_link
|
||||
*/
|
||||
if (is_restore)
|
||||
{
|
||||
for (i = base_full_backup_index; i >= dest_backup_index; i--)
|
||||
@ -552,7 +628,7 @@ restore_files(void *arg)
|
||||
|
||||
/* print size of restored file */
|
||||
if (file->write_size != BYTES_INVALID)
|
||||
elog(LOG, "Restored file %s : " INT64_FORMAT " bytes",
|
||||
elog(VERBOSE, "Restored file %s : " INT64_FORMAT " bytes",
|
||||
file->path, file->write_size);
|
||||
}
|
||||
|
||||
|
src/show.c (234 lines changed)
@ -11,14 +11,27 @@
|
||||
#include "pg_probackup.h"
|
||||
|
||||
#include <time.h>
|
||||
#include <unistd.h>
|
||||
#include <dirent.h>
|
||||
#include <sys/stat.h>
|
||||
|
||||
#include "pqexpbuffer.h"
|
||||
|
||||
#include "utils/json.h"
|
||||
|
||||
typedef struct ShowBackendRow
|
||||
{
|
||||
const char *instance;
|
||||
const char *version;
|
||||
char backup_id[20];
|
||||
char recovery_time[100];
|
||||
const char *mode;
|
||||
const char *wal_mode;
|
||||
char tli[20];
|
||||
char duration[20];
|
||||
char data_bytes[20];
|
||||
char start_lsn[20];
|
||||
char stop_lsn[20];
|
||||
const char *status;
|
||||
} ShowBackendRow;
|
||||
|
||||
|
||||
static void show_instance_start(void);
|
||||
static void show_instance_end(void);
|
||||
@ -299,63 +312,194 @@ show_backup(time_t requested_backup_id)
|
||||
static void
|
||||
show_instance_plain(parray *backup_list, bool show_name)
|
||||
{
|
||||
#define SHOW_FIELDS_COUNT 12
|
||||
int i;
|
||||
const char *names[SHOW_FIELDS_COUNT] =
|
||||
{ "Instance", "Version", "ID", "Recovery Time",
|
||||
"Mode", "WAL", "Current/Parent TLI", "Time", "Data",
|
||||
"Start LSN", "Stop LSN", "Status" };
|
||||
const char *field_formats[SHOW_FIELDS_COUNT] =
|
||||
{ " %-*s ", " %-*s ", " %-*s ", " %-*s ",
|
||||
" %-*s ", " %-*s ", " %-*s ", " %*s ", " %*s ",
|
||||
" %*s ", " %*s ", " %-*s "};
|
||||
uint32 widths[SHOW_FIELDS_COUNT];
|
||||
uint32 widths_sum = 0;
|
||||
ShowBackendRow *rows;
|
||||
|
||||
if (show_name)
|
||||
printfPQExpBuffer(&show_buf, "\nBACKUP INSTANCE '%s'\n", instance_name);
|
||||
for (i = 0; i < SHOW_FIELDS_COUNT; i++)
|
||||
widths[i] = strlen(names[i]);
|
||||
|
||||
/* if you add new fields here, fix the header */
|
||||
/* show header */
|
||||
appendPQExpBufferStr(&show_buf,
|
||||
"============================================================================================================================================\n");
|
||||
appendPQExpBufferStr(&show_buf,
|
||||
" Instance Version ID Recovery time Mode WAL Current/Parent TLI Time Data Start LSN Stop LSN Status \n");
|
||||
appendPQExpBufferStr(&show_buf,
|
||||
"============================================================================================================================================\n");
|
||||
rows = (ShowBackendRow *) palloc(parray_num(backup_list) *
|
||||
sizeof(ShowBackendRow));
|
||||
|
||||
/*
|
||||
* Fill row values and calculate maximum width of each field.
|
||||
*/
|
||||
for (i = 0; i < parray_num(backup_list); i++)
|
||||
{
|
||||
pgBackup *backup = parray_get(backup_list, i);
|
||||
TimeLineID parent_tli;
|
||||
char timestamp[100] = "----";
|
||||
char duration[20] = "----";
|
||||
char data_bytes_str[10] = "----";
|
||||
ShowBackendRow *row = &rows[i];
|
||||
int cur = 0;
|
||||
|
||||
/* Instance */
|
||||
row->instance = instance_name;
|
||||
widths[cur] = Max(widths[cur], strlen(row->instance));
|
||||
cur++;
|
||||
|
||||
/* Version */
|
||||
row->version = backup->server_version[0] ?
|
||||
backup->server_version : "----";
|
||||
widths[cur] = Max(widths[cur], strlen(row->version));
|
||||
cur++;
|
||||
|
||||
/* ID */
|
||||
snprintf(row->backup_id, lengthof(row->backup_id), "%s",
|
||||
base36enc(backup->start_time));
|
||||
widths[cur] = Max(widths[cur], strlen(row->backup_id));
|
||||
cur++;
|
||||
|
||||
/* Recovery Time */
|
||||
if (backup->recovery_time != (time_t) 0)
|
||||
time2iso(timestamp, lengthof(timestamp), backup->recovery_time);
|
||||
time2iso(row->recovery_time, lengthof(row->recovery_time),
|
||||
backup->recovery_time);
|
||||
else
|
||||
StrNCpy(row->recovery_time, "----", 4);
|
||||
widths[cur] = Max(widths[cur], strlen(row->recovery_time));
|
||||
cur++;
|
||||
|
||||
/* Mode */
|
||||
row->mode = pgBackupGetBackupMode(backup);
|
||||
widths[cur] = Max(widths[cur], strlen(row->mode));
|
||||
cur++;
|
||||
|
||||
/* WAL */
|
||||
row->wal_mode = backup->stream ? "STREAM": "ARCHIVE";
|
||||
widths[cur] = Max(widths[cur], strlen(row->wal_mode));
|
||||
cur++;
|
||||
|
||||
/* Current/Parent TLI */
|
||||
snprintf(row->tli, lengthof(row->tli), "%u / %u",
|
||||
backup->tli, get_parent_tli(backup->tli));
|
||||
widths[cur] = Max(widths[cur], strlen(row->tli));
|
||||
cur++;
|
||||
|
||||
/* Time */
|
||||
if (backup->end_time != (time_t) 0)
|
||||
snprintf(duration, lengthof(duration), "%.*lfs", 0,
|
||||
snprintf(row->duration, lengthof(row->duration), "%.*lfs", 0,
|
||||
difftime(backup->end_time, backup->start_time));
|
||||
else
|
||||
StrNCpy(row->duration, "----", 4);
|
||||
widths[cur] = Max(widths[cur], strlen(row->duration));
|
||||
cur++;
|
||||
|
||||
/*
|
||||
* Calculate Data field, in the case of full backup this shows the
|
||||
* total amount of data. For an differential backup, this size is only
|
||||
* the difference of data accumulated.
|
||||
*/
|
||||
pretty_size(backup->data_bytes, data_bytes_str,
|
||||
lengthof(data_bytes_str));
|
||||
/* Data */
|
||||
pretty_size(backup->data_bytes, row->data_bytes,
|
||||
lengthof(row->data_bytes));
|
||||
widths[cur] = Max(widths[cur], strlen(row->data_bytes));
|
||||
cur++;
|
||||
|
||||
/* Get parent timeline before printing */
|
||||
parent_tli = get_parent_tli(backup->tli);
|
||||
/* Start LSN */
|
||||
snprintf(row->start_lsn, lengthof(row->start_lsn), "%X/%X",
|
||||
(uint32) (backup->start_lsn >> 32),
|
||||
(uint32) backup->start_lsn);
|
||||
widths[cur] = Max(widths[cur], strlen(row->start_lsn));
|
||||
cur++;
|
||||
|
||||
appendPQExpBuffer(&show_buf,
|
||||
" %-11s %-8s %-6s %-22s %-6s %-7s %3d / %-3d %5s %6s %2X/%-8X %2X/%-8X %-8s\n",
|
||||
instance_name,
|
||||
(backup->server_version[0] ? backup->server_version : "----"),
|
||||
base36enc(backup->start_time),
|
||||
timestamp,
|
||||
pgBackupGetBackupMode(backup),
|
||||
backup->stream ? "STREAM": "ARCHIVE",
|
||||
backup->tli,
|
||||
parent_tli,
|
||||
duration,
|
||||
data_bytes_str,
|
||||
(uint32) (backup->start_lsn >> 32),
|
||||
(uint32) backup->start_lsn,
|
||||
(uint32) (backup->stop_lsn >> 32),
|
||||
(uint32) backup->stop_lsn,
|
||||
status2str(backup->status));
|
||||
/* Stop LSN */
|
||||
snprintf(row->stop_lsn, lengthof(row->stop_lsn), "%X/%X",
|
||||
(uint32) (backup->stop_lsn >> 32),
|
||||
(uint32) backup->stop_lsn);
|
||||
widths[cur] = Max(widths[cur], strlen(row->stop_lsn));
|
||||
cur++;
|
||||
|
||||
/* Status */
|
||||
row->status = status2str(backup->status);
|
||||
widths[cur] = Max(widths[cur], strlen(row->status));
|
||||
}
|
||||
|
||||
for (i = 0; i < SHOW_FIELDS_COUNT; i++)
|
||||
widths_sum += widths[i] + 2 /* two space */;
|
||||
|
||||
if (show_name)
|
||||
appendPQExpBuffer(&show_buf, "\nBACKUP INSTANCE '%s'\n", instance_name);
|
||||
|
||||
/*
|
||||
* Print header.
|
||||
*/
|
||||
for (i = 0; i < widths_sum; i++)
|
||||
appendPQExpBufferChar(&show_buf, '=');
|
||||
appendPQExpBufferChar(&show_buf, '\n');
|
||||
|
||||
for (i = 0; i < SHOW_FIELDS_COUNT; i++)
|
||||
{
|
||||
appendPQExpBuffer(&show_buf, field_formats[i], widths[i], names[i]);
|
||||
}
|
||||
appendPQExpBufferChar(&show_buf, '\n');
|
||||
|
||||
for (i = 0; i < widths_sum; i++)
|
||||
appendPQExpBufferChar(&show_buf, '=');
|
||||
appendPQExpBufferChar(&show_buf, '\n');
|
||||
|
||||
/*
|
||||
* Print values.
|
||||
*/
|
||||
for (i = 0; i < parray_num(backup_list); i++)
|
||||
{
|
||||
ShowBackendRow *row = &rows[i];
|
||||
int cur = 0;
|
||||
|
||||
appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur],
|
||||
row->instance);
|
||||
cur++;
|
||||
|
||||
appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur],
|
||||
row->version);
|
||||
cur++;
|
||||
|
||||
appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur],
|
||||
row->backup_id);
|
||||
cur++;
|
||||
|
||||
appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur],
|
||||
row->recovery_time);
|
||||
cur++;
|
||||
|
||||
appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur],
|
||||
row->mode);
|
||||
cur++;
|
||||
|
||||
appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur],
|
||||
row->wal_mode);
|
||||
cur++;
|
||||
|
||||
appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur],
|
||||
row->tli);
|
||||
cur++;
|
||||
|
||||
appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur],
|
||||
row->duration);
|
||||
cur++;
|
||||
|
||||
appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur],
|
||||
row->data_bytes);
|
||||
cur++;
|
||||
|
||||
appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur],
|
||||
row->start_lsn);
|
||||
cur++;
|
||||
|
||||
appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur],
|
||||
row->stop_lsn);
|
||||
cur++;
|
||||
|
||||
appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur],
|
||||
row->status);
|
||||
cur++;
|
||||
|
||||
appendPQExpBufferChar(&show_buf, '\n');
|
||||
}
|
||||
|
||||
pfree(rows);
|
||||
}
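
The rewritten show_instance_plain sizes every column to the widest of its header and all cell values before printing, instead of relying on fixed-width format strings. The same pattern in miniature (illustrative data only, not the real row struct):

    #include <stdio.h>
    #include <string.h>

    #define COLS 3

    int main(void)
    {
        const char *names[COLS] = {"Instance", "ID", "Status"};
        const char *rows[][COLS] = {
            {"node1", "Q0Z3K1", "OK"},
            {"node1", "PZ7YK2", "CORRUPT"},
        };
        int    nrows = 2;
        size_t widths[COLS];

        /* start from the header widths, then widen to fit every cell */
        for (int c = 0; c < COLS; c++)
        {
            widths[c] = strlen(names[c]);
            for (int r = 0; r < nrows; r++)
                if (strlen(rows[r][c]) > widths[c])
                    widths[c] = strlen(rows[r][c]);
        }

        /* header */
        for (int c = 0; c < COLS; c++)
            printf(" %-*s ", (int) widths[c], names[c]);
        printf("\n");

        /* values, padded to the computed widths */
        for (int r = 0; r < nrows; r++)
        {
            for (int c = 0; c < COLS; c++)
                printf(" %-*s ", (int) widths[c], rows[r][c]);
            printf("\n");
        }
        return 0;
    }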
|
||||
|
||||
/*
|
||||
|
src/status.c (118 lines removed)
@ -1,118 +0,0 @@
|
||||
/*-------------------------------------------------------------------------
|
||||
*
|
||||
* status.c
|
||||
*
|
||||
* Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
|
||||
*
|
||||
* Monitor status of a PostgreSQL server.
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
|
||||
#include "postgres_fe.h"
|
||||
|
||||
#include <signal.h>
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include "pg_probackup.h"
|
||||
|
||||
/* PID can be negative for standalone backend */
|
||||
typedef long pgpid_t;
|
||||
|
||||
static pgpid_t get_pgpid(void);
|
||||
static bool postmaster_is_alive(pid_t pid);
|
||||
|
||||
/*
|
||||
* get_pgpid
|
||||
*
|
||||
* Get PID of postmaster, by scanning postmaster.pid.
|
||||
*/
|
||||
static pgpid_t
|
||||
get_pgpid(void)
|
||||
{
|
||||
FILE *pidf;
|
||||
long pid;
|
||||
char pid_file[MAXPGPATH];
|
||||
|
||||
snprintf(pid_file, lengthof(pid_file), "%s/postmaster.pid", pgdata);
|
||||
|
||||
pidf = fopen(pid_file, PG_BINARY_R);
|
||||
if (pidf == NULL)
|
||||
{
|
||||
/* No pid file, not an error on startup */
|
||||
if (errno == ENOENT)
|
||||
return 0;
|
||||
else
|
||||
{
|
||||
elog(ERROR, "could not open PID file \"%s\": %s",
|
||||
pid_file, strerror(errno));
|
||||
}
|
||||
}
|
||||
if (fscanf(pidf, "%ld", &pid) != 1)
|
||||
{
|
||||
/* Is the file empty? */
|
||||
if (ftell(pidf) == 0 && feof(pidf))
|
||||
elog(ERROR, "the PID file \"%s\" is empty",
|
||||
pid_file);
|
||||
else
|
||||
elog(ERROR, "invalid data in PID file \"%s\"\n",
|
||||
pid_file);
|
||||
}
|
||||
fclose(pidf);
|
||||
return (pgpid_t) pid;
|
||||
}
|
||||
|
||||
/*
|
||||
* postmaster_is_alive
|
||||
*
|
||||
* Check whether postmaster is alive or not.
|
||||
*/
|
||||
static bool
|
||||
postmaster_is_alive(pid_t pid)
|
||||
{
|
||||
/*
|
||||
* Test to see if the process is still there. Note that we do not
|
||||
* consider an EPERM failure to mean that the process is still there;
|
||||
* EPERM must mean that the given PID belongs to some other userid, and
|
||||
* considering the permissions on $PGDATA, that means it's not the
|
||||
* postmaster we are after.
|
||||
*
|
||||
* Don't believe that our own PID or parent shell's PID is the postmaster,
|
||||
* either. (Windows hasn't got getppid(), though.)
|
||||
*/
|
||||
if (pid == getpid())
|
||||
return false;
|
||||
#ifndef WIN32
|
||||
if (pid == getppid())
|
||||
return false;
|
||||
#endif
|
||||
if (kill(pid, 0) == 0)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* is_pg_running
|
||||
*
|
||||
*
|
||||
*/
|
||||
bool
|
||||
is_pg_running(void)
|
||||
{
|
||||
pgpid_t pid;
|
||||
|
||||
pid = get_pgpid();
|
||||
|
||||
/* 0 means no pid file */
|
||||
if (pid == 0)
|
||||
return false;
|
||||
|
||||
/* Case of a standalone backend */
|
||||
if (pid < 0)
|
||||
pid = -pid;
|
||||
|
||||
/* Check if postmaster is alive */
|
||||
return postmaster_is_alive((pid_t) pid);
|
||||
}
|
src/util.c (63 lines changed)
@ -10,12 +10,9 @@
|
||||
|
||||
#include "pg_probackup.h"
|
||||
|
||||
#include <time.h>
|
||||
#include "catalog/pg_control.h"
|
||||
|
||||
#include "storage/bufpage.h"
|
||||
#if PG_VERSION_NUM >= 110000
|
||||
#include "streamutil.h"
|
||||
#endif
|
||||
#include <time.h>
|
||||
|
||||
const char *
|
||||
base36enc(long unsigned int value)
|
||||
@ -125,6 +122,46 @@ get_current_timeline(bool safe)
|
||||
return ControlFile.checkPointCopy.ThisTimeLineID;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get last check point record ptr from pg_tonrol.
|
||||
*/
|
||||
XLogRecPtr
|
||||
get_checkpoint_location(PGconn *conn)
|
||||
{
|
||||
#if PG_VERSION_NUM >= 90600
|
||||
PGresult *res;
|
||||
uint32 lsn_hi;
|
||||
uint32 lsn_lo;
|
||||
XLogRecPtr lsn;
|
||||
|
||||
#if PG_VERSION_NUM >= 100000
|
||||
res = pgut_execute(conn,
|
||||
"SELECT checkpoint_lsn FROM pg_catalog.pg_control_checkpoint()",
|
||||
0, NULL);
|
||||
#else
|
||||
res = pgut_execute(conn,
|
||||
"SELECT checkpoint_location FROM pg_catalog.pg_control_checkpoint()",
|
||||
0, NULL);
|
||||
#endif
|
||||
XLogDataFromLSN(PQgetvalue(res, 0, 0), &lsn_hi, &lsn_lo);
|
||||
PQclear(res);
|
||||
/* Calculate LSN */
|
||||
lsn = ((uint64) lsn_hi) << 32 | lsn_lo;
|
||||
|
||||
return lsn;
|
||||
#else
|
||||
char *buffer;
|
||||
size_t size;
|
||||
ControlFileData ControlFile;
|
||||
|
||||
buffer = fetchFile(conn, "global/pg_control", &size);
|
||||
digestControlFile(&ControlFile, buffer, size);
|
||||
pg_free(buffer);
|
||||
|
||||
return ControlFile.checkPoint;
|
||||
#endif
|
||||
}
|
||||
|
||||
uint64
|
||||
get_system_identifier(char *pgdata_path)
|
||||
{
|
||||
@ -243,22 +280,6 @@ time2iso(char *buf, size_t len, time_t time)
|
||||
}
|
||||
}
|
||||
|
||||
/* copied from timestamp.c */
|
||||
pg_time_t
|
||||
timestamptz_to_time_t(TimestampTz t)
|
||||
{
|
||||
pg_time_t result;
|
||||
|
||||
#ifdef HAVE_INT64_TIMESTAMP
|
||||
result = (pg_time_t) (t / USECS_PER_SEC +
|
||||
((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY));
|
||||
#else
|
||||
result = (pg_time_t) (t +
|
||||
((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY));
|
||||
#endif
|
||||
return result;
|
||||
}
|
||||
|
||||
/* Parse string representation of the server version */
|
||||
int
|
||||
parse_server_version(char *server_version_str)
|
||||
src/utils/logger.c
@ -7,15 +7,12 @@
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
#include <errno.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include "postgres_fe.h"
|
||||
|
||||
#include <sys/stat.h>
|
||||
#include <time.h>
|
||||
|
||||
#include "logger.h"
|
||||
#include "pgut.h"
|
||||
#include "pg_probackup.h"
|
||||
#include "thread.h"
|
||||
|
||||
/* Logger parameters */
|
||||
@ -33,9 +30,9 @@ char *log_directory = NULL;
|
||||
char log_path[MAXPGPATH] = "";
|
||||
|
||||
/* Maximum size of an individual log file in kilobytes */
|
||||
int log_rotation_size = 0;
|
||||
uint64 log_rotation_size = 0;
|
||||
/* Maximum lifetime of an individual log file in minutes */
|
||||
int log_rotation_age = 0;
|
||||
uint64 log_rotation_age = 0;
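
With the rotation settings now stored as uint64 in their base units (kilobytes for the size limit, milliseconds for the age limit), the age check later in open_logfile divides by 1000 to compare against elapsed seconds. A tiny arithmetic check of that conversion (assumed values, just to show the units):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* one day expressed in the base unit (milliseconds) */
        uint64_t log_rotation_age = 24ULL * 60 * 60 * 1000;   /* 86400000 ms */

        /* the file age is measured in seconds, so divide by 1000 before comparing */
        uint64_t rotation_age_secs = log_rotation_age / 1000; /* 86400 s */

        printf("%llu ms -> %llu s\n",
               (unsigned long long) log_rotation_age,
               (unsigned long long) rotation_age_secs);
        return 0;
    }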
|
||||
|
||||
/* Implementation for logging.h */
|
||||
|
||||
@ -552,8 +549,8 @@ open_logfile(FILE **file, const char *filename_format)
|
||||
/* Parsed creation time */
|
||||
|
||||
rotation_requested = (cur_time - creation_time) >
|
||||
/* convert to seconds */
|
||||
log_rotation_age * 60;
|
||||
/* convert to seconds from milliseconds */
|
||||
log_rotation_age / 1000;
|
||||
}
|
||||
else
|
||||
elog_stderr(ERROR, "cannot read creation timestamp from "
|
||||
src/utils/logger.h
@ -10,8 +10,6 @@
|
||||
#ifndef LOGGER_H
|
||||
#define LOGGER_H
|
||||
|
||||
#include "postgres_fe.h"
|
||||
|
||||
#define LOG_NONE (-10)
|
||||
|
||||
/* Log level */
|
||||
@ -36,12 +34,15 @@ extern char log_path[MAXPGPATH];
|
||||
|
||||
#define LOG_ROTATION_SIZE_DEFAULT 0
|
||||
#define LOG_ROTATION_AGE_DEFAULT 0
|
||||
extern int log_rotation_size;
|
||||
extern int log_rotation_age;
|
||||
extern uint64 log_rotation_size;
|
||||
extern uint64 log_rotation_age;
|
||||
|
||||
#define LOG_LEVEL_CONSOLE_DEFAULT INFO
|
||||
#define LOG_LEVEL_FILE_DEFAULT LOG_OFF
|
||||
|
||||
#define LOG_FILENAME_DEFAULT "pg_probackup.log"
|
||||
#define LOG_DIRECTORY_DEFAULT "log"
|
||||
|
||||
#undef elog
|
||||
extern void elog(int elevel, const char *fmt, ...) pg_attribute_printf(2, 3);
|
||||
extern void elog_file(int elevel, const char *fmt, ...) pg_attribute_printf(2, 3);
|
||||
src/utils/parray.c
@ -7,7 +7,10 @@
 *-------------------------------------------------------------------------
 */

#include "src/pg_probackup.h"
#include "postgres_fe.h"

#include "parray.h"
#include "pgut.h"

/* members of struct parray are hidden from client. */
struct parray

173	src/utils/pgut.c
@ -9,24 +9,16 @@
 */

#include "postgres_fe.h"
#include "libpq/pqsignal.h"

#include "getopt_long.h"
#include <limits.h>
#include "libpq-fe.h"
#include "libpq/pqsignal.h"
#include "pqexpbuffer.h"

#include <time.h>
#include <unistd.h>

#include "logger.h"
#include "pgut.h"

/* old gcc doesn't have LLONG_MAX. */
#ifndef LLONG_MAX
#if defined(HAVE_LONG_INT_64) || !defined(HAVE_LONG_LONG_INT_64)
#define LLONG_MAX LONG_MAX
#else
#define LLONG_MAX INT64CONST(0x7FFFFFFFFFFFFFFF)
#endif
#endif
#include "logger.h"

#define MAX_TZDISP_HOUR		15	/* maximum allowed hour part */
#define SECS_PER_MINUTE		60
@ -249,7 +241,7 @@ assign_option(pgut_option *opt, const char *optarg, pgut_optsrc src)
			*(char **) opt->var = pgut_strdup(optarg);
			if (strcmp(optarg,"") != 0)
				return;
			message = "a valid string. But provided: ";
			message = "a valid string";
			break;
		case 't':
			if (parse_time(optarg, opt->var,
@ -298,7 +290,13 @@ convert_to_base_unit(int64 value, const char *unit,
			if (table[i].multiplier < 0)
				*base_value = value / (-table[i].multiplier);
			else
			{
				/* Check for integer overflow first */
				if (value > PG_INT64_MAX / table[i].multiplier)
					return false;

				*base_value = value * table[i].multiplier;
			}
			return true;
		}
	}
@ -328,7 +326,13 @@ convert_to_base_unit_u(uint64 value, const char *unit,
			if (table[i].multiplier < 0)
				*base_value = value / (-table[i].multiplier);
			else
			{
				/* Check for integer overflow first */
				if (value > PG_UINT64_MAX / table[i].multiplier)
					return false;

				*base_value = value * table[i].multiplier;
			}
			return true;
		}
	}
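Both conversion routines above now guard the multiplication against 64-bit overflow by checking the value against PG_INT64_MAX / multiplier (or the unsigned equivalent) before multiplying. A compact standalone sketch of that same guard, written against <stdint.h> limits rather than the PostgreSQL macros and with names invented for the example:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* negative multiplier means "divide", as in the unit tables above */
    static bool
    scale_to_base_unit(int64_t value, int64_t multiplier, int64_t *base_value)
    {
        if (multiplier < 0)
        {
            *base_value = value / (-multiplier);
            return true;
        }

        /* refuse the conversion instead of silently wrapping on overflow */
        if (value > INT64_MAX / multiplier)
            return false;

        *base_value = value * multiplier;
        return true;
    }

    int
    main(void)
    {
        int64_t base;

        printf("%d\n", scale_to_base_unit(10, 1024, &base));        /* 1: ok       */
        printf("%d\n", scale_to_base_unit(INT64_MAX, 1024, &base)); /* 0: overflow */
        return 0;
    }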
@ -366,6 +370,10 @@ convert_from_base_unit(int64 base_value, int base_unit,
|
||||
*/
|
||||
if (table[i].multiplier < 0)
|
||||
{
|
||||
/* Check for integer overflow first */
|
||||
if (base_value > PG_INT64_MAX / (-table[i].multiplier))
|
||||
continue;
|
||||
|
||||
*value = base_value * (-table[i].multiplier);
|
||||
*unit = table[i].unit;
|
||||
break;
|
||||
@ -410,6 +418,10 @@ convert_from_base_unit_u(uint64 base_value, int base_unit,
|
||||
*/
|
||||
if (table[i].multiplier < 0)
|
||||
{
|
||||
/* Check for integer overflow first */
|
||||
if (base_value > PG_UINT64_MAX / (-table[i].multiplier))
|
||||
continue;
|
||||
|
||||
*value = base_value * (-table[i].multiplier);
|
||||
*unit = table[i].unit;
|
||||
break;
|
||||
@ -607,7 +619,7 @@ parse_int32(const char *value, int32 *result, int flags)

	if (strcmp(value, INFINITE_STR) == 0)
	{
		*result = INT_MAX;
		*result = PG_INT32_MAX;
		return true;
	}

@ -616,12 +628,17 @@ parse_int32(const char *value, int32 *result, int flags)
	if (endptr == value || (*endptr && flags == 0))
		return false;

	/* Check for integer overflow */
	if (errno == ERANGE || val != (int64) ((int32) val))
		return false;

	if (!parse_unit(endptr, flags, val, &val))
		return false;

	/* Check for integer overflow again */
	if (val != (int64) ((int32) val))
		return false;

	*result = val;

	return true;
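parse_int32() above now clamps INFINITE to PG_INT32_MAX and range-checks the parsed value twice: once after strtoll() and again after the unit suffix has been applied. A rough standalone illustration of that flow follows; the unit-suffix step is omitted and the names are invented for the example.

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static bool
    parse_int32_demo(const char *value, int32_t *result)
    {
        int64_t  val;
        char    *endptr;

        if (strcmp(value, "INFINITE") == 0)
        {
            *result = INT32_MAX;
            return true;
        }

        errno = 0;
        val = strtoll(value, &endptr, 0);
        if (endptr == value || *endptr != '\0')
            return false;

        /* reject values strtoll() clamped or that do not fit into int32 */
        if (errno == ERANGE || val != (int64_t) ((int32_t) val))
            return false;

        *result = (int32_t) val;
        return true;
    }

    int
    main(void)
    {
        int32_t v;

        printf("%d %d\n", parse_int32_demo("2048", &v), v);   /* 1 2048 */
        printf("%d\n", parse_int32_demo("9999999999", &v));   /* 0      */
        return 0;
    }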
@ -639,7 +656,7 @@ parse_uint32(const char *value, uint32 *result, int flags)
|
||||
|
||||
if (strcmp(value, INFINITE_STR) == 0)
|
||||
{
|
||||
*result = UINT_MAX;
|
||||
*result = PG_UINT32_MAX;
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -648,12 +665,17 @@ parse_uint32(const char *value, uint32 *result, int flags)
|
||||
if (endptr == value || (*endptr && flags == 0))
|
||||
return false;
|
||||
|
||||
/* Check for integer overflow */
|
||||
if (errno == ERANGE || val != (uint64) ((uint32) val))
|
||||
return false;
|
||||
|
||||
if (!parse_unit_u(endptr, flags, val, &val))
|
||||
return false;
|
||||
|
||||
/* Check for integer overflow again */
|
||||
if (val != (uint64) ((uint32) val))
|
||||
return false;
|
||||
|
||||
*result = val;
|
||||
|
||||
return true;
|
||||
@ -671,7 +693,7 @@ parse_int64(const char *value, int64 *result, int flags)
|
||||
|
||||
if (strcmp(value, INFINITE_STR) == 0)
|
||||
{
|
||||
*result = LLONG_MAX;
|
||||
*result = PG_INT64_MAX;
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -709,13 +731,7 @@ parse_uint64(const char *value, uint64 *result, int flags)
|
||||
|
||||
if (strcmp(value, INFINITE_STR) == 0)
|
||||
{
|
||||
#if defined(HAVE_LONG_INT_64)
|
||||
*result = ULONG_MAX;
|
||||
#elif defined(HAVE_LONG_LONG_INT_64)
|
||||
*result = ULLONG_MAX;
|
||||
#else
|
||||
*result = ULONG_MAX;
|
||||
#endif
|
||||
*result = PG_UINT64_MAX;
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1637,31 +1653,6 @@ pgut_disconnect(PGconn *conn)
|
||||
PQfinish(conn);
|
||||
}
|
||||
|
||||
/* set/get host and port for connecting standby server */
|
||||
const char *
|
||||
pgut_get_host()
|
||||
{
|
||||
return host;
|
||||
}
|
||||
|
||||
const char *
|
||||
pgut_get_port()
|
||||
{
|
||||
return port;
|
||||
}
|
||||
|
||||
void
|
||||
pgut_set_host(const char *new_host)
|
||||
{
|
||||
host = new_host;
|
||||
}
|
||||
|
||||
void
|
||||
pgut_set_port(const char *new_port)
|
||||
{
|
||||
port = new_port;
|
||||
}
|
||||
|
||||
|
||||
PGresult *
|
||||
pgut_execute_parallel(PGconn* conn,
|
||||
@ -2136,60 +2127,6 @@ get_username(void)
|
||||
return ret;
|
||||
}
|
||||
|
||||
int
|
||||
appendStringInfoFile(StringInfo str, FILE *fp)
|
||||
{
|
||||
AssertArg(str != NULL);
|
||||
AssertArg(fp != NULL);
|
||||
|
||||
for (;;)
|
||||
{
|
||||
int rc;
|
||||
|
||||
if (str->maxlen - str->len < 2 && enlargeStringInfo(str, 1024) == 0)
|
||||
return errno = ENOMEM;
|
||||
|
||||
rc = fread(str->data + str->len, 1, str->maxlen - str->len - 1, fp);
|
||||
if (rc == 0)
|
||||
break;
|
||||
else if (rc > 0)
|
||||
{
|
||||
str->len += rc;
|
||||
str->data[str->len] = '\0';
|
||||
}
|
||||
else if (ferror(fp) && errno != EINTR)
|
||||
return errno;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
appendStringInfoFd(StringInfo str, int fd)
|
||||
{
|
||||
AssertArg(str != NULL);
|
||||
AssertArg(fd != -1);
|
||||
|
||||
for (;;)
|
||||
{
|
||||
int rc;
|
||||
|
||||
if (str->maxlen - str->len < 2 && enlargeStringInfo(str, 1024) == 0)
|
||||
return errno = ENOMEM;
|
||||
|
||||
rc = read(fd, str->data + str->len, str->maxlen - str->len - 1);
|
||||
if (rc == 0)
|
||||
break;
|
||||
else if (rc > 0)
|
||||
{
|
||||
str->len += rc;
|
||||
str->data[str->len] = '\0';
|
||||
}
|
||||
else if (errno != EINTR)
|
||||
return errno;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void *
|
||||
pgut_malloc(size_t size)
|
||||
{
|
||||
@ -2226,36 +2163,6 @@ pgut_strdup(const char *str)
|
||||
return ret;
|
||||
}
|
||||
|
||||
char *
|
||||
strdup_with_len(const char *str, size_t len)
|
||||
{
|
||||
char *r;
|
||||
|
||||
if (str == NULL)
|
||||
return NULL;
|
||||
|
||||
r = pgut_malloc(len + 1);
|
||||
memcpy(r, str, len);
|
||||
r[len] = '\0';
|
||||
return r;
|
||||
}
|
||||
|
||||
/* strdup but trim whitespaces at head and tail */
|
||||
char *
|
||||
strdup_trim(const char *str)
|
||||
{
|
||||
size_t len;
|
||||
|
||||
if (str == NULL)
|
||||
return NULL;
|
||||
|
||||
while (IsSpace(str[0])) { str++; }
|
||||
len = strlen(str);
|
||||
while (len > 0 && IsSpace(str[len - 1])) { len--; }
|
||||
|
||||
return strdup_with_len(str, len);
|
||||
}
|
||||
|
||||
FILE *
|
||||
pgut_fopen(const char *path, const char *mode, bool missing_ok)
|
||||
{
|
||||
|
@ -11,26 +11,9 @@
|
||||
#ifndef PGUT_H
|
||||
#define PGUT_H
|
||||
|
||||
#include "libpq-fe.h"
|
||||
#include "pqexpbuffer.h"
|
||||
|
||||
#include <assert.h>
|
||||
#include <sys/time.h>
|
||||
|
||||
#include "postgres_fe.h"
|
||||
#include "access/xlogdefs.h"
|
||||
#include "logger.h"
|
||||
|
||||
#if !defined(C_H) && !defined(__cplusplus)
|
||||
#ifndef bool
|
||||
typedef char bool;
|
||||
#endif
|
||||
#ifndef true
|
||||
#define true ((bool) 1)
|
||||
#endif
|
||||
#ifndef false
|
||||
#define false ((bool) 0)
|
||||
#endif
|
||||
#endif
|
||||
#include "libpq-fe.h"
|
||||
|
||||
#define INFINITE_STR "INFINITE"
|
||||
|
||||
@ -139,19 +122,12 @@ extern bool pgut_send(PGconn* conn, const char *query, int nParams, const char *
|
||||
extern void pgut_cancel(PGconn* conn);
|
||||
extern int pgut_wait(int num, PGconn *connections[], struct timeval *timeout);
|
||||
|
||||
extern const char *pgut_get_host(void);
|
||||
extern const char *pgut_get_port(void);
|
||||
extern void pgut_set_host(const char *new_host);
|
||||
extern void pgut_set_port(const char *new_port);
|
||||
|
||||
/*
|
||||
* memory allocators
|
||||
*/
|
||||
extern void *pgut_malloc(size_t size);
|
||||
extern void *pgut_realloc(void *p, size_t size);
|
||||
extern char *pgut_strdup(const char *str);
|
||||
extern char *strdup_with_len(const char *str, size_t len);
|
||||
extern char *strdup_trim(const char *str);
|
||||
|
||||
#define pgut_new(type) ((type *) pgut_malloc(sizeof(type)))
|
||||
#define pgut_newarray(type, n) ((type *) pgut_malloc(sizeof(type) * (n)))
|
||||
@ -178,28 +154,6 @@ extern FILE *pgut_fopen(const char *path, const char *mode, bool missing_ok);
|
||||
#define AssertMacro(x) ((void) 0)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* StringInfo and string operations
|
||||
*/
|
||||
#define STRINGINFO_H
|
||||
|
||||
#define StringInfoData PQExpBufferData
|
||||
#define StringInfo PQExpBuffer
|
||||
#define makeStringInfo createPQExpBuffer
|
||||
#define initStringInfo initPQExpBuffer
|
||||
#define freeStringInfo destroyPQExpBuffer
|
||||
#define termStringInfo termPQExpBuffer
|
||||
#define resetStringInfo resetPQExpBuffer
|
||||
#define enlargeStringInfo enlargePQExpBuffer
|
||||
#define printfStringInfo printfPQExpBuffer /* reset + append */
|
||||
#define appendStringInfo appendPQExpBuffer
|
||||
#define appendStringInfoString appendPQExpBufferStr
|
||||
#define appendStringInfoChar appendPQExpBufferChar
|
||||
#define appendBinaryStringInfo appendBinaryPQExpBuffer
|
||||
|
||||
extern int appendStringInfoFile(StringInfo str, FILE *fp);
|
||||
extern int appendStringInfoFd(StringInfo str, int fd);
|
||||
|
||||
extern bool parse_bool(const char *value, bool *result);
|
||||
extern bool parse_bool_with_len(const char *value, size_t len, bool *result);
|
||||
extern bool parse_int32(const char *value, int32 *result, int flags);
|
||||
@ -219,8 +173,6 @@ extern void convert_from_base_unit_u(uint64 base_value, int base_unit,
|
||||
#define IsSpace(c) (isspace((unsigned char)(c)))
|
||||
#define IsAlpha(c) (isalpha((unsigned char)(c)))
|
||||
#define IsAlnum(c) (isalnum((unsigned char)(c)))
|
||||
#define IsIdentHead(c) (IsAlpha(c) || (c) == '_')
|
||||
#define IsIdentBody(c) (IsAlnum(c) || (c) == '_')
|
||||
#define ToLower(c) (tolower((unsigned char)(c)))
|
||||
#define ToUpper(c) (toupper((unsigned char)(c)))
|
||||
|
||||
|
206
src/validate.c
206
src/validate.c
@ -24,6 +24,8 @@ typedef struct
|
||||
{
|
||||
parray *files;
|
||||
bool corrupted;
|
||||
XLogRecPtr stop_lsn;
|
||||
uint32 checksum_version;
|
||||
|
||||
/*
|
||||
* Return value from the thread.
|
||||
@ -62,6 +64,12 @@ pgBackupValidate(pgBackup *backup)
|
||||
|
||||
if (backup->status == BACKUP_STATUS_OK || backup->status == BACKUP_STATUS_DONE)
|
||||
elog(INFO, "Validating backup %s", base36enc(backup->start_time));
|
||||
/* backups in MERGING status must have an option of revalidation without losing MERGING status
|
||||
else if (backup->status == BACKUP_STATUS_MERGING)
|
||||
{
|
||||
some message here
|
||||
}
|
||||
*/
|
||||
else
|
||||
elog(INFO, "Revalidating backup %s", base36enc(backup->start_time));
|
||||
|
||||
@ -94,6 +102,8 @@ pgBackupValidate(pgBackup *backup)
|
||||
|
||||
arg->files = files;
|
||||
arg->corrupted = false;
|
||||
arg->stop_lsn = backup->stop_lsn;
|
||||
arg->checksum_version = backup->checksum_version;
|
||||
/* By default there are some error */
|
||||
threads_args[i].ret = 1;
|
||||
|
||||
@ -123,7 +133,7 @@ pgBackupValidate(pgBackup *backup)
|
||||
|
||||
/* Update backup status */
|
||||
backup->status = corrupted ? BACKUP_STATUS_CORRUPT : BACKUP_STATUS_OK;
|
||||
pgBackupWriteBackupControlFile(backup);
|
||||
write_backup_status(backup);
|
||||
|
||||
if (corrupted)
|
||||
elog(WARNING, "Backup %s data files are corrupted", base36enc(backup->start_time));
|
||||
@ -201,7 +211,13 @@ pgBackupValidateFiles(void *arg)
|
||||
elog(WARNING, "Invalid CRC of backup file \"%s\" : %X. Expected %X",
|
||||
file->path, file->crc, crc);
|
||||
arguments->corrupted = true;
|
||||
break;
|
||||
|
||||
/* validate relation blocks */
|
||||
if (file->is_datafile)
|
||||
{
|
||||
if (!check_file_pages(file, arguments->stop_lsn, arguments->checksum_version))
|
||||
arguments->corrupted = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -282,6 +298,7 @@ do_validate_instance(void)
|
||||
{
|
||||
char *current_backup_id;
|
||||
int i;
|
||||
int j;
|
||||
parray *backups;
|
||||
pgBackup *current_backup = NULL;
|
||||
|
||||
@ -296,56 +313,187 @@ do_validate_instance(void)
|
||||
/* Examine backups one by one and validate them */
|
||||
for (i = 0; i < parray_num(backups); i++)
|
||||
{
|
||||
pgBackup *base_full_backup;
|
||||
char *parent_backup_id;
|
||||
|
||||
current_backup = (pgBackup *) parray_get(backups, i);
|
||||
|
||||
/* Valiate each backup along with its xlog files. */
|
||||
/* Find ancestor for incremental backup */
|
||||
if (current_backup->backup_mode != BACKUP_MODE_FULL)
|
||||
{
|
||||
pgBackup *tmp_backup = NULL;
|
||||
int result;
|
||||
|
||||
result = scan_parent_chain(current_backup, &tmp_backup);
|
||||
|
||||
/* chain is broken */
|
||||
if (result == 0)
|
||||
{
|
||||
/* determine missing backup ID */
|
||||
|
||||
parent_backup_id = base36enc_dup(tmp_backup->parent_backup);
|
||||
corrupted_backup_found = true;
|
||||
|
||||
/* orphanize current_backup */
|
||||
if (current_backup->status == BACKUP_STATUS_OK)
|
||||
{
|
||||
current_backup->status = BACKUP_STATUS_ORPHAN;
|
||||
write_backup_status(current_backup);
|
||||
elog(WARNING, "Backup %s is orphaned because his parent %s is missing",
|
||||
base36enc(current_backup->start_time),
|
||||
parent_backup_id);
|
||||
}
|
||||
else
|
||||
{
|
||||
elog(WARNING, "Backup %s has missing parent %s",
|
||||
base36enc(current_backup->start_time), parent_backup_id);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
/* chain is whole, but at least one parent is invalid */
|
||||
else if (result == 1)
|
||||
{
|
||||
/* determine corrupt backup ID */
|
||||
parent_backup_id = base36enc_dup(tmp_backup->start_time);
|
||||
|
||||
/* Oldest corrupt backup has a chance for revalidation */
|
||||
if (current_backup->start_time != tmp_backup->start_time)
|
||||
{
|
||||
/* orphanize current_backup */
|
||||
if (current_backup->status == BACKUP_STATUS_OK)
|
||||
{
|
||||
current_backup->status = BACKUP_STATUS_ORPHAN;
|
||||
write_backup_status(current_backup);
|
||||
elog(WARNING, "Backup %s is orphaned because his parent %s has status: %s",
|
||||
base36enc(current_backup->start_time), parent_backup_id,
|
||||
status2str(tmp_backup->status));
|
||||
}
|
||||
else
|
||||
{
|
||||
elog(WARNING, "Backup %s has parent %s with status: %s",
|
||||
base36enc(current_backup->start_time),parent_backup_id,
|
||||
status2str(tmp_backup->status));
|
||||
}
|
||||
continue;
|
||||
}
|
||||
base_full_backup = find_parent_full_backup(current_backup);
|
||||
}
|
||||
/* chain is whole, all parents are valid at first glance,
|
||||
* current backup validation can proceed
|
||||
*/
|
||||
else
|
||||
base_full_backup = tmp_backup;
|
||||
}
|
||||
else
|
||||
base_full_backup = current_backup;
|
||||
|
||||
/* Valiate backup files*/
|
||||
pgBackupValidate(current_backup);
|
||||
|
||||
/* Ensure that the backup has valid list of parent backups */
|
||||
/* Validate corresponding WAL files */
|
||||
if (current_backup->status == BACKUP_STATUS_OK)
|
||||
{
|
||||
pgBackup *base_full_backup = current_backup;
|
||||
|
||||
if (current_backup->backup_mode != BACKUP_MODE_FULL)
|
||||
{
|
||||
base_full_backup = find_parent_backup(current_backup);
|
||||
|
||||
if (base_full_backup == NULL)
|
||||
elog(ERROR, "Valid full backup for backup %s is not found.",
|
||||
base36enc(current_backup->start_time));
|
||||
}
|
||||
|
||||
/* Validate corresponding WAL files */
|
||||
validate_wal(current_backup, arclog_path, 0,
|
||||
0, 0, base_full_backup->tli, xlog_seg_size);
|
||||
}
|
||||
|
||||
/* Mark every incremental backup between corrupted backup and nearest FULL backup as orphans */
|
||||
/*
|
||||
* Mark every descendant of corrupted backup as orphan
|
||||
*/
|
||||
if (current_backup->status == BACKUP_STATUS_CORRUPT)
|
||||
{
|
||||
int j;
|
||||
/* This is ridiculous but legal.
|
||||
* PAGE1_2b <- OK
|
||||
* PAGE1_2a <- OK
|
||||
* PAGE1_1b <- ORPHAN
|
||||
* PAGE1_1a <- CORRUPT
|
||||
* FULL1 <- OK
|
||||
*/
|
||||
|
||||
corrupted_backup_found = true;
|
||||
current_backup_id = base36enc_dup(current_backup->start_time);
|
||||
|
||||
for (j = i - 1; j >= 0; j--)
|
||||
{
|
||||
pgBackup *backup = (pgBackup *) parray_get(backups, j);
|
||||
|
||||
if (backup->backup_mode == BACKUP_MODE_FULL)
|
||||
break;
|
||||
if (backup->status != BACKUP_STATUS_OK)
|
||||
continue;
|
||||
else
|
||||
if (is_parent(current_backup->start_time, backup, false))
|
||||
{
|
||||
backup->status = BACKUP_STATUS_ORPHAN;
|
||||
pgBackupWriteBackupControlFile(backup);
|
||||
if (backup->status == BACKUP_STATUS_OK)
|
||||
{
|
||||
backup->status = BACKUP_STATUS_ORPHAN;
|
||||
write_backup_status(backup);
|
||||
|
||||
elog(WARNING, "Backup %s is orphaned because his parent %s is corrupted",
|
||||
base36enc(backup->start_time), current_backup_id);
|
||||
elog(WARNING, "Backup %s is orphaned because his parent %s has status: %s",
|
||||
base36enc(backup->start_time),
|
||||
current_backup_id,
|
||||
status2str(current_backup->status));
|
||||
}
|
||||
}
|
||||
}
|
||||
free(current_backup_id);
|
||||
}
|
||||
|
||||
/* For every OK backup we try to revalidate all his ORPHAN descendants. */
|
||||
if (current_backup->status == BACKUP_STATUS_OK)
|
||||
{
|
||||
/* revalidate all ORPHAN descendats
|
||||
* be very careful not to miss a missing backup
|
||||
* for every backup we must check that he is descendant of current_backup
|
||||
*/
|
||||
for (j = i - 1; j >= 0; j--)
|
||||
{
|
||||
pgBackup *backup = (pgBackup *) parray_get(backups, j);
|
||||
pgBackup *tmp_backup = NULL;
|
||||
int result;
|
||||
|
||||
//PAGE3b ORPHAN
|
||||
//PAGE2b ORPHAN -----
|
||||
//PAGE6a ORPHAN |
|
||||
//PAGE5a CORRUPT |
|
||||
//PAGE4a missing |
|
||||
//PAGE3a missing |
|
||||
//PAGE2a ORPHAN |
|
||||
//PAGE1a OK <- we are here <-|
|
||||
//FULL OK
|
||||
|
||||
if (is_parent(current_backup->start_time, backup, false))
|
||||
{
|
||||
/* Revalidation make sense only if parent chain is whole.
|
||||
* is_parent() do not guarantee that.
|
||||
*/
|
||||
result = scan_parent_chain(backup, &tmp_backup);
|
||||
|
||||
if (result == 1)
|
||||
{
|
||||
/* revalidation make sense only if oldest invalid backup is current_backup
|
||||
*/
|
||||
|
||||
if (tmp_backup->start_time != backup->start_time)
|
||||
continue;
|
||||
|
||||
if (backup->status == BACKUP_STATUS_ORPHAN)
|
||||
{
|
||||
/* Revaliate backup files*/
|
||||
pgBackupValidate(backup);
|
||||
|
||||
if (backup->status == BACKUP_STATUS_OK)
|
||||
{
|
||||
//tmp_backup = find_parent_full_backup(dest_backup);
|
||||
/* Revalidation successful, validate corresponding WAL files */
|
||||
validate_wal(backup, arclog_path, 0,
|
||||
0, 0, current_backup->tli,
|
||||
xlog_seg_size);
|
||||
}
|
||||
}
|
||||
|
||||
if (backup->status != BACKUP_STATUS_OK)
|
||||
{
|
||||
corrupted_backup_found = true;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* cleanup */
|
||||
|
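The reworked do_validate_instance() above walks each incremental backup's parent chain before touching its files, so missing or invalid ancestors are reported (and descendants orphaned) up front, and WAL is only validated for backups whose chain checks out. The following is a loose standalone model of such a chain scan, not the project's scan_parent_chain(); per the diff comments, the real function reports the oldest invalid ancestor, and this sketch mimics only the 0/1/2 result convention used above.

    #include <stddef.h>
    #include <stdio.h>

    typedef enum { ST_OK, ST_CORRUPT, ST_ORPHAN } demo_status;

    typedef struct demo_backup
    {
        demo_status          status;
        struct demo_backup  *parent;         /* NULL for a FULL backup */
        int                  expects_parent; /* metadata says a parent should exist */
    } demo_backup;

    /* 0 = a parent is missing, 1 = chain is complete but an ancestor is invalid,
     * 2 = the whole chain is valid; *oldest_invalid reports where the scan stopped */
    static int
    scan_chain_demo(demo_backup *b, demo_backup **oldest_invalid)
    {
        int found_invalid = 0;

        for (demo_backup *cur = b; cur != NULL; cur = cur->parent)
        {
            if (cur->expects_parent && cur->parent == NULL)
            {
                *oldest_invalid = cur;       /* chain is broken here */
                return 0;
            }
            if (cur->status != ST_OK)
            {
                *oldest_invalid = cur;       /* keep the ancestor closest to FULL */
                found_invalid = 1;
            }
        }
        return found_invalid ? 1 : 2;
    }

    int
    main(void)
    {
        demo_backup full  = {ST_OK, NULL, 0};
        demo_backup page1 = {ST_CORRUPT, &full, 1};
        demo_backup page2 = {ST_OK, &page1, 1};
        demo_backup *where = NULL;

        /* the corrupt ancestor is found, so page2 would be marked ORPHAN */
        printf("%d\n", scan_chain_demo(&page2, &where));   /* prints 1 */
        return 0;
    }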
@ -13,6 +13,9 @@ Check physical correctness of restored instances:
Check archive compression:
    export ARCHIVE_COMPRESSION=ON

Enable compatibility tests:
    export PGPROBACKUPBIN_OLD=/path/to/previous_version_pg_probackup_binary

Specify path to pg_probackup binary file. By default tests use <Path to Git repository>/pg_probackup/
    export PGPROBACKUPBIN=<path to pg_probackup>
@ -1,10 +1,10 @@
|
||||
import unittest
|
||||
|
||||
from . import init_test, option_test, show_test, \
|
||||
backup_test, delete_test, restore_test, validate_test, \
|
||||
retention_test, ptrack_clean, ptrack_cluster, \
|
||||
ptrack_move_to_tablespace, ptrack_recovery, ptrack_vacuum, \
|
||||
ptrack_vacuum_bits_frozen, ptrack_vacuum_bits_visibility, \
|
||||
from . import init_test, merge, option_test, show_test, compatibility, \
|
||||
backup_test, delete_test, delta, restore_test, validate_test, \
|
||||
retention_test, ptrack_clean, ptrack_empty, ptrack_cluster, \
|
||||
ptrack_move_to_tablespace, ptrack_recovery, ptrack_truncate, \
|
||||
ptrack_vacuum, ptrack_vacuum_bits_frozen, ptrack_vacuum_bits_visibility, \
|
||||
ptrack_vacuum_full, ptrack_vacuum_truncate, pgpro560, pgpro589, \
|
||||
false_positive, replica, compression, page, ptrack, archive, \
|
||||
exclude, cfs_backup, cfs_restore, cfs_validate_backup, auth_test
|
||||
@ -15,22 +15,28 @@ def load_tests(loader, tests, pattern):
|
||||
# suite.addTests(loader.loadTestsFromModule(auth_test))
|
||||
suite.addTests(loader.loadTestsFromModule(archive))
|
||||
suite.addTests(loader.loadTestsFromModule(backup_test))
|
||||
suite.addTests(loader.loadTestsFromModule(compatibility))
|
||||
suite.addTests(loader.loadTestsFromModule(cfs_backup))
|
||||
# suite.addTests(loader.loadTestsFromModule(cfs_restore))
|
||||
suite.addTests(loader.loadTestsFromModule(cfs_restore))
|
||||
# suite.addTests(loader.loadTestsFromModule(cfs_validate_backup))
|
||||
# suite.addTests(loader.loadTestsFromModule(logging))
|
||||
suite.addTests(loader.loadTestsFromModule(compression))
|
||||
suite.addTests(loader.loadTestsFromModule(compatibility))
|
||||
suite.addTests(loader.loadTestsFromModule(delete_test))
|
||||
suite.addTests(loader.loadTestsFromModule(delta))
|
||||
suite.addTests(loader.loadTestsFromModule(exclude))
|
||||
suite.addTests(loader.loadTestsFromModule(false_positive))
|
||||
suite.addTests(loader.loadTestsFromModule(init_test))
|
||||
suite.addTests(loader.loadTestsFromModule(merge))
|
||||
suite.addTests(loader.loadTestsFromModule(option_test))
|
||||
suite.addTests(loader.loadTestsFromModule(page))
|
||||
suite.addTests(loader.loadTestsFromModule(ptrack))
|
||||
suite.addTests(loader.loadTestsFromModule(ptrack_clean))
|
||||
suite.addTests(loader.loadTestsFromModule(ptrack_empty))
|
||||
suite.addTests(loader.loadTestsFromModule(ptrack_cluster))
|
||||
suite.addTests(loader.loadTestsFromModule(ptrack_move_to_tablespace))
|
||||
suite.addTests(loader.loadTestsFromModule(ptrack_recovery))
|
||||
suite.addTests(loader.loadTestsFromModule(ptrack_truncate))
|
||||
suite.addTests(loader.loadTestsFromModule(ptrack_vacuum))
|
||||
suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_bits_frozen))
|
||||
suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_bits_visibility))
|
||||
|
@ -93,7 +93,7 @@ class CfsRestoreNoencEmptyTablespaceTest(CfsRestoreBase):
|
||||
)
|
||||
|
||||
try:
|
||||
self.node.start()
|
||||
self.node.slow_start()
|
||||
except ProbackupException as e:
|
||||
self.fail(
|
||||
"ERROR: Instance not started after restore. \n {0} \n {1}".format(
|
||||
@ -151,7 +151,7 @@ class CfsRestoreNoencTest(CfsRestoreBase):
|
||||
"ERROR: File pg_compression not found in tablespace dir"
|
||||
)
|
||||
try:
|
||||
self.node.start()
|
||||
self.node.slow_start()
|
||||
except ProbackupException as e:
|
||||
self.fail(
|
||||
"ERROR: Instance not started after restore. \n {0} \n {1}".format(
|
||||
@ -189,7 +189,7 @@ class CfsRestoreNoencTest(CfsRestoreBase):
|
||||
"ERROR: File pg_compression not found in backup dir"
|
||||
)
|
||||
try:
|
||||
self.node.start()
|
||||
self.node.slow_start()
|
||||
except ProbackupException as e:
|
||||
self.fail(
|
||||
"ERROR: Instance not started after restore. \n {0} \n {1}".format(
|
||||
@ -213,11 +213,13 @@ class CfsRestoreNoencTest(CfsRestoreBase):
|
||||
self.node.cleanup()
|
||||
shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name))
|
||||
|
||||
self.node_new = self.make_simple_node(base_dir="{0}/{1}/node_new_location".format(module_name, self.fname))
|
||||
self.node_new.cleanup()
|
||||
node_new = self.make_simple_node(base_dir="{0}/{1}/node_new_location".format(module_name, self.fname))
|
||||
node_new.cleanup()
|
||||
|
||||
try:
|
||||
self.restore_node(self.backup_dir, 'node', self.node_new, backup_id=self.backup_id)
|
||||
self.restore_node(self.backup_dir, 'node', node_new, backup_id=self.backup_id)
|
||||
node_new.append_conf("postgresql.auto.conf",
|
||||
"port = {0}".format(node_new.port))
|
||||
except ProbackupException as e:
|
||||
self.fail(
|
||||
"ERROR: Restore from full backup failed. \n {0} \n {1}".format(
|
||||
@ -230,7 +232,7 @@ class CfsRestoreNoencTest(CfsRestoreBase):
|
||||
"ERROR: File pg_compression not found in backup dir"
|
||||
)
|
||||
try:
|
||||
self.node_new.start()
|
||||
node_new.slow_start()
|
||||
except ProbackupException as e:
|
||||
self.fail(
|
||||
"ERROR: Instance not started after restore. \n {0} \n {1}".format(
|
||||
@ -240,10 +242,10 @@ class CfsRestoreNoencTest(CfsRestoreBase):
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
repr(self.node.safe_psql("postgres", "SELECT * FROM %s" % 't1')),
|
||||
repr(node_new.safe_psql("postgres", "SELECT * FROM %s" % 't1')),
|
||||
repr(self.table_t1)
|
||||
)
|
||||
self.node_new.cleanup()
|
||||
node_new.cleanup()
|
||||
|
||||
# @unittest.expectedFailure
|
||||
# @unittest.skip("skip")
|
||||
@ -255,11 +257,13 @@ class CfsRestoreNoencTest(CfsRestoreBase):
|
||||
self.node.cleanup()
|
||||
shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name))
|
||||
|
||||
self.node_new = self.make_simple_node(base_dir="{0}/{1}/node_new_location".format(module_name, self.fname))
|
||||
self.node_new.cleanup()
|
||||
node_new = self.make_simple_node(base_dir="{0}/{1}/node_new_location".format(module_name, self.fname))
|
||||
node_new.cleanup()
|
||||
|
||||
try:
|
||||
self.restore_node(self.backup_dir, 'node', self.node_new, backup_id=self.backup_id, options=['-j', '5'])
|
||||
self.restore_node(self.backup_dir, 'node', node_new, backup_id=self.backup_id, options=['-j', '5'])
|
||||
node_new.append_conf("postgresql.auto.conf",
|
||||
"port = {0}".format(node_new.port))
|
||||
except ProbackupException as e:
|
||||
self.fail(
|
||||
"ERROR: Restore from full backup failed. \n {0} \n {1}".format(
|
||||
@ -272,7 +276,7 @@ class CfsRestoreNoencTest(CfsRestoreBase):
|
||||
"ERROR: File pg_compression not found in backup dir"
|
||||
)
|
||||
try:
|
||||
self.node_new.start()
|
||||
node_new.slow_start()
|
||||
except ProbackupException as e:
|
||||
self.fail(
|
||||
"ERROR: Instance not started after restore. \n {0} \n {1}".format(
|
||||
@ -282,10 +286,10 @@ class CfsRestoreNoencTest(CfsRestoreBase):
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
repr(self.node.safe_psql("postgres", "SELECT * FROM %s" % 't1')),
|
||||
repr(node_new.safe_psql("postgres", "SELECT * FROM %s" % 't1')),
|
||||
repr(self.table_t1)
|
||||
)
|
||||
self.node_new.cleanup()
|
||||
node_new.cleanup()
|
||||
|
||||
# @unittest.expectedFailure
|
||||
# @unittest.skip("skip")
|
||||
@ -319,7 +323,7 @@ class CfsRestoreNoencTest(CfsRestoreBase):
|
||||
"ERROR: File pg_compression not found in new tablespace location"
|
||||
)
|
||||
try:
|
||||
self.node.start()
|
||||
self.node.slow_start()
|
||||
except ProbackupException as e:
|
||||
self.fail(
|
||||
"ERROR: Instance not started after restore. \n {0} \n {1}".format(
|
||||
@ -365,7 +369,7 @@ class CfsRestoreNoencTest(CfsRestoreBase):
|
||||
"ERROR: File pg_compression not found in new tablespace location"
|
||||
)
|
||||
try:
|
||||
self.node.start()
|
||||
self.node.slow_start()
|
||||
except ProbackupException as e:
|
||||
self.fail(
|
||||
"ERROR: Instance not started after restore. \n {0} \n {1}".format(
|
||||
|
313
tests/compatibility.py
Normal file
313
tests/compatibility.py
Normal file
@ -0,0 +1,313 @@
|
||||
import unittest
|
||||
import subprocess
|
||||
import os
|
||||
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
|
||||
from sys import exit
|
||||
|
||||
module_name = 'compatibility'
|
||||
|
||||
|
||||
class CompatibilityTest(ProbackupTest, unittest.TestCase):
|
||||
|
||||
# @unittest.expectedFailure
|
||||
# @unittest.skip("skip")
|
||||
def test_backward_compatibility_page(self):
|
||||
"""Description in jira issue PGPRO-434"""
|
||||
fname = self.id().split('.')[3]
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
node = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
set_replication=True,
|
||||
initdb_params=['--data-checksums'],
|
||||
pg_options={
|
||||
'max_wal_senders': '2',
|
||||
'autovacuum': 'off'}
|
||||
)
|
||||
self.init_pb(backup_dir, old_binary=True)
|
||||
self.show_pb(backup_dir)
|
||||
|
||||
self.add_instance(backup_dir, 'node', node, old_binary=True)
|
||||
self.show_pb(backup_dir)
|
||||
|
||||
self.set_archiving(backup_dir, 'node', node, old_binary=True)
|
||||
node.slow_start()
|
||||
|
||||
node.pgbench_init(scale=10)
|
||||
|
||||
# FULL backup with old binary
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node, old_binary=True)
|
||||
|
||||
if self.paranoia:
|
||||
pgdata = self.pgdata_content(node.data_dir)
|
||||
|
||||
self.show_pb(backup_dir)
|
||||
|
||||
self.validate_pb(backup_dir)
|
||||
|
||||
# RESTORE old FULL with new binary
|
||||
node_restored = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node_restored".format(module_name, fname))
|
||||
|
||||
node_restored.cleanup()
|
||||
|
||||
self.restore_node(
|
||||
backup_dir, 'node', node_restored,
|
||||
options=["-j", "4", "--recovery-target-action=promote"])
|
||||
|
||||
if self.paranoia:
|
||||
pgdata_restored = self.pgdata_content(node_restored.data_dir)
|
||||
self.compare_pgdata(pgdata, pgdata_restored)
|
||||
|
||||
# Page BACKUP with old binary
|
||||
pgbench = node.pgbench(
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
options=["-c", "4", "-T", "20"]
|
||||
)
|
||||
pgbench.wait()
|
||||
pgbench.stdout.close()
|
||||
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node, backup_type='page',
|
||||
old_binary=True)
|
||||
|
||||
if self.paranoia:
|
||||
pgdata = self.pgdata_content(node.data_dir)
|
||||
|
||||
node_restored.cleanup()
|
||||
self.restore_node(
|
||||
backup_dir, 'node', node_restored,
|
||||
options=["-j", "4", "--recovery-target-action=promote"])
|
||||
|
||||
if self.paranoia:
|
||||
pgdata_restored = self.pgdata_content(node_restored.data_dir)
|
||||
self.compare_pgdata(pgdata, pgdata_restored)
|
||||
|
||||
# Page BACKUP with new binary
|
||||
pgbench = node.pgbench(
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
options=["-c", "4", "-T", "20"]
|
||||
)
|
||||
pgbench.wait()
|
||||
pgbench.stdout.close()
|
||||
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node, backup_type='page',
|
||||
options=['--log-level-file=verbose'])
|
||||
|
||||
if self.paranoia:
|
||||
pgdata = self.pgdata_content(node.data_dir)
|
||||
|
||||
node_restored.cleanup()
|
||||
|
||||
self.restore_node(
|
||||
backup_dir, 'node', node_restored,
|
||||
options=["-j", "4", "--recovery-target-action=promote"])
|
||||
|
||||
if self.paranoia:
|
||||
pgdata_restored = self.pgdata_content(node_restored.data_dir)
|
||||
self.compare_pgdata(pgdata, pgdata_restored)
|
||||
|
||||
# @unittest.expectedFailure
|
||||
# @unittest.skip("skip")
|
||||
def test_backward_compatibility_delta(self):
|
||||
"""Description in jira issue PGPRO-434"""
|
||||
fname = self.id().split('.')[3]
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
node = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
set_replication=True,
|
||||
initdb_params=['--data-checksums'],
|
||||
pg_options={
|
||||
'max_wal_senders': '2',
|
||||
'autovacuum': 'off'}
|
||||
)
|
||||
self.init_pb(backup_dir, old_binary=True)
|
||||
self.show_pb(backup_dir)
|
||||
|
||||
self.add_instance(backup_dir, 'node', node, old_binary=True)
|
||||
self.show_pb(backup_dir)
|
||||
|
||||
self.set_archiving(backup_dir, 'node', node, old_binary=True)
|
||||
node.slow_start()
|
||||
|
||||
node.pgbench_init(scale=10)
|
||||
|
||||
# FULL backup with old binary
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node, old_binary=True)
|
||||
|
||||
if self.paranoia:
|
||||
pgdata = self.pgdata_content(node.data_dir)
|
||||
|
||||
self.show_pb(backup_dir)
|
||||
|
||||
self.validate_pb(backup_dir)
|
||||
|
||||
# RESTORE old FULL with new binary
|
||||
node_restored = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node_restored".format(module_name, fname))
|
||||
|
||||
node_restored.cleanup()
|
||||
|
||||
self.restore_node(
|
||||
backup_dir, 'node', node_restored,
|
||||
options=["-j", "4", "--recovery-target-action=promote"])
|
||||
|
||||
if self.paranoia:
|
||||
pgdata_restored = self.pgdata_content(node_restored.data_dir)
|
||||
self.compare_pgdata(pgdata, pgdata_restored)
|
||||
|
||||
# Delta BACKUP with old binary
|
||||
pgbench = node.pgbench(
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
options=["-c", "4", "-T", "20"]
|
||||
)
|
||||
pgbench.wait()
|
||||
pgbench.stdout.close()
|
||||
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node, backup_type='delta',
|
||||
old_binary=True)
|
||||
|
||||
if self.paranoia:
|
||||
pgdata = self.pgdata_content(node.data_dir)
|
||||
|
||||
node_restored.cleanup()
|
||||
self.restore_node(
|
||||
backup_dir, 'node', node_restored,
|
||||
options=["-j", "4", "--recovery-target-action=promote"])
|
||||
|
||||
if self.paranoia:
|
||||
pgdata_restored = self.pgdata_content(node_restored.data_dir)
|
||||
self.compare_pgdata(pgdata, pgdata_restored)
|
||||
|
||||
# Delta BACKUP with new binary
|
||||
pgbench = node.pgbench(
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
options=["-c", "4", "-T", "20"]
|
||||
)
|
||||
pgbench.wait()
|
||||
pgbench.stdout.close()
|
||||
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node, backup_type='delta',
|
||||
options=['--log-level-file=verbose'])
|
||||
|
||||
if self.paranoia:
|
||||
pgdata = self.pgdata_content(node.data_dir)
|
||||
|
||||
node_restored.cleanup()
|
||||
|
||||
self.restore_node(
|
||||
backup_dir, 'node', node_restored,
|
||||
options=["-j", "4", "--recovery-target-action=promote"])
|
||||
|
||||
if self.paranoia:
|
||||
pgdata_restored = self.pgdata_content(node_restored.data_dir)
|
||||
self.compare_pgdata(pgdata, pgdata_restored)
|
||||
|
||||
# @unittest.expectedFailure
|
||||
# @unittest.skip("skip")
|
||||
def test_backward_compatibility_ptrack(self):
|
||||
"""Description in jira issue PGPRO-434"""
|
||||
fname = self.id().split('.')[3]
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
node = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
set_replication=True,
|
||||
initdb_params=['--data-checksums'],
|
||||
pg_options={
|
||||
'max_wal_senders': '2',
|
||||
'autovacuum': 'off'}
|
||||
)
|
||||
self.init_pb(backup_dir, old_binary=True)
|
||||
self.show_pb(backup_dir)
|
||||
|
||||
self.add_instance(backup_dir, 'node', node, old_binary=True)
|
||||
self.show_pb(backup_dir)
|
||||
|
||||
self.set_archiving(backup_dir, 'node', node, old_binary=True)
|
||||
node.slow_start()
|
||||
|
||||
node.pgbench_init(scale=10)
|
||||
|
||||
# FULL backup with old binary
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node, old_binary=True)
|
||||
|
||||
if self.paranoia:
|
||||
pgdata = self.pgdata_content(node.data_dir)
|
||||
|
||||
self.show_pb(backup_dir)
|
||||
|
||||
self.validate_pb(backup_dir)
|
||||
|
||||
# RESTORE old FULL with new binary
|
||||
node_restored = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node_restored".format(module_name, fname))
|
||||
|
||||
node_restored.cleanup()
|
||||
|
||||
self.restore_node(
|
||||
backup_dir, 'node', node_restored,
|
||||
options=["-j", "4", "--recovery-target-action=promote"])
|
||||
|
||||
if self.paranoia:
|
||||
pgdata_restored = self.pgdata_content(node_restored.data_dir)
|
||||
self.compare_pgdata(pgdata, pgdata_restored)
|
||||
|
||||
# Delta BACKUP with old binary
|
||||
pgbench = node.pgbench(
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
options=["-c", "4", "-T", "20"]
|
||||
)
|
||||
pgbench.wait()
|
||||
pgbench.stdout.close()
|
||||
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node, backup_type='delta',
|
||||
old_binary=True)
|
||||
|
||||
if self.paranoia:
|
||||
pgdata = self.pgdata_content(node.data_dir)
|
||||
|
||||
node_restored.cleanup()
|
||||
self.restore_node(
|
||||
backup_dir, 'node', node_restored,
|
||||
options=["-j", "4", "--recovery-target-action=promote"])
|
||||
|
||||
if self.paranoia:
|
||||
pgdata_restored = self.pgdata_content(node_restored.data_dir)
|
||||
self.compare_pgdata(pgdata, pgdata_restored)
|
||||
|
||||
# Delta BACKUP with new binary
|
||||
pgbench = node.pgbench(
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
options=["-c", "4", "-T", "20"]
|
||||
)
|
||||
pgbench.wait()
|
||||
pgbench.stdout.close()
|
||||
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node, backup_type='delta',
|
||||
options=['--log-level-file=verbose'])
|
||||
|
||||
if self.paranoia:
|
||||
pgdata = self.pgdata_content(node.data_dir)
|
||||
|
||||
node_restored.cleanup()
|
||||
|
||||
self.restore_node(
|
||||
backup_dir, 'node', node_restored,
|
||||
options=["-j", "4", "--recovery-target-action=promote"])
|
||||
|
||||
if self.paranoia:
|
||||
pgdata_restored = self.pgdata_content(node_restored.data_dir)
|
||||
self.compare_pgdata(pgdata, pgdata_restored)
|
@ -55,6 +55,51 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
|
||||
# @unittest.skip("skip")
|
||||
# @unittest.expectedFailure
|
||||
def test_delete_archive_mix_compress_and_non_compressed_segments(self):
|
||||
"""delete full backups"""
|
||||
fname = self.id().split('.')[3]
|
||||
node = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
initdb_params=['--data-checksums'],
|
||||
pg_options={'wal_level': 'replica'}
|
||||
)
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
self.set_archiving(backup_dir, 'node', node)
|
||||
node.start()
|
||||
|
||||
# full backup
|
||||
self.backup_node(backup_dir, 'node', node)
|
||||
|
||||
pgbench = node.pgbench(
|
||||
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
|
||||
pgbench.wait()
|
||||
pgbench.stdout.close()
|
||||
|
||||
self.backup_node(backup_dir, 'node', node)
|
||||
|
||||
pgbench = node.pgbench(
|
||||
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
|
||||
pgbench.wait()
|
||||
pgbench.stdout.close()
|
||||
|
||||
self.backup_node(backup_dir, 'node', node)
|
||||
|
||||
show_backups = self.show_pb(backup_dir, 'node')
|
||||
id_1 = show_backups[0]['ID']
|
||||
id_2 = show_backups[1]['ID']
|
||||
id_3 = show_backups[2]['ID']
|
||||
self.delete_pb(backup_dir, 'node', id_2)
|
||||
show_backups = self.show_pb(backup_dir, 'node')
|
||||
self.assertEqual(show_backups[0]['ID'], id_1)
|
||||
self.assertEqual(show_backups[1]['ID'], id_3)
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
|
||||
# @unittest.skip("skip")
|
||||
def test_delete_increment_page(self):
|
||||
"""delete increment and all after him"""
|
||||
|
@ -1263,3 +1263,73 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
|
||||
def test_delta_nullified_heap_page_backup(self):
|
||||
"""
|
||||
make node, take full backup, nullify some heap block,
|
||||
take delta backup, restore, physically compare pgdata`s
|
||||
"""
|
||||
fname = self.id().split('.')[3]
|
||||
node = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
initdb_params=['--data-checksums'],
|
||||
pg_options={'wal_level': 'replica'}
|
||||
)
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
self.set_archiving(backup_dir, 'node', node)
|
||||
node.slow_start()
|
||||
|
||||
node.pgbench_init(scale=1)
|
||||
|
||||
file_path = node.safe_psql(
|
||||
"postgres",
|
||||
"select pg_relation_filepath('pgbench_accounts')").rstrip()
|
||||
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"CHECKPOINT")
|
||||
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node)
|
||||
|
||||
# Nullify some block in PostgreSQL
|
||||
file = os.path.join(node.data_dir, file_path)
|
||||
|
||||
with open(file, 'r+b', 0) as f:
|
||||
f.seek(8192)
|
||||
f.write(b"\x00"*8192)
|
||||
f.flush()
|
||||
f.close
|
||||
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node,
|
||||
backup_type='delta', options=["--log-level-file=verbose"])
|
||||
|
||||
if self.paranoia:
|
||||
pgdata = self.pgdata_content(node.data_dir)
|
||||
|
||||
log_file_path = os.path.join(backup_dir, "log", "pg_probackup.log")
|
||||
with open(log_file_path) as f:
|
||||
self.assertTrue("LOG: File: {0} blknum 1, empty page".format(
|
||||
file) in f.read())
|
||||
self.assertFalse("Skipping blknum: 1 in file: {0}".format(
|
||||
file) in f.read())
|
||||
|
||||
# Restore DELTA backup
|
||||
node_restored = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node_restored".format(module_name, fname),
|
||||
)
|
||||
node_restored.cleanup()
|
||||
|
||||
self.restore_node(
|
||||
backup_dir, 'node', node_restored
|
||||
)
|
||||
|
||||
if self.paranoia:
|
||||
pgdata_restored = self.pgdata_content(node_restored.data_dir)
|
||||
self.compare_pgdata(pgdata, pgdata_restored)
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
|
@ -7,7 +7,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
|
||||
|
||||
pg_probackup init -B backup-path
|
||||
|
||||
pg_probackup set-config -B backup-dir --instance=instance_name
|
||||
pg_probackup set-config -B backup-path --instance=instance_name
|
||||
[--log-level-console=log-level-console]
|
||||
[--log-level-file=log-level-file]
|
||||
[--log-filename=log-filename]
|
||||
@ -23,8 +23,9 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
|
||||
[--master-db=db_name] [--master-host=host_name]
|
||||
[--master-port=port] [--master-user=user_name]
|
||||
[--replica-timeout=timeout]
|
||||
[--archive-timeout=timeout]
|
||||
|
||||
pg_probackup show-config -B backup-dir --instance=instance_name
|
||||
pg_probackup show-config -B backup-path --instance=instance_name
|
||||
[--format=format]
|
||||
|
||||
pg_probackup backup -B backup-path -b backup-mode --instance=instance_name
|
||||
@ -50,8 +51,8 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
|
||||
[--master-port=port] [--master-user=user_name]
|
||||
[--replica-timeout=timeout]
|
||||
|
||||
pg_probackup restore -B backup-dir --instance=instance_name
|
||||
[-D pgdata-dir] [-i backup-id] [--progress]
|
||||
pg_probackup restore -B backup-path --instance=instance_name
|
||||
[-D pgdata-path] [-i backup-id] [--progress]
|
||||
[--time=time|--xid=xid|--lsn=lsn [--inclusive=boolean]]
|
||||
[--timeline=timeline] [-T OLDDIR=NEWDIR]
|
||||
[--immediate] [--recovery-target-name=target-name]
|
||||
@ -59,35 +60,37 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
|
||||
[--restore-as-replica]
|
||||
[--no-validate]
|
||||
|
||||
pg_probackup validate -B backup-dir [--instance=instance_name]
|
||||
pg_probackup validate -B backup-path [--instance=instance_name]
|
||||
[-i backup-id] [--progress]
|
||||
[--time=time|--xid=xid|--lsn=lsn [--inclusive=boolean]]
|
||||
[--recovery-target-name=target-name]
|
||||
[--timeline=timeline]
|
||||
|
||||
pg_probackup show -B backup-dir
|
||||
pg_probackup show -B backup-path
|
||||
[--instance=instance_name [-i backup-id]]
|
||||
[--format=format]
|
||||
|
||||
pg_probackup delete -B backup-dir --instance=instance_name
|
||||
pg_probackup delete -B backup-path --instance=instance_name
|
||||
[--wal] [-i backup-id | --expired]
|
||||
|
||||
pg_probackup merge -B backup-dir --instance=instance_name
|
||||
pg_probackup merge -B backup-path --instance=instance_name
|
||||
-i backup-id
|
||||
|
||||
pg_probackup add-instance -B backup-dir -D pgdata-dir
|
||||
pg_probackup add-instance -B backup-path -D pgdata-path
|
||||
--instance=instance_name
|
||||
|
||||
pg_probackup del-instance -B backup-dir
|
||||
pg_probackup del-instance -B backup-path
|
||||
--instance=instance_name
|
||||
|
||||
pg_probackup archive-push -B backup-dir --instance=instance_name
|
||||
pg_probackup archive-push -B backup-path --instance=instance_name
|
||||
--wal-file-path=wal-file-path
|
||||
--wal-file-name=wal-file-name
|
||||
[--compress [--compress-level=compress-level]]
|
||||
[--compress]
|
||||
[--compress-algorithm=compress-algorithm]
|
||||
[--compress-level=compress-level]
|
||||
[--overwrite]
|
||||
|
||||
pg_probackup archive-get -B backup-dir --instance=instance_name
|
||||
pg_probackup archive-get -B backup-path --instance=instance_name
|
||||
--wal-file-path=wal-file-path
|
||||
--wal-file-name=wal-file-name
|
||||
|
||||
|
@ -1 +1 @@
pg_probackup 2.0.18
pg_probackup 2.0.21
@ -13,7 +13,9 @@ class FalsePositive(ProbackupTest, unittest.TestCase):
|
||||
# @unittest.skip("skip")
|
||||
@unittest.expectedFailure
|
||||
def test_validate_wal_lost_segment(self):
|
||||
"""Loose segment located between backups. ExpectedFailure. This is BUG """
|
||||
"""
|
||||
Loose segment located between backups. ExpectedFailure. This is BUG
|
||||
"""
|
||||
fname = self.id().split('.')[3]
|
||||
node = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
@ -31,14 +33,7 @@ class FalsePositive(ProbackupTest, unittest.TestCase):
|
||||
self.backup_node(backup_dir, 'node', node)
|
||||
|
||||
# make some wals
|
||||
node.pgbench_init(scale=2)
|
||||
pgbench = node.pgbench(
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
options=["-c", "4", "-T", "10"]
|
||||
)
|
||||
pgbench.wait()
|
||||
pgbench.stdout.close()
|
||||
node.pgbench_init(scale=5)
|
||||
|
||||
# delete last wal segment
|
||||
wals_dir = os.path.join(backup_dir, "wal", 'node')
|
||||
|
@ -48,6 +48,16 @@ idx_ptrack = {
|
||||
'column': 'tsvector',
|
||||
'relation': 't_heap'
|
||||
},
|
||||
't_hash': {
|
||||
'type': 'hash',
|
||||
'column': 'id',
|
||||
'relation': 't_heap'
|
||||
},
|
||||
't_bloom': {
|
||||
'type': 'bloom',
|
||||
'column': 'id',
|
||||
'relation': 't_heap'
|
||||
}
|
||||
}
|
||||
|
||||
archive_script = """
|
||||
@ -221,11 +231,49 @@ class ProbackupTest(object):
|
||||
self.probackup_path = self.test_env["PGPROBACKUPBIN"]
|
||||
else:
|
||||
if self.verbose:
|
||||
print('PGPROBINDIR is not an executable file')
|
||||
print('PGPROBACKUPBIN is not an executable file')
|
||||
|
||||
if not self.probackup_path:
|
||||
self.probackup_path = os.path.abspath(os.path.join(
|
||||
probackup_path_tmp = os.path.join(
|
||||
testgres.get_pg_config()["BINDIR"], 'pg_probackup')
|
||||
|
||||
if os.path.isfile(probackup_path_tmp):
|
||||
if not os.access(probackup_path_tmp, os.X_OK):
|
||||
print('{0} is not an executable file'.format(
|
||||
probackup_path_tmp))
|
||||
else:
|
||||
self.probackup_path = probackup_path_tmp
|
||||
|
||||
if not self.probackup_path:
|
||||
probackup_path_tmp = os.path.abspath(os.path.join(
|
||||
self.dir_path, "../pg_probackup"))
|
||||
|
||||
if os.path.isfile(probackup_path_tmp):
|
||||
if not os.access(probackup_path_tmp, os.X_OK):
|
||||
print('{0} is not an executable file'.format(
|
||||
probackup_path_tmp))
|
||||
else:
|
||||
self.probackup_path = probackup_path_tmp
|
||||
|
||||
if not self.probackup_path:
|
||||
print('pg_probackup binary is not found')
|
||||
exit(1)
|
||||
|
||||
os.environ['PATH'] = os.path.dirname(
|
||||
self.probackup_path) + ":" + os.environ['PATH']
|
||||
|
||||
self.probackup_old_path = None
|
||||
|
||||
if "PGPROBACKUPBIN_OLD" in self.test_env:
|
||||
if (
|
||||
os.path.isfile(self.test_env["PGPROBACKUPBIN_OLD"]) and
|
||||
os.access(self.test_env["PGPROBACKUPBIN_OLD"], os.X_OK)
|
||||
):
|
||||
self.probackup_old_path = self.test_env["PGPROBACKUPBIN_OLD"]
|
||||
else:
|
||||
if self.verbose:
|
||||
print('PGPROBACKUPBIN_OLD is not an executable file')
|
||||
|
||||
def make_simple_node(
|
||||
self,
|
||||
base_dir=None,
|
||||
@ -405,8 +453,9 @@ class ProbackupTest(object):
|
||||
if idx_dict['ptrack'][PageNum] != 1:
|
||||
if self.verbose:
|
||||
print(
|
||||
'Page Number {0} of type {1} was added,'
|
||||
' but ptrack value is {2}. THIS IS BAD'.format(
|
||||
'File: {0}\n Page Number {1} of type {2} was added,'
|
||||
' but ptrack value is {3}. THIS IS BAD'.format(
|
||||
idx_dict['path'],
|
||||
PageNum, idx_dict['type'],
|
||||
idx_dict['ptrack'][PageNum])
|
||||
)
|
||||
@ -415,13 +464,14 @@ class ProbackupTest(object):
|
||||
continue
|
||||
if PageNum not in idx_dict['new_pages']:
|
||||
# Page is not present now, meaning that relation got smaller
|
||||
# Ptrack should be equal to 0,
|
||||
# Ptrack should be equal to 1,
|
||||
# We are not freaking out about false positive stuff
|
||||
if idx_dict['ptrack'][PageNum] != 0:
|
||||
if idx_dict['ptrack'][PageNum] != 1:
|
||||
if self.verbose:
|
||||
print(
|
||||
'Page Number {0} of type {1} was deleted,'
|
||||
' but ptrack value is {2}'.format(
|
||||
'File: {0}\n Page Number {1} of type {2} was deleted,'
|
||||
' but ptrack value is {3}. THIS IS BAD'.format(
|
||||
idx_dict['path'],
|
||||
PageNum, idx_dict['type'],
|
||||
idx_dict['ptrack'][PageNum])
|
||||
)
|
||||
@ -437,14 +487,15 @@ class ProbackupTest(object):
|
||||
if idx_dict['ptrack'][PageNum] != 1:
|
||||
if self.verbose:
|
||||
print(
|
||||
'Page Number {0} of type {1} was changed,'
|
||||
' but ptrack value is {2}. THIS IS BAD'.format(
|
||||
'File: {0}\n Page Number {1} of type {2} was changed,'
|
||||
' but ptrack value is {3}. THIS IS BAD'.format(
|
||||
idx_dict['path'],
|
||||
PageNum, idx_dict['type'],
|
||||
idx_dict['ptrack'][PageNum])
|
||||
)
|
||||
print(
|
||||
"\n Old checksumm: {0}\n"
|
||||
" New checksumm: {1}".format(
|
||||
" Old checksumm: {0}\n"
|
||||
" New checksumm: {1}".format(
|
||||
idx_dict['old_pages'][PageNum],
|
||||
idx_dict['new_pages'][PageNum])
|
||||
)
|
||||
@ -463,19 +514,17 @@ class ProbackupTest(object):
|
||||
if idx_dict['ptrack'][PageNum] != 0:
|
||||
if self.verbose:
|
||||
print(
|
||||
'Page Number {0} of type {1} was not changed,'
|
||||
' but ptrack value is {2}'.format(
|
||||
'File: {0}\n Page Number {1} of type {2} was not changed,'
|
||||
' but ptrack value is {3}'.format(
|
||||
idx_dict['path'],
|
||||
PageNum, idx_dict['type'],
|
||||
idx_dict['ptrack'][PageNum]
|
||||
)
|
||||
)
|
||||
|
||||
self.assertTrue(
|
||||
success, 'Ptrack does not correspond to state'
|
||||
' of its own pages.\n Gory Details: \n{0}'.format(
|
||||
idx_dict['type'], idx_dict
|
||||
)
|
||||
)
|
||||
return success
|
||||
# self.assertTrue(
|
||||
# success, 'Ptrack has failed to register changes in data files'
|
||||
# )
|
||||
|
||||
def check_ptrack_recovery(self, idx_dict):
|
||||
size = idx_dict['size']
|
||||
@ -507,13 +556,22 @@ class ProbackupTest(object):
|
||||
)
|
||||
)
|
||||
|
||||
def run_pb(self, command, async=False, gdb=False):
|
||||
def run_pb(self, command, async=False, gdb=False, old_binary=False):
|
||||
if not self.probackup_old_path and old_binary:
|
||||
print("PGPROBACKUPBIN_OLD is not set")
|
||||
exit(1)
|
||||
|
||||
if old_binary:
|
||||
binary_path = self.probackup_old_path
|
||||
else:
|
||||
binary_path = self.probackup_path
|
||||
|
||||
try:
|
||||
self.cmd = [' '.join(map(str, [self.probackup_path] + command))]
|
||||
self.cmd = [' '.join(map(str, [binary_path] + command))]
|
||||
if self.verbose:
|
||||
print(self.cmd)
|
||||
if gdb:
|
||||
return GDBobj([self.probackup_path] + command, self.verbose)
|
||||
return GDBobj([binary_path] + command, self.verbose)
|
||||
if async:
|
||||
return subprocess.Popen(
|
||||
self.cmd,
|
||||
@ -523,7 +581,7 @@ class ProbackupTest(object):
|
||||
)
|
||||
else:
|
||||
self.output = subprocess.check_output(
|
||||
[self.probackup_path] + command,
|
||||
[binary_path] + command,
|
||||
stderr=subprocess.STDOUT,
|
||||
env=self.test_env
|
||||
).decode("utf-8")
|
||||
@ -559,37 +617,45 @@ class ProbackupTest(object):
|
||||
except subprocess.CalledProcessError as e:
|
||||
raise ProbackupException(e.output.decode("utf-8"), command)
|
||||
|
||||
def init_pb(self, backup_dir):
|
||||
def init_pb(self, backup_dir, old_binary=False):
|
||||
|
||||
shutil.rmtree(backup_dir, ignore_errors=True)
|
||||
|
||||
return self.run_pb([
|
||||
"init",
|
||||
"-B", backup_dir
|
||||
])
|
||||
],
|
||||
old_binary=old_binary
|
||||
)
|
||||
|
||||
def add_instance(self, backup_dir, instance, node):
|
||||
def add_instance(self, backup_dir, instance, node, old_binary=False):
|
||||
|
||||
return self.run_pb([
|
||||
"add-instance",
|
||||
"--instance={0}".format(instance),
|
||||
"-B", backup_dir,
|
||||
"-D", node.data_dir
|
||||
])
|
||||
],
|
||||
old_binary=old_binary
|
||||
)
|
||||
|
||||
def del_instance(self, backup_dir, instance):
|
||||
def del_instance(self, backup_dir, instance, old_binary=False):
|
||||
|
||||
return self.run_pb([
|
||||
"del-instance",
|
||||
"--instance={0}".format(instance),
|
||||
"-B", backup_dir
|
||||
])
|
||||
],
|
||||
old_binary=old_binary
|
||||
)
|
||||
|
||||
def clean_pb(self, backup_dir):
|
||||
shutil.rmtree(backup_dir, ignore_errors=True)
|
||||
|
||||
def backup_node(
|
||||
self, backup_dir, instance, node, data_dir=False,
|
||||
backup_type="full", options=[], async=False, gdb=False
|
||||
backup_type="full", options=[], async=False, gdb=False,
|
||||
old_binary=False
|
||||
):
|
||||
if not node and not data_dir:
|
||||
print('You must provide ether node or data_dir for backup')
|
||||
@ -612,9 +678,11 @@ class ProbackupTest(object):
|
||||
if backup_type:
|
||||
cmd_list += ["-b", backup_type]
|
||||
|
||||
return self.run_pb(cmd_list + options, async, gdb)
|
||||
return self.run_pb(cmd_list + options, async, gdb, old_binary)
|
||||
|
||||
def merge_backup(self, backup_dir, instance, backup_id):
|
||||
def merge_backup(
|
||||
self, backup_dir, instance, backup_id, async=False,
|
||||
gdb=False, old_binary=False, options=[]):
|
||||
cmd_list = [
|
||||
"merge",
|
||||
"-B", backup_dir,
|
||||
@ -622,11 +690,11 @@ class ProbackupTest(object):
|
||||
"-i", backup_id
|
||||
]
|
||||
|
||||
return self.run_pb(cmd_list)
|
||||
return self.run_pb(cmd_list + options, async, gdb, old_binary)
|
||||
|
||||
def restore_node(
|
||||
self, backup_dir, instance, node=False,
|
||||
data_dir=None, backup_id=None, options=[]
|
||||
data_dir=None, backup_id=None, old_binary=False, options=[]
|
||||
):
|
||||
if data_dir is None:
|
||||
data_dir = node.data_dir
|
||||
@ -640,11 +708,11 @@ class ProbackupTest(object):
|
||||
if backup_id:
|
||||
cmd_list += ["-i", backup_id]
|
||||
|
||||
return self.run_pb(cmd_list + options)
|
||||
return self.run_pb(cmd_list + options, old_binary=old_binary)
|
||||
|
||||
def show_pb(
|
||||
self, backup_dir, instance=None, backup_id=None,
|
||||
options=[], as_text=False, as_json=True
|
||||
options=[], as_text=False, as_json=True, old_binary=False
|
||||
):
|
||||
|
||||
backup_list = []
|
||||
@ -664,11 +732,11 @@ class ProbackupTest(object):
|
||||
|
||||
if as_text:
|
||||
# You should print it when calling as_text=true
|
||||
return self.run_pb(cmd_list + options)
|
||||
return self.run_pb(cmd_list + options, old_binary=old_binary)
|
||||
|
||||
# get show result as list of lines
|
||||
if as_json:
|
||||
data = json.loads(self.run_pb(cmd_list + options))
|
||||
data = json.loads(self.run_pb(cmd_list + options, old_binary=old_binary))
|
||||
# print(data)
|
||||
for instance_data in data:
|
||||
# find specific instance if requested
|
||||
@ -684,7 +752,8 @@ class ProbackupTest(object):
|
||||
backup_list.append(backup)
|
||||
return backup_list
|
||||
else:
|
||||
show_splitted = self.run_pb(cmd_list + options).splitlines()
|
||||
show_splitted = self.run_pb(
|
||||
cmd_list + options, old_binary=old_binary).splitlines()
|
||||
if instance is not None and backup_id is None:
|
||||
# cut the header (ID, Mode, etc.) from show output as a single string
|
||||
header = show_splitted[1:2][0]
|
||||
@ -739,7 +808,7 @@ class ProbackupTest(object):
|
||||
|
||||
def validate_pb(
|
||||
self, backup_dir, instance=None,
|
||||
backup_id=None, options=[]
|
||||
backup_id=None, options=[], old_binary=False
|
||||
):
|
||||
|
||||
cmd_list = [
|
||||
@ -751,9 +820,11 @@ class ProbackupTest(object):
|
||||
if backup_id:
|
||||
cmd_list += ["-i", backup_id]
|
||||
|
||||
return self.run_pb(cmd_list + options)
|
||||
return self.run_pb(cmd_list + options, old_binary=old_binary)
|
||||
|
||||
def delete_pb(self, backup_dir, instance, backup_id=None, options=[]):
|
||||
def delete_pb(
|
||||
self, backup_dir, instance,
|
||||
backup_id=None, options=[], old_binary=False):
|
||||
cmd_list = [
|
||||
"delete",
|
||||
"-B", backup_dir
|
||||
@ -763,24 +834,26 @@ class ProbackupTest(object):
|
||||
if backup_id:
|
||||
cmd_list += ["-i", backup_id]
|
||||
|
||||
return self.run_pb(cmd_list + options)
|
||||
return self.run_pb(cmd_list + options, old_binary=old_binary)
|
||||
|
||||
def delete_expired(self, backup_dir, instance, options=[]):
|
||||
def delete_expired(
|
||||
self, backup_dir, instance, options=[], old_binary=False):
|
||||
cmd_list = [
|
||||
"delete", "--expired", "--wal",
|
||||
"-B", backup_dir,
|
||||
"--instance={0}".format(instance)
|
||||
]
|
||||
return self.run_pb(cmd_list + options)
|
||||
return self.run_pb(cmd_list + options, old_binary=old_binary)
|
||||
|
||||
def show_config(self, backup_dir, instance):
|
||||
def show_config(self, backup_dir, instance, old_binary=False):
|
||||
out_dict = {}
|
||||
cmd_list = [
|
||||
"show-config",
|
||||
"-B", backup_dir,
|
||||
"--instance={0}".format(instance)
|
||||
]
|
||||
res = self.run_pb(cmd_list).splitlines()
|
||||
|
||||
res = self.run_pb(cmd_list, old_binary=old_binary).splitlines()
|
||||
for line in res:
|
||||
if not line.startswith('#'):
|
||||
name, var = line.partition(" = ")[::2]
|
||||
@ -801,7 +874,8 @@ class ProbackupTest(object):
|
||||
return out_dict
|
||||
|
||||
def set_archiving(
|
||||
self, backup_dir, instance, node, replica=False, overwrite=False):
|
||||
self, backup_dir, instance, node, replica=False, overwrite=False, compress=False,
|
||||
old_binary=False):
|
||||
|
||||
if replica:
|
||||
archive_mode = 'always'
|
||||
@ -821,7 +895,7 @@ class ProbackupTest(object):
|
||||
self.probackup_path, backup_dir, instance)
|
||||
|
||||
if os.name == 'posix':
|
||||
if self.archive_compress:
|
||||
if self.archive_compress or compress:
|
||||
archive_command = archive_command + "--compress "
|
||||
|
||||
if overwrite:
|
||||
@ -937,7 +1011,27 @@ class ProbackupTest(object):
|
||||
node.execute("select pg_switch_wal()")
|
||||
else:
|
||||
node.execute("select pg_switch_xlog()")
|
||||
sleep(1)
|
||||
|
||||
def wait_until_replica_catch_with_master(self, master, replica):
|
||||
|
||||
if self.version_to_num(
|
||||
master.safe_psql(
|
||||
"postgres",
|
||||
"show server_version")) >= self.version_to_num('10.0'):
|
||||
master_function = 'pg_catalog.pg_current_wal_lsn()'
|
||||
replica_function = 'pg_catalog.pg_last_wal_replay_lsn()'
|
||||
else:
|
||||
master_function = 'pg_catalog.pg_current_xlog_location()'
|
||||
replica_function = 'pg_catalog.pg_last_xlog_replay_location()'
|
||||
|
||||
lsn = master.safe_psql(
|
||||
'postgres',
|
||||
'SELECT {0}'.format(master_function)).rstrip()
|
||||
|
||||
# Wait until the replica catches up with the master
|
||||
replica.poll_query_until(
|
||||
'postgres',
|
||||
"SELECT '{0}'::pg_lsn <= {1}".format(lsn, replica_function))
|
||||
|
||||
def get_version(self, node):
|
||||
return self.version_to_num(
|
||||
|
169
tests/merge.py
@ -69,7 +69,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
|
||||
self.assertEqual(show_backup["backup-mode"], "PAGE")
|
||||
|
||||
# Merge all backups
|
||||
self.merge_backup(backup_dir, "node", page_id)
|
||||
self.merge_backup(backup_dir, "node", page_id,
|
||||
options=["-j", "4"])
|
||||
show_backups = self.show_pb(backup_dir, "node")
|
||||
|
||||
# sanity check
|
||||
@ -375,7 +376,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
|
||||
'wal_level': 'replica',
|
||||
'max_wal_senders': '2',
|
||||
'checkpoint_timeout': '300s',
|
||||
'autovacuum': 'off'
|
||||
'autovacuum': 'off',
|
||||
'ptrack_enable': 'on'
|
||||
}
|
||||
)
|
||||
node_restored = self.make_simple_node(
|
||||
@ -410,7 +412,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
|
||||
"vacuum t_heap")
|
||||
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node, backup_type='delta')
|
||||
backup_dir, 'node', node, backup_type='ptrack')
|
||||
|
||||
if self.paranoia:
|
||||
pgdata = self.pgdata_content(node.data_dir)
|
||||
@ -452,3 +454,164 @@ class MergeTest(ProbackupTest, unittest.TestCase):
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
|
||||
# @unittest.skip("skip")
|
||||
def test_merge_delta_delete(self):
|
||||
"""
|
||||
Make node, create tablespace with table, take full backup,
|
||||
alter tablespace location, take delta backup, merge full and delta,
|
||||
restore database.
|
||||
"""
|
||||
fname = self.id().split('.')[3]
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
node = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
set_replication=True, initdb_params=['--data-checksums'],
|
||||
pg_options={
|
||||
'wal_level': 'replica',
|
||||
'max_wal_senders': '2',
|
||||
'checkpoint_timeout': '30s',
|
||||
'autovacuum': 'off'
|
||||
}
|
||||
)
|
||||
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
self.set_archiving(backup_dir, 'node', node)
|
||||
node.start()
|
||||
|
||||
self.create_tblspace_in_node(node, 'somedata')
|
||||
|
||||
# FULL backup
|
||||
self.backup_node(backup_dir, 'node', node, options=["--stream"])
|
||||
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"create table t_heap tablespace somedata as select i as id,"
|
||||
" md5(i::text) as text, md5(i::text)::tsvector as tsvector"
|
||||
" from generate_series(0,100) i"
|
||||
)
|
||||
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"delete from t_heap"
|
||||
)
|
||||
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"vacuum t_heap"
|
||||
)
|
||||
|
||||
# DELTA BACKUP
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node,
|
||||
backup_type='delta',
|
||||
options=["--stream"]
|
||||
)
|
||||
|
||||
if self.paranoia:
|
||||
pgdata = self.pgdata_content(node.data_dir)
|
||||
|
||||
backup_id = self.show_pb(backup_dir, "node")[1]["id"]
|
||||
self.merge_backup(backup_dir, "node", backup_id, options=["-j", "4"])
|
||||
|
||||
# RESTORE
|
||||
node_restored = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node_restored".format(module_name, fname)
|
||||
)
|
||||
node_restored.cleanup()
|
||||
|
||||
self.restore_node(
|
||||
backup_dir, 'node', node_restored,
|
||||
options=[
|
||||
"-j", "4",
|
||||
"-T", "{0}={1}".format(
|
||||
self.get_tblspace_path(node, 'somedata'),
|
||||
self.get_tblspace_path(node_restored, 'somedata')
|
||||
)
|
||||
]
|
||||
)
|
||||
|
||||
# GET RESTORED PGDATA AND COMPARE
|
||||
if self.paranoia:
|
||||
pgdata_restored = self.pgdata_content(node_restored.data_dir)
|
||||
self.compare_pgdata(pgdata, pgdata_restored)
|
||||
|
||||
# START RESTORED NODE
|
||||
node_restored.append_conf(
|
||||
'postgresql.auto.conf', 'port = {0}'.format(node_restored.port))
|
||||
node_restored.start()
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
|
||||
# @unittest.skip("skip")
|
||||
def test_continue_failed_merge(self):
|
||||
"""
|
||||
Check that failed MERGE can be continued
|
||||
"""
|
||||
fname = self.id().split('.')[3]
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
node = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
set_replication=True, initdb_params=['--data-checksums'],
|
||||
pg_options={
|
||||
'wal_level': 'replica'
|
||||
}
|
||||
)
|
||||
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
self.set_archiving(backup_dir, 'node', node)
|
||||
node.start()
|
||||
|
||||
# FULL backup
|
||||
self.backup_node(backup_dir, 'node', node)
|
||||
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"create table t_heap as select i as id,"
|
||||
" md5(i::text) as text, md5(i::text)::tsvector as tsvector"
|
||||
" from generate_series(0,1000) i"
|
||||
)
|
||||
|
||||
# DELTA BACKUP
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node, backup_type='delta'
|
||||
)
|
||||
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"delete from t_heap"
|
||||
)
|
||||
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"vacuum t_heap"
|
||||
)
|
||||
|
||||
# DELTA BACKUP
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node, backup_type='delta'
|
||||
)
|
||||
|
||||
if self.paranoia:
|
||||
pgdata = self.pgdata_content(node.data_dir)
|
||||
|
||||
backup_id = self.show_pb(backup_dir, "node")[2]["id"]
|
||||
|
||||
gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True)
|
||||
|
||||
gdb.set_breakpoint('move_file')
|
||||
gdb.run_until_break()
|
||||
|
||||
if gdb.continue_execution_until_break(20) != 'breakpoint-hit':
|
||||
print('Failed to hit breakpoint')
|
||||
exit(1)
|
||||
|
||||
gdb._execute('signal SIGKILL')
|
||||
|
||||
print(self.show_pb(backup_dir, as_text=True, as_json=False))
|
||||
|
||||
# Try to continue failed MERGE
|
||||
self.merge_backup(backup_dir, "node", backup_id)
|
||||
|
377
tests/page.py
@ -149,7 +149,10 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
|
||||
page_result = node.execute("postgres", "SELECT * FROM t_heap")
|
||||
page_backup_id = self.backup_node(
|
||||
backup_dir, 'node', node,
|
||||
backup_type='page', options=['--stream'])
|
||||
backup_type='page', options=['--stream', '-j', '4'])
|
||||
|
||||
if self.paranoia:
|
||||
pgdata = self.pgdata_content(node.data_dir)
|
||||
|
||||
# Drop Node
|
||||
node.cleanup()
|
||||
@ -162,6 +165,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
|
||||
backup_id=full_backup_id, options=["-j", "4"]),
|
||||
'\n Unexpected Error Message: {0}\n'
|
||||
' CMD: {1}'.format(repr(self.output), self.cmd))
|
||||
|
||||
node.slow_start()
|
||||
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
|
||||
self.assertEqual(full_result, full_result_new)
|
||||
@ -175,6 +179,12 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
|
||||
backup_id=page_backup_id, options=["-j", "4"]),
|
||||
'\n Unexpected Error Message: {0}\n'
|
||||
' CMD: {1}'.format(repr(self.output), self.cmd))
|
||||
|
||||
# GET RESTORED PGDATA AND COMPARE
|
||||
if self.paranoia:
|
||||
pgdata_restored = self.pgdata_content(node.data_dir)
|
||||
self.compare_pgdata(pgdata, pgdata_restored)
|
||||
|
||||
node.slow_start()
|
||||
page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
|
||||
self.assertEqual(page_result, page_result_new)
|
||||
@ -211,7 +221,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"create table t_heap as select i as id, md5(i::text) as text, "
|
||||
"md5(i::text)::tsvector as tsvector from generate_series(0,1) i")
|
||||
"md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
|
||||
full_result = node.execute("postgres", "SELECT * FROM t_heap")
|
||||
full_backup_id = self.backup_node(
|
||||
backup_dir, 'node', node, backup_type='full')
|
||||
@ -221,10 +231,14 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
|
||||
"postgres",
|
||||
"insert into t_heap select i as id, "
|
||||
"md5(i::text) as text, md5(i::text)::tsvector as tsvector "
|
||||
"from generate_series(0,2) i")
|
||||
"from generate_series(100, 200) i")
|
||||
page_result = node.execute("postgres", "SELECT * FROM t_heap")
|
||||
page_backup_id = self.backup_node(
|
||||
backup_dir, 'node', node, backup_type='page')
|
||||
backup_dir, 'node', node,
|
||||
backup_type='page', options=["-j", "4"])
|
||||
|
||||
if self.paranoia:
|
||||
pgdata = self.pgdata_content(node.data_dir)
|
||||
|
||||
# Drop Node
|
||||
node.cleanup()
|
||||
@ -241,6 +255,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
|
||||
"--recovery-target-action=promote"]),
|
||||
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
|
||||
repr(self.output), self.cmd))
|
||||
|
||||
node.slow_start()
|
||||
|
||||
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
|
||||
@ -259,6 +274,12 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
|
||||
"--recovery-target-action=promote"]),
|
||||
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
|
||||
repr(self.output), self.cmd))
|
||||
|
||||
# GET RESTORED PGDATA AND COMPARE
|
||||
if self.paranoia:
|
||||
pgdata_restored = self.pgdata_content(node.data_dir)
|
||||
self.compare_pgdata(pgdata, pgdata_restored)
|
||||
|
||||
node.slow_start()
|
||||
|
||||
page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
|
||||
@ -639,3 +660,351 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
|
||||
# Clean after yourself
|
||||
node.cleanup()
|
||||
self.del_test_dir(module_name, fname)
|
||||
|
||||
# @unittest.skip("skip")
|
||||
def test_page_backup_with_lost_wal_segment(self):
|
||||
"""
|
||||
make node with archiving
|
||||
make archive backup, then generate some wals with pgbench,
|
||||
delete latest archived wal segment
|
||||
run page backup, expecting error because of missing wal segment
|
||||
make sure that backup status is 'ERROR'
|
||||
"""
|
||||
fname = self.id().split('.')[3]
|
||||
node = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
initdb_params=['--data-checksums'],
|
||||
pg_options={'wal_level': 'replica'}
|
||||
)
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
self.set_archiving(backup_dir, 'node', node)
|
||||
node.start()
|
||||
|
||||
self.backup_node(backup_dir, 'node', node)
|
||||
|
||||
# make some wals
|
||||
node.pgbench_init(scale=3)
|
||||
|
||||
# delete last wal segment
|
||||
wals_dir = os.path.join(backup_dir, 'wal', 'node')
|
||||
wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(
|
||||
wals_dir, f)) and not f.endswith('.backup')]
|
||||
wals = map(str, wals)
|
||||
file = os.path.join(wals_dir, max(wals))
|
||||
os.remove(file)
|
||||
if self.archive_compress:
|
||||
file = file[:-3]
|
||||
|
||||
# Single-thread PAGE backup
|
||||
try:
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node,
|
||||
backup_type='page')
|
||||
self.assertEqual(
|
||||
1, 0,
|
||||
"Expecting Error because of wal segment disappearance.\n "
|
||||
"Output: {0} \n CMD: {1}".format(
|
||||
self.output, self.cmd))
|
||||
except ProbackupException as e:
|
||||
self.assertTrue(
|
||||
'INFO: Wait for LSN' in e.message and
|
||||
'in archived WAL segment' in e.message and
|
||||
'could not read WAL record at' in e.message and
|
||||
'WAL segment "{0}" is absent\n'.format(
|
||||
file) in e.message,
|
||||
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
|
||||
repr(e.message), self.cmd))
|
||||
|
||||
self.assertEqual(
|
||||
'ERROR',
|
||||
self.show_pb(backup_dir, 'node')[1]['status'],
|
||||
'Backup {0} should have STATUS "ERROR"')
|
||||
|
||||
# Multi-thread PAGE backup
|
||||
try:
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node,
|
||||
backup_type='page',
|
||||
options=["-j", "4", '--log-level-file=verbose'])
|
||||
self.assertEqual(
|
||||
1, 0,
|
||||
"Expecting Error because of wal segment disappearance.\n "
|
||||
"Output: {0} \n CMD: {1}".format(
|
||||
self.output, self.cmd))
|
||||
except ProbackupException as e:
|
||||
self.assertTrue(
|
||||
'INFO: Wait for LSN' in e.message and
|
||||
'in archived WAL segment' in e.message and
|
||||
'could not read WAL record at' in e.message and
|
||||
'WAL segment "{0}" is absent\n'.format(
|
||||
file) in e.message,
|
||||
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
|
||||
repr(e.message), self.cmd))
|
||||
|
||||
self.assertEqual(
|
||||
'ERROR',
|
||||
self.show_pb(backup_dir, 'node')[2]['status'],
|
||||
'Backup {0} should have STATUS "ERROR"')
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
|
||||
# @unittest.skip("skip")
|
||||
def test_page_backup_with_corrupted_wal_segment(self):
|
||||
"""
|
||||
make node with archiving
|
||||
make archive backup, then generate some wals with pgbench,
|
||||
corrupt latest archived wal segment
|
||||
run page backup, expecting error because of corrupted wal segment
|
||||
make sure that backup status is 'ERROR'
|
||||
"""
|
||||
fname = self.id().split('.')[3]
|
||||
node = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
initdb_params=['--data-checksums'],
|
||||
pg_options={'wal_level': 'replica'}
|
||||
)
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
self.set_archiving(backup_dir, 'node', node)
|
||||
node.start()
|
||||
|
||||
self.backup_node(backup_dir, 'node', node)
|
||||
|
||||
# make some wals
|
||||
node.pgbench_init(scale=3)
|
||||
|
||||
# corrupt last wal segment
|
||||
wals_dir = os.path.join(backup_dir, 'wal', 'node')
|
||||
wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(
|
||||
wals_dir, f)) and not f.endswith('.backup')]
|
||||
wals = map(str, wals)
|
||||
# file = os.path.join(wals_dir, max(wals))
|
||||
file = os.path.join(wals_dir, '000000010000000000000004')
|
||||
print(file)
|
||||
with open(file, "rb+", 0) as f:
|
||||
f.seek(42)
|
||||
f.write(b"blah")
|
||||
f.flush()
|
||||
f.close()
|
||||
|
||||
if self.archive_compress:
|
||||
file = file[:-3]
|
||||
|
||||
# Single-thread PAGE backup
|
||||
try:
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node,
|
||||
backup_type='page', options=['--log-level-file=verbose'])
|
||||
self.assertEqual(
|
||||
1, 0,
|
||||
"Expecting Error because of wal segment disappearance.\n "
|
||||
"Output: {0} \n CMD: {1}".format(
|
||||
self.output, self.cmd))
|
||||
except ProbackupException as e:
|
||||
self.assertTrue(
|
||||
'INFO: Wait for LSN' in e.message and
|
||||
'in archived WAL segment' in e.message and
|
||||
'could not read WAL record at' in e.message and
|
||||
'incorrect resource manager data checksum in record at' in e.message and
|
||||
'Possible WAL corruption. Error has occured during reading WAL segment "{0}"'.format(
|
||||
file) in e.message,
|
||||
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
|
||||
repr(e.message), self.cmd))
|
||||
|
||||
self.assertEqual(
|
||||
'ERROR',
|
||||
self.show_pb(backup_dir, 'node')[1]['status'],
|
||||
'Backup {0} should have STATUS "ERROR"')
|
||||
|
||||
# Multi-thread PAGE backup
|
||||
try:
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node,
|
||||
backup_type='page', options=["-j", "4"])
|
||||
self.assertEqual(
|
||||
1, 0,
|
||||
"Expecting Error because of wal segment disappearance.\n "
|
||||
"Output: {0} \n CMD: {1}".format(
|
||||
self.output, self.cmd))
|
||||
except ProbackupException as e:
|
||||
self.assertTrue(
|
||||
'INFO: Wait for LSN' in e.message and
|
||||
'in archived WAL segment' in e.message and
|
||||
'could not read WAL record at' in e.message and
|
||||
'incorrect resource manager data checksum in record at' in e.message and
|
||||
'Possible WAL corruption. Error has occured during reading WAL segment "{0}"'.format(
|
||||
file) in e.message,
|
||||
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
|
||||
repr(e.message), self.cmd))
|
||||
|
||||
self.assertEqual(
|
||||
'ERROR',
|
||||
self.show_pb(backup_dir, 'node')[2]['status'],
|
||||
'Backup {0} should have STATUS "ERROR"')
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
|
||||
# @unittest.skip("skip")
|
||||
def test_page_backup_with_alien_wal_segment(self):
|
||||
"""
|
||||
make two nodes with archiving
|
||||
take archive full backup from both nodes,
|
||||
generate some wals with pgbench on both nodes,
|
||||
move latest archived wal segment from second node to first node's archive
|
||||
run page backup on first node
|
||||
expecting error because of alien wal segment
|
||||
make sure that backup status is 'ERROR'
|
||||
"""
|
||||
fname = self.id().split('.')[3]
|
||||
node = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
initdb_params=['--data-checksums'],
|
||||
pg_options={'wal_level': 'replica'}
|
||||
)
|
||||
alien_node = self.make_simple_node(
|
||||
base_dir="{0}/{1}/alien_node".format(module_name, fname)
|
||||
)
|
||||
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
self.set_archiving(backup_dir, 'node', node)
|
||||
node.start()
|
||||
|
||||
self.add_instance(backup_dir, 'alien_node', alien_node)
|
||||
self.set_archiving(backup_dir, 'alien_node', alien_node)
|
||||
alien_node.start()
|
||||
|
||||
self.backup_node(backup_dir, 'node', node)
|
||||
self.backup_node(backup_dir, 'alien_node', alien_node)
|
||||
|
||||
# make some wals
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"create sequence t_seq; "
|
||||
"create table t_heap as select i as id, "
|
||||
"md5(i::text) as text, "
|
||||
"md5(repeat(i::text,10))::tsvector as tsvector "
|
||||
"from generate_series(0,1000) i;")
|
||||
|
||||
alien_node.safe_psql(
|
||||
"postgres",
|
||||
"create database alien")
|
||||
|
||||
alien_node.safe_psql(
|
||||
"alien",
|
||||
"create sequence t_seq; "
|
||||
"create table t_heap_alien as select i as id, "
|
||||
"md5(i::text) as text, "
|
||||
"md5(repeat(i::text,10))::tsvector as tsvector "
|
||||
"from generate_series(0,1000) i;")
|
||||
|
||||
# move latest wal segment
|
||||
wals_dir = os.path.join(backup_dir, 'wal', 'alien_node')
|
||||
wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(
|
||||
wals_dir, f)) and not f.endswith('.backup')]
|
||||
wals = map(str, wals)
|
||||
filename = max(wals)
|
||||
file = os.path.join(wals_dir, filename)
|
||||
file_destination = os.path.join(
|
||||
os.path.join(backup_dir, 'wal', 'node'), filename)
|
||||
# file = os.path.join(wals_dir, '000000010000000000000004')
|
||||
print(file)
|
||||
print(file_destination)
|
||||
os.rename(file, file_destination)
|
||||
|
||||
if self.archive_compress:
|
||||
file_destination = file_destination[:-3]
|
||||
|
||||
# Single-thread PAGE backup
|
||||
try:
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node,
|
||||
backup_type='page')
|
||||
self.assertEqual(
|
||||
1, 0,
|
||||
"Expecting Error because of alien wal segment.\n "
|
||||
"Output: {0} \n CMD: {1}".format(
|
||||
self.output, self.cmd))
|
||||
except ProbackupException as e:
|
||||
self.assertTrue(
|
||||
'INFO: Wait for LSN' in e.message and
|
||||
'in archived WAL segment' in e.message and
|
||||
'could not read WAL record at' in e.message and
|
||||
'Possible WAL corruption. Error has occured during reading WAL segment "{0}"'.format(
|
||||
file_destination) in e.message,
|
||||
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
|
||||
repr(e.message), self.cmd))
|
||||
|
||||
self.assertEqual(
|
||||
'ERROR',
|
||||
self.show_pb(backup_dir, 'node')[1]['status'],
|
||||
'Backup {0} should have STATUS "ERROR"')
|
||||
|
||||
# Multi-thread PAGE backup
|
||||
try:
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node,
|
||||
backup_type='page', options=["-j", "4"])
|
||||
self.assertEqual(
|
||||
1, 0,
|
||||
"Expecting Error because of alien wal segment.\n "
|
||||
"Output: {0} \n CMD: {1}".format(
|
||||
self.output, self.cmd))
|
||||
except ProbackupException as e:
|
||||
self.assertTrue(
|
||||
'INFO: Wait for LSN' in e.message and
|
||||
'in archived WAL segment' in e.message and
|
||||
'could not read WAL record at' in e.message and
|
||||
'Possible WAL corruption. Error has occured during reading WAL segment "{0}"'.format(
|
||||
file_destination) in e.message,
|
||||
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
|
||||
repr(e.message), self.cmd))
|
||||
|
||||
self.assertEqual(
|
||||
'ERROR',
|
||||
self.show_pb(backup_dir, 'node')[2]['status'],
|
||||
'Backup {0} should have STATUS "ERROR"')
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
|
||||
# @unittest.skip("skip")
|
||||
def test_multithread_page_backup_with_toast(self):
|
||||
"""
|
||||
make node, create toast, do multithread PAGE backup
|
||||
"""
|
||||
fname = self.id().split('.')[3]
|
||||
node = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
initdb_params=['--data-checksums'],
|
||||
pg_options={'wal_level': 'replica'}
|
||||
)
|
||||
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
self.set_archiving(backup_dir, 'node', node)
|
||||
node.start()
|
||||
|
||||
self.backup_node(backup_dir, 'node', node)
|
||||
|
||||
# make some wals
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"create table t3 as select i, "
|
||||
"repeat(md5(i::text),5006056) as fat_attr "
|
||||
"from generate_series(0,70) i")
|
||||
|
||||
# Multi-thread PAGE backup
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node,
|
||||
backup_type='page', options=["-j", "4"])
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
|
173
tests/ptrack.py
@ -32,7 +32,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
self.set_archiving(backup_dir, 'node', node)
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
# PTRACK BACKUP
|
||||
try:
|
||||
@ -82,7 +82,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
self.set_archiving(backup_dir, 'node', node)
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
# FULL BACKUP
|
||||
self.backup_node(backup_dir, 'node', node, options=['--stream'])
|
||||
@ -146,7 +146,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
self.set_archiving(backup_dir, 'node', node)
|
||||
node_restored.cleanup()
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
self.backup_node(backup_dir, 'node', node)
|
||||
con = node.connect("postgres")
|
||||
@ -178,7 +178,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
node_restored.append_conf(
|
||||
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
|
||||
|
||||
node_restored.start()
|
||||
node_restored.slow_start()
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
@ -208,7 +208,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
self.set_archiving(backup_dir, 'node', node)
|
||||
node_restored.cleanup()
|
||||
node.start()
|
||||
node.slow_start()
|
||||
self.create_tblspace_in_node(node, 'somedata')
|
||||
|
||||
self.backup_node(backup_dir, 'node', node)
|
||||
@ -275,7 +275,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
node_restored.append_conf(
|
||||
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
|
||||
|
||||
node_restored.start()
|
||||
node_restored.slow_start()
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
@ -308,7 +308,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
self.set_archiving(backup_dir, 'node', node)
|
||||
node_restored.cleanup()
|
||||
node.start()
|
||||
node.slow_start()
|
||||
self.create_tblspace_in_node(node, 'somedata')
|
||||
|
||||
node.safe_psql(
|
||||
@ -367,7 +367,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
|
||||
node_restored.append_conf(
|
||||
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
|
||||
node_restored.start()
|
||||
node_restored.slow_start()
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
@ -397,7 +397,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
self.set_archiving(backup_dir, 'node', node)
|
||||
node_restored.cleanup()
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
self.backup_node(backup_dir, 'node', node)
|
||||
|
||||
@ -437,7 +437,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
|
||||
node_restored.append_conf(
|
||||
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
|
||||
node_restored.start()
|
||||
node_restored.slow_start()
|
||||
|
||||
# Logical comparison
|
||||
self.assertEqual(
|
||||
@ -468,7 +468,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
self.set_archiving(backup_dir, 'node', node)
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
@ -509,7 +509,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
node.data_dir, ignore_ptrack=False)
|
||||
self.compare_pgdata(pgdata, pgdata_restored)
|
||||
|
||||
node.start()
|
||||
node.slow_start()
|
||||
# Logical comparison
|
||||
self.assertEqual(
|
||||
result,
|
||||
@ -541,7 +541,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
# FULL BACKUP
|
||||
node.safe_psql("postgres", "create sequence t_seq")
|
||||
@ -637,7 +637,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
self.set_archiving(backup_dir, 'node', node)
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
# FULL BACKUP
|
||||
node.safe_psql(
|
||||
@ -743,7 +743,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
# FULL BACKUP
|
||||
node.safe_psql(
|
||||
@ -827,7 +827,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
self.set_archiving(backup_dir, 'node', node)
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
# FULL BACKUP
|
||||
node.safe_psql(
|
||||
@ -898,7 +898,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
# FULL BACKUP
|
||||
node.safe_psql(
|
||||
@ -978,7 +978,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
self.set_archiving(backup_dir, 'node', node)
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
# FULL BACKUP
|
||||
node.safe_psql(
|
||||
@ -1027,7 +1027,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
# START RESTORED NODE
|
||||
node_restored.append_conf(
|
||||
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
|
||||
node_restored.start()
|
||||
node_restored.slow_start()
|
||||
|
||||
# DROP DATABASE DB1
|
||||
node.safe_psql(
|
||||
@ -1057,7 +1057,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
# START RESTORED NODE
|
||||
node_restored.append_conf(
|
||||
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
|
||||
node_restored.start()
|
||||
node_restored.slow_start()
|
||||
|
||||
try:
|
||||
node_restored.safe_psql('db1', 'select 1')
|
||||
@ -1078,6 +1078,110 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
|
||||
# @unittest.skip("skip")
|
||||
def test_create_db_on_replica(self):
|
||||
"""
|
||||
Make node, take full backup, create replica from it,
|
||||
take full backup from replica,
|
||||
create database db1, take ptrack backup from replica,
|
||||
restore database and check its presence
|
||||
"""
|
||||
fname = self.id().split('.')[3]
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
node = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
set_replication=True,
|
||||
initdb_params=['--data-checksums'],
|
||||
pg_options={
|
||||
'wal_level': 'replica',
|
||||
'max_wal_senders': '2',
|
||||
'checkpoint_timeout': '30s',
|
||||
'ptrack_enable': 'on',
|
||||
'autovacuum': 'off'
|
||||
}
|
||||
)
|
||||
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
self.set_archiving(backup_dir, 'node', node)
|
||||
node.slow_start()
|
||||
|
||||
# FULL BACKUP
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"create table t_heap as select i as id, md5(i::text) as text, "
|
||||
"md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
|
||||
|
||||
replica = self.make_simple_node(
|
||||
base_dir="{0}/{1}/replica".format(module_name, fname))
|
||||
replica.cleanup()
|
||||
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node, options=['-j10'])
|
||||
|
||||
self.restore_node(backup_dir, 'node', replica)
|
||||
|
||||
# Add replica
|
||||
self.add_instance(backup_dir, 'replica', replica)
|
||||
self.set_replica(node, replica, 'replica', synchronous=True)
|
||||
self.set_archiving(backup_dir, 'replica', replica, replica=True)
|
||||
replica.slow_start(replica=True)
|
||||
|
||||
self.backup_node(
|
||||
backup_dir, 'replica', replica,
|
||||
options=[
|
||||
'-j10',
|
||||
'--master-host=localhost',
|
||||
'--master-db=postgres',
|
||||
'--master-port={0}'.format(node.port)
|
||||
]
|
||||
)
|
||||
|
||||
# CREATE DATABASE DB1
|
||||
node.safe_psql("postgres", "create database db1")
|
||||
node.safe_psql(
|
||||
"db1",
|
||||
"create table t_heap as select i as id, md5(i::text) as text, "
|
||||
"md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
|
||||
|
||||
# Wait until the replica catches up with the master
|
||||
self.wait_until_replica_catch_with_master(node, replica)
|
||||
replica.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
# PTRACK BACKUP
|
||||
backup_id = self.backup_node(
|
||||
backup_dir, 'replica',
|
||||
replica, backup_type='ptrack',
|
||||
options=[
|
||||
'-j10',
|
||||
'--master-host=localhost',
|
||||
'--master-db=postgres',
|
||||
'--master-port={0}'.format(node.port)
|
||||
]
|
||||
)
|
||||
|
||||
if self.paranoia:
|
||||
pgdata = self.pgdata_content(replica.data_dir)
|
||||
|
||||
# RESTORE
|
||||
node_restored = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node_restored".format(module_name, fname)
|
||||
)
|
||||
node_restored.cleanup()
|
||||
|
||||
self.restore_node(
|
||||
backup_dir, 'replica', node_restored,
|
||||
backup_id=backup_id, options=["-j", "4"])
|
||||
|
||||
# COMPARE PHYSICAL CONTENT
|
||||
if self.paranoia:
|
||||
pgdata_restored = self.pgdata_content(
|
||||
node_restored.data_dir)
|
||||
self.compare_pgdata(pgdata, pgdata_restored)
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
|
||||
# @unittest.skip("skip")
|
||||
def test_alter_table_set_tablespace_ptrack(self):
|
||||
"""Make node, create tablespace with table, take full backup,
|
||||
@ -1098,7 +1202,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
# FULL BACKUP
|
||||
self.create_tblspace_in_node(node, 'somedata')
|
||||
@ -1195,7 +1299,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
# FULL BACKUP
|
||||
self.backup_node(backup_dir, 'node', node, options=["--stream"])
|
||||
@ -1237,7 +1341,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
self.compare_pgdata(pgdata, pgdata_restored)
|
||||
|
||||
# START RESTORED NODE
|
||||
node_restored.start()
|
||||
node_restored.port = node.port
|
||||
node_restored.slow_start()
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
@ -1265,7 +1370,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
self.create_tblspace_in_node(node, 'somedata')
|
||||
|
||||
@ -1307,7 +1412,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
node.cleanup()
|
||||
shutil.rmtree(tblspace, ignore_errors=True)
|
||||
self.restore_node(backup_dir, 'node', node, options=["-j", "4"])
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
tblspc_exist = node.safe_psql(
|
||||
"postgres",
|
||||
@ -1347,7 +1452,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
self.create_tblspace_in_node(node, 'somedata')
|
||||
tblspc_path = self.get_tblspace_path(node, 'somedata')
|
||||
@ -1463,7 +1568,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
self.set_archiving(backup_dir, 'node', node)
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
self.create_tblspace_in_node(node, 'somedata')
|
||||
|
||||
@ -1481,6 +1586,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
|
||||
pgbench = node.pgbench(options=['-T', '150', '-c', '2', '--no-vacuum'])
|
||||
pgbench.wait()
|
||||
|
||||
node.safe_psql("postgres", "checkpoint")
|
||||
|
||||
idx_ptrack['new_size'] = self.get_fork_size(
|
||||
@ -1495,10 +1601,15 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
node,
|
||||
idx_ptrack['path']
|
||||
)
|
||||
self.check_ptrack_sanity(idx_ptrack)
|
||||
|
||||
if not self.check_ptrack_sanity(idx_ptrack):
|
||||
self.assertTrue(
|
||||
False, 'Ptrack has failed to register changes in data files'
|
||||
)
|
||||
|
||||
# GET LOGICAL CONTENT FROM NODE
|
||||
result = node.safe_psql("postgres", "select * from pgbench_accounts")
|
||||
# it's pointless, because hints are ignored by ptrack
|
||||
#result = node.safe_psql("postgres", "select * from pgbench_accounts")
|
||||
# FIRTS PTRACK BACKUP
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node, backup_type='ptrack',
|
||||
@ -1538,7 +1649,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
)
|
||||
|
||||
# COMPARE RESTORED FILES
|
||||
self.assertEqual(result, result_new, 'data is lost')
|
||||
#self.assertEqual(result, result_new, 'data is lost')
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
@ -1562,7 +1673,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
# Take FULL backup to clean every ptrack
|
||||
self.backup_node(
|
||||
|
@ -33,11 +33,12 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
# Create table and indexes
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"create sequence t_seq; create table t_heap tablespace somedata "
|
||||
"create extension bloom; create sequence t_seq; "
|
||||
"create table t_heap tablespace somedata "
|
||||
"as select i as id, nextval('t_seq') as t_seq, "
|
||||
"md5(i::text) as text, "
|
||||
"md5(repeat(i::text,10))::tsvector as tsvector "
|
||||
"from generate_series(0,256) i")
|
||||
"from generate_series(0,2560) i")
|
||||
for i in idx_ptrack:
|
||||
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
|
||||
node.safe_psql(
|
||||
@ -151,10 +152,11 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
# Create table and indexes
|
||||
master.safe_psql(
|
||||
"postgres",
|
||||
"create sequence t_seq; create table t_heap as select i as id, "
|
||||
"create extension bloom; create sequence t_seq; "
|
||||
"create table t_heap as select i as id, "
|
||||
"nextval('t_seq') as t_seq, md5(i::text) as text, "
|
||||
"md5(repeat(i::text,10))::tsvector as tsvector "
|
||||
"from generate_series(0,256) i")
|
||||
"from generate_series(0,2560) i")
|
||||
for i in idx_ptrack:
|
||||
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
|
||||
master.safe_psql(
|
||||
|
@ -14,10 +14,15 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
# @unittest.expectedFailure
|
||||
def test_ptrack_cluster_on_btree(self):
|
||||
fname = self.id().split('.')[3]
|
||||
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
node = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
set_replication=True,
|
||||
initdb_params=['--data-checksums'],
|
||||
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
|
||||
pg_options={
|
||||
'ptrack_enable': 'on',
|
||||
'wal_level': 'replica',
|
||||
'max_wal_senders': '2'})
|
||||
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
@ -28,11 +33,19 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
# Create table and indexes
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, nextval('t_seq') as t_seq, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
|
||||
"create extension bloom; create sequence t_seq; "
|
||||
"create table t_heap tablespace somedata "
|
||||
"as select i as id, nextval('t_seq') as t_seq, "
|
||||
"md5(i::text) as text, md5(repeat(i::text,10))::tsvector "
|
||||
"as tsvector from generate_series(0,2560) i")
|
||||
for i in idx_ptrack:
|
||||
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
|
||||
node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
|
||||
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"create index {0} on {1} using {2}({3}) "
|
||||
"tablespace somedata".format(
|
||||
i, idx_ptrack[i]['relation'],
|
||||
idx_ptrack[i]['type'], idx_ptrack[i]['column']))
|
||||
|
||||
node.safe_psql('postgres', 'vacuum t_heap')
|
||||
node.safe_psql('postgres', 'checkpoint')
|
||||
@ -46,12 +59,15 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
|
||||
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
|
||||
|
||||
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node, options=['-j10', '--stream'])
|
||||
|
||||
node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
|
||||
node.safe_psql('postgres', 'cluster t_heap using t_btree')
|
||||
node.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
# CHECK PTRACK SANITY
|
||||
success = True
|
||||
for i in idx_ptrack:
|
||||
# get new size of heap and indexes. size calculated in pages
|
||||
idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
|
||||
@ -62,10 +78,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
|
||||
# get ptrack for every idx
|
||||
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
|
||||
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
|
||||
node, idx_ptrack[i]['path'],
|
||||
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
|
||||
|
||||
# compare pages and check ptrack sanity
|
||||
self.check_ptrack_sanity(idx_ptrack[i])
|
||||
if not self.check_ptrack_sanity(idx_ptrack[i]):
|
||||
success = False
|
||||
|
||||
self.assertTrue(
|
||||
success, 'Ptrack has failed to register changes in data files'
|
||||
)
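The loop above now folds every index check into a single success flag, which assumes check_ptrack_sanity returns a boolean instead of asserting internally. A rough sketch of that assumed contract, using only the idx_ptrack fields referenced above (the exact comparison is not shown in this diff):

# Sketch only: treat old_pages/new_pages as per-page md5 sequences and
# ptrack as a per-page change bitmap; report False on the first page that
# changed but was not marked in ptrack.
def check_ptrack_sanity_sketch(idx):
    for page in range(idx['new_size']):
        changed = (page >= idx['old_size'] or
                   idx['old_pages'][page] != idx['new_pages'][page])
        if changed and not idx['ptrack'][page]:
            return False
    return True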
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
@ -73,10 +95,15 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
# @unittest.skip("skip")
|
||||
def test_ptrack_cluster_on_gist(self):
|
||||
fname = self.id().split('.')[3]
|
||||
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
node = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
set_replication=True,
|
||||
initdb_params=['--data-checksums'],
|
||||
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
|
||||
pg_options={
|
||||
'ptrack_enable': 'on',
|
||||
'wal_level': 'replica',
|
||||
'max_wal_senders': '2'})
|
||||
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
@ -85,11 +112,18 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
# Create table and indexes
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"create sequence t_seq; create table t_heap as select i as id, nextval('t_seq') as t_seq, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
|
||||
"create extension bloom; create sequence t_seq; "
|
||||
"create table t_heap as select i as id, "
|
||||
"nextval('t_seq') as t_seq, md5(i::text) as text, "
|
||||
"md5(repeat(i::text,10))::tsvector as tsvector "
|
||||
"from generate_series(0,2560) i")
|
||||
for i in idx_ptrack:
|
||||
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
|
||||
node.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
|
||||
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"create index {0} on {1} using {2}({3})".format(
|
||||
i, idx_ptrack[i]['relation'],
|
||||
idx_ptrack[i]['type'], idx_ptrack[i]['column']))
|
||||
|
||||
node.safe_psql('postgres', 'vacuum t_heap')
|
||||
node.safe_psql('postgres', 'checkpoint')
|
||||
@ -103,12 +137,15 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
|
||||
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
|
||||
|
||||
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node, options=['-j10', '--stream'])
|
||||
|
||||
node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
|
||||
node.safe_psql('postgres', 'cluster t_heap using t_gist')
|
||||
node.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
# CHECK PTRACK SANITY
|
||||
success = True
|
||||
for i in idx_ptrack:
|
||||
# get new size of heap and indexes. size calculated in pages
|
||||
idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
|
||||
@ -119,10 +156,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
|
||||
# get ptrack for every idx
|
||||
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
|
||||
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
|
||||
node, idx_ptrack[i]['path'],
|
||||
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
|
||||
|
||||
# Compare pages and check ptrack sanity
|
||||
self.check_ptrack_sanity(idx_ptrack[i])
|
||||
if not self.check_ptrack_sanity(idx_ptrack[i]):
|
||||
success = False
|
||||
|
||||
self.assertTrue(
|
||||
success, 'Ptrack has failed to register changes in data files'
|
||||
)
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
@ -130,10 +173,15 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
# @unittest.skip("skip")
|
||||
def test_ptrack_cluster_on_btree_replica(self):
|
||||
fname = self.id().split('.')[3]
|
||||
master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
|
||||
master = self.make_simple_node(
|
||||
base_dir="{0}/{1}/master".format(module_name, fname),
|
||||
set_replication=True,
|
||||
initdb_params=['--data-checksums'],
|
||||
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
|
||||
pg_options={
|
||||
'ptrack_enable': 'on',
|
||||
'wal_level': 'replica',
|
||||
'max_wal_senders': '2'})
|
||||
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'master', master)
|
||||
@ -141,7 +189,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
|
||||
self.backup_node(backup_dir, 'master', master, options=['--stream'])
|
||||
|
||||
replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
|
||||
replica = self.make_simple_node(
|
||||
base_dir="{0}/{1}/replica".format(module_name, fname))
|
||||
replica.cleanup()
|
||||
|
||||
self.restore_node(backup_dir, 'master', replica)
|
||||
@ -154,15 +203,30 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
# Create table and indexes
|
||||
master.safe_psql(
|
||||
"postgres",
|
||||
"create sequence t_seq; create table t_heap as select i as id, nextval('t_seq') as t_seq, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
|
||||
"create extension bloom; create sequence t_seq; "
|
||||
"create table t_heap as select i as id, "
|
||||
"nextval('t_seq') as t_seq, md5(i::text) as text, "
|
||||
"md5(repeat(i::text,10))::tsvector as tsvector "
|
||||
"from generate_series(0,2560) i")
|
||||
|
||||
for i in idx_ptrack:
|
||||
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
|
||||
master.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
|
||||
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
|
||||
master.safe_psql(
|
||||
"postgres",
|
||||
"create index {0} on {1} using {2}({3})".format(
|
||||
i, idx_ptrack[i]['relation'],
|
||||
idx_ptrack[i]['type'],
|
||||
idx_ptrack[i]['column']))
|
||||
|
||||
master.safe_psql('postgres', 'vacuum t_heap')
|
||||
master.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
self.backup_node(
|
||||
backup_dir, 'replica', replica, options=[
|
||||
'-j10', '--stream', '--master-host=localhost',
|
||||
'--master-db=postgres', '--master-port={0}'.format(
|
||||
master.port)])
|
||||
|
||||
for i in idx_ptrack:
|
||||
# get size of heap and indexes. size calculated in pages
|
||||
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
|
||||
@ -172,13 +236,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
|
||||
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
|
||||
|
||||
self.backup_node(backup_dir, 'replica', replica, options=['-j10', '--stream',
|
||||
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
|
||||
|
||||
master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
|
||||
master.safe_psql('postgres', 'cluster t_heap using t_btree')
|
||||
master.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
# Sync master and replica
|
||||
self.wait_until_replica_catch_with_master(master, replica)
|
||||
replica.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
# CHECK PTRACK SANITY
|
||||
success = True
|
||||
for i in idx_ptrack:
|
||||
# get new size of heap and indexes. size calculated in pages
|
||||
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
|
||||
@ -189,21 +256,32 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
|
||||
# get ptrack for every idx
|
||||
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
replica, idx_ptrack[i]['path'],
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])

# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
if not self.check_ptrack_sanity(idx_ptrack[i]):
success = False

self.assertTrue(
success, 'Ptrack has failed to register changes in data files'
)

# Clean after yourself
self.del_test_dir(module_name, fname)

#@unittest.skip("skip")
# @unittest.skip("skip")
def test_ptrack_cluster_on_gist_replica(self):
fname = self.id().split('.')[3]
master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2'})

backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
@ -211,7 +289,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):

self.backup_node(backup_dir, 'master', master, options=['--stream'])

replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()

self.restore_node(backup_dir, 'master', replica)
@ -224,15 +303,34 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table and indexes
master.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap as select i as id, nextval('t_seq') as t_seq, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
"create extension bloom; create sequence t_seq; "
"create table t_heap as select i as id, "
"nextval('t_seq') as t_seq, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")

for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
master.safe_psql(
"postgres",
"create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'],
idx_ptrack[i]['column']))

master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')

# Sync master and replica
self.wait_until_replica_catch_with_master(master, replica)
replica.safe_psql('postgres', 'checkpoint')

self.backup_node(
backup_dir, 'replica', replica, options=[
'-j10', '--stream', '--master-host=localhost',
'--master-db=postgres', '--master-port={0}'.format(
master.port)])

for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
@ -242,13 +340,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

self.backup_node(backup_dir, 'replica', replica, options=['-j10', '--stream',
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])

master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
master.safe_psql('postgres', 'cluster t_heap using t_gist')
master.safe_psql('postgres', 'checkpoint')

# Sync master and replica
self.wait_until_replica_catch_with_master(master, replica)
replica.safe_psql('postgres', 'checkpoint')

# CHECK PTRACK SANITY
success = True
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
@ -259,10 +360,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
replica, idx_ptrack[i]['path'],
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])

# Compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
if not self.check_ptrack_sanity(idx_ptrack[i]):
success = False

self.assertTrue(
success, 'Ptrack has failed to register changes in data files'
)

# Clean after yourself
self.del_test_dir(module_name, fname)
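Across the hunks above, the tests converge on the same verification pattern: record each relation fork's size and per-page MD5 before the workload, rerun it, re-read the sizes, pages and ptrack bits, and assert only once at the end so a single failing index no longer stops the loop early. Below is a minimal sketch of that pattern as it appears inside these test methods; `node`, `idx_ptrack` and the `self.*` helpers are the ones used throughout, and `check_ptrack_sanity()` is assumed (from the hunks, not shown here) to return a falsy value when a changed page was not registered by ptrack.

```python
# Sketch only: fragment of the shared test-class context; helper signatures
# are assumed from the surrounding hunks.
success = True
for i in idx_ptrack:
    # new size of the heap/index fork, in pages
    idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
    # per-page MD5 after the workload
    idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
        idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
    # ptrack bits covering both the old and the new fork length
    idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
        node, idx_ptrack[i]['path'],
        [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
    # assumed to return a falsy value when ptrack missed a changed page
    if not self.check_ptrack_sanity(idx_ptrack[i]):
        success = False

self.assertTrue(
    success, 'Ptrack has failed to register changes in data files')
```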
183
tests/ptrack_empty.py
Normal file
@ -0,0 +1,183 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
import time


module_name = 'ptrack_clean'


class SimpleTest(ProbackupTest, unittest.TestCase):

    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_ptrack_clean(self):
        """Take backups of every available types and check that PTRACK is clean"""
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'ptrack_enable': 'on',
                'wal_level': 'replica',
                'max_wal_senders': '2',
                'autovacuum': 'off'})
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.start()

        self.create_tblspace_in_node(node, 'somedata')

        # Create table
        node.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap "
            "(id int DEFAULT nextval('t_seq'), text text, tsvector tsvector) "
            "tablespace somedata")

        # Take FULL backup to clean every ptrack
        self.backup_node(
            backup_dir, 'node', node,
            options=['-j10', '--stream'])

        # Create indexes
        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                node.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3}) "
                    "tablespace somedata".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'],
                        idx_ptrack[i]['column']))

        node.safe_psql('postgres', 'checkpoint')

        node_restored = self.make_simple_node(
            base_dir="{0}/{1}/node_restored".format(module_name, fname))
        node_restored.cleanup()

        tblspace1 = self.get_tblspace_path(node, 'somedata')
        tblspace2 = self.get_tblspace_path(node_restored, 'somedata')

        # Take PTRACK backup
        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack',
            options=['-j10', '--log-level-file=verbose'])

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        self.restore_node(
            backup_dir, 'node', node_restored,
            backup_id=backup_id,
            options=[
                "-j", "4",
                "-T{0}={1}".format(tblspace1, tblspace2)]
        )

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_ptrack_clean_replica(self):
        """Take backups of every available types from master and check that PTRACK on replica is clean"""
        fname = self.id().split('.')[3]
        master = self.make_simple_node(
            base_dir="{0}/{1}/master".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'ptrack_enable': 'on',
                'wal_level': 'replica',
                'max_wal_senders': '2'})
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'master', master)
        master.start()

        self.backup_node(backup_dir, 'master', master, options=['--stream'])

        replica = self.make_simple_node(
            base_dir="{0}/{1}/replica".format(module_name, fname))
        replica.cleanup()

        self.restore_node(backup_dir, 'master', replica)

        self.add_instance(backup_dir, 'replica', replica)
        self.set_replica(master, replica, synchronous=True)
        self.set_archiving(backup_dir, 'replica', replica, replica=True)
        replica.start()

        # Create table
        master.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap "
            "(id int DEFAULT nextval('t_seq'), text text, tsvector tsvector)")
        self.wait_until_replica_catch_with_master(master, replica)

        # Take FULL backup
        self.backup_node(
            backup_dir,
            'replica',
            replica,
            options=[
                '-j10', '--stream',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port)])

        # Create indexes
        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                master.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3})".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'],
                        idx_ptrack[i]['column']))

        self.wait_until_replica_catch_with_master(master, replica)

        node_restored = self.make_simple_node(
            base_dir="{0}/{1}/node_restored".format(module_name, fname))
        node_restored.cleanup()

        # Take PTRACK backup
        backup_id = self.backup_node(
            backup_dir,
            'replica',
            replica,
            backup_type='ptrack',
            options=[
                '-j10', '--stream',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port)])

        if self.paranoia:
            pgdata = self.pgdata_content(replica.data_dir)

        self.restore_node(
            backup_dir, 'replica', node_restored,
            backup_id=backup_id,
            options=["-j", "4"]
        )

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        # Clean after yourself
        self.del_test_dir(module_name, fname)
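The new module asserts cleanliness indirectly, by restoring the PTRACK backup and byte-comparing the data directories. The sibling tests later in this series check the same property directly, page by page, right after the FULL backup; a minimal sketch of that direct check follows, with helper names as they appear in those tests and `check_ptrack_clean()` assumed to fail the test if any ptrack bit is still set.

```python
# Sketch only: fragment of the same test-class context; `node` and
# `idx_ptrack` come from the surrounding test setup.
for i in idx_ptrack:
    # ptrack bits for the fork as it was sized at FULL-backup time
    idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
        node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
    # assumed to assert that every bit is zero after a FULL backup
    self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])
```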
@ -12,10 +12,15 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.expectedFailure
def test_ptrack_recovery(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2'})

backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
@ -24,19 +29,31 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
self.create_tblspace_in_node(node, 'somedata')

# Create table and indexes
node.safe_psql("postgres",
"create sequence t_seq; create table t_heap as select i as id, md5(i::text) as text,md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
node.safe_psql(
"postgres",
"create extension bloom; create sequence t_seq; "
"create table t_heap as select i as id, "
"md5(i::text) as text,md5(repeat(i::text,10))::tsvector as "
"tsvector from generate_series(0,2560) i")

for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
node.safe_psql(
"postgres",
"create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'], idx_ptrack[i]['column']))

# Move table and indexes and make checkpoint
for i in idx_ptrack:
if idx_ptrack[i]['type'] == 'heap':
node.safe_psql('postgres', 'alter table {0} set tablespace somedata;'.format(i))
node.safe_psql(
'postgres',
'alter table {0} set tablespace somedata;'.format(i))
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql('postgres', 'alter index {0} set tablespace somedata'.format(i))
node.safe_psql(
'postgres',
'alter index {0} set tablespace somedata'.format(i))
node.safe_psql('postgres', 'checkpoint')

# Check ptrack files
@ -13,10 +13,15 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
# @unittest.expectedFailure
|
||||
def test_ptrack_recovery(self):
|
||||
fname = self.id().split('.')[3]
|
||||
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
node = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
set_replication=True,
|
||||
initdb_params=['--data-checksums'],
|
||||
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
|
||||
pg_options={
|
||||
'ptrack_enable': 'on',
|
||||
'wal_level': 'replica',
|
||||
'max_wal_senders': '2'})
|
||||
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
@ -25,13 +30,22 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
self.create_tblspace_in_node(node, 'somedata')
|
||||
|
||||
# Create table
|
||||
node.safe_psql("postgres",
|
||||
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text,md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"create extension bloom; create sequence t_seq; "
|
||||
"create table t_heap tablespace somedata "
|
||||
"as select i as id, md5(i::text) as text, "
|
||||
"md5(repeat(i::text,10))::tsvector as tsvector "
|
||||
"from generate_series(0,2560) i")
|
||||
|
||||
# Create indexes
|
||||
for i in idx_ptrack:
|
||||
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
|
||||
node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
|
||||
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
|
||||
node.safe_psql(
|
||||
"postgres", "create index {0} on {1} using {2}({3}) "
|
||||
"tablespace somedata".format(
|
||||
i, idx_ptrack[i]['relation'],
|
||||
idx_ptrack[i]['type'], idx_ptrack[i]['column']))
|
||||
|
||||
# get size of heap and indexes. size calculated in pages
|
||||
idx_ptrack[i]['size'] = int(self.get_fork_size(node, i))
|
||||
|
@ -8,29 +8,43 @@ module_name = 'ptrack_truncate'
|
||||
|
||||
class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
|
||||
# @unittest.skip("skip")
|
||||
@unittest.skip("skip")
|
||||
# @unittest.expectedFailure
|
||||
def test_ptrack_truncate(self):
|
||||
fname = self.id().split('.')[3]
|
||||
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
node = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
set_replication=True,
|
||||
initdb_params=['--data-checksums'],
|
||||
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
|
||||
pg_options={
|
||||
'ptrack_enable': 'on',
|
||||
'wal_level': 'replica',
|
||||
'max_wal_senders': '2'})
|
||||
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
self.create_tblspace_in_node(node, 'somedata')
|
||||
|
||||
# Create table and indexes
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
|
||||
"create extension bloom; create sequence t_seq; "
|
||||
"create table t_heap tablespace somedata "
|
||||
"as select i as id, md5(i::text) as text, "
|
||||
"md5(repeat(i::text,10))::tsvector as tsvector "
|
||||
"from generate_series(0,2560) i")
|
||||
|
||||
for i in idx_ptrack:
|
||||
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
|
||||
node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
|
||||
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"create index {0} on {1} using {2}({3}) "
|
||||
"tablespace somedata".format(
|
||||
i, idx_ptrack[i]['relation'],
|
||||
idx_ptrack[i]['type'], idx_ptrack[i]['column']))
|
||||
|
||||
node.safe_psql('postgres', 'truncate t_heap')
|
||||
node.safe_psql('postgres', 'checkpoint')
|
||||
@ -45,7 +59,9 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
|
||||
|
||||
# Make full backup to clean every ptrack
|
||||
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node, options=['-j10', '--stream'])
|
||||
|
||||
for i in idx_ptrack:
|
||||
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
|
||||
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
|
||||
@ -54,21 +70,28 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
|
||||
# @unittest.skip("skip")
|
||||
@unittest.skip("skip")
|
||||
def test_ptrack_truncate_replica(self):
|
||||
fname = self.id().split('.')[3]
|
||||
master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
|
||||
master = self.make_simple_node(
|
||||
base_dir="{0}/{1}/master".format(module_name, fname),
|
||||
set_replication=True,
|
||||
initdb_params=['--data-checksums'],
|
||||
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
|
||||
pg_options={
|
||||
'ptrack_enable': 'on',
|
||||
'wal_level': 'replica',
|
||||
'max_wal_senders': '2',
|
||||
'checkpoint_timeout': '30'})
|
||||
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'master', master)
|
||||
master.start()
|
||||
master.slow_start()
|
||||
|
||||
self.backup_node(backup_dir, 'master', master, options=['--stream'])
|
||||
|
||||
replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
|
||||
replica = self.make_simple_node(
|
||||
base_dir="{0}/{1}/replica".format(module_name, fname))
|
||||
replica.cleanup()
|
||||
|
||||
self.restore_node(backup_dir, 'master', replica)
|
||||
@ -76,18 +99,27 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
self.add_instance(backup_dir, 'replica', replica)
|
||||
self.set_replica(master, replica, 'replica', synchronous=True)
|
||||
self.set_archiving(backup_dir, 'replica', replica, replica=True)
|
||||
replica.start()
|
||||
replica.slow_start(replica=True)
|
||||
|
||||
# Create table and indexes
|
||||
self.create_tblspace_in_node(master, 'somedata')
|
||||
master.safe_psql(
|
||||
"postgres",
|
||||
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
|
||||
"create extension bloom; create sequence t_seq; "
|
||||
"create table t_heap tablespace somedata "
|
||||
"as select i as id, md5(i::text) as text, "
|
||||
"md5(repeat(i::text,10))::tsvector as tsvector "
|
||||
"from generate_series(0,2560) i")
|
||||
for i in idx_ptrack:
|
||||
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
|
||||
master.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
|
||||
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
|
||||
master.safe_psql(
|
||||
"postgres", "create index {0} on {1} using {2}({3}) "
|
||||
"tablespace somedata".format(
|
||||
i, idx_ptrack[i]['relation'],
|
||||
idx_ptrack[i]['type'], idx_ptrack[i]['column']))
|
||||
|
||||
replica.safe_psql('postgres', 'truncate t_heap')
|
||||
# Sync master and replica
|
||||
self.wait_until_replica_catch_with_master(master, replica)
|
||||
replica.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
for i in idx_ptrack:
|
||||
@ -100,17 +132,28 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
|
||||
|
||||
# Make full backup to clean every ptrack
|
||||
self.backup_node(backup_dir, 'replica', replica, options=['-j10', '--stream'])
|
||||
self.backup_node(
|
||||
backup_dir, 'replica', replica,
|
||||
options=[
|
||||
'-j10',
|
||||
'--master-host=localhost',
|
||||
'--master-db=postgres',
|
||||
'--master-port={0}'.format(master.port)])
|
||||
|
||||
for i in idx_ptrack:
|
||||
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
|
||||
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
|
||||
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])
|
||||
|
||||
# Delete some rows, vacuum it and make checkpoint
|
||||
master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
|
||||
master.safe_psql('postgres', 'vacuum t_heap')
|
||||
master.safe_psql('postgres', 'truncate t_heap')
|
||||
master.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
# Sync master and replica
|
||||
self.wait_until_replica_catch_with_master(master, replica)
|
||||
replica.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
# CHECK PTRACK SANITY
|
||||
success = True
|
||||
for i in idx_ptrack:
|
||||
# get new size of heap and indexes and calculate it in pages
|
||||
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
|
||||
@ -121,10 +164,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
|
||||
# get ptrack for every idx
|
||||
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
|
||||
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
|
||||
replica, idx_ptrack[i]['path'],
|
||||
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
|
||||
|
||||
# compare pages and check ptrack sanity
|
||||
self.check_ptrack_sanity(idx_ptrack[i])
|
||||
if not self.check_ptrack_sanity(idx_ptrack[i]):
|
||||
success = False
|
||||
|
||||
self.assertTrue(
|
||||
success, 'Ptrack has failed to register changes in data files'
|
||||
)
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
|
@ -12,25 +12,39 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
# @unittest.expectedFailure
|
||||
def test_ptrack_vacuum(self):
|
||||
fname = self.id().split('.')[3]
|
||||
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
node = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
set_replication=True,
|
||||
initdb_params=['--data-checksums'],
|
||||
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
|
||||
pg_options={
|
||||
'ptrack_enable': 'on',
|
||||
'wal_level': 'replica',
|
||||
'max_wal_senders': '2'})
|
||||
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
self.create_tblspace_in_node(node, 'somedata')
|
||||
|
||||
# Create table and indexes
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
|
||||
"create extension bloom; create sequence t_seq; "
|
||||
"create table t_heap tablespace somedata "
|
||||
"as select i as id, md5(i::text) as text, "
|
||||
"md5(repeat(i::text,10))::tsvector as tsvector "
|
||||
"from generate_series(0,2560) i")
|
||||
for i in idx_ptrack:
|
||||
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
|
||||
node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
|
||||
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"create index {0} on {1} using {2}({3}) "
|
||||
"tablespace somedata".format(
|
||||
i, idx_ptrack[i]['relation'],
|
||||
idx_ptrack[i]['type'],
|
||||
idx_ptrack[i]['column']))
|
||||
|
||||
node.safe_psql('postgres', 'vacuum t_heap')
|
||||
node.safe_psql('postgres', 'checkpoint')
|
||||
@ -45,7 +59,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
|
||||
|
||||
# Make full backup to clean every ptrack
|
||||
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node, options=['-j10', '--stream'])
|
||||
for i in idx_ptrack:
|
||||
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
|
||||
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
|
||||
@ -56,6 +71,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
node.safe_psql('postgres', 'vacuum t_heap')
|
||||
node.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
# CHECK PTRACK SANITY
|
||||
success = True
|
||||
for i in idx_ptrack:
|
||||
# get new size of heap and indexes and calculate it in pages
|
||||
idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
|
||||
@ -66,10 +83,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
|
||||
# get ptrack for every idx
|
||||
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
|
||||
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
|
||||
node, idx_ptrack[i]['path'],
|
||||
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
|
||||
|
||||
# compare pages and check ptrack sanity
|
||||
self.check_ptrack_sanity(idx_ptrack[i])
|
||||
if not self.check_ptrack_sanity(idx_ptrack[i]):
|
||||
success = False
|
||||
|
||||
self.assertTrue(
|
||||
success, 'Ptrack has failed to register changes in data files'
|
||||
)
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
@ -77,18 +100,25 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
# @unittest.skip("skip")
|
||||
def test_ptrack_vacuum_replica(self):
|
||||
fname = self.id().split('.')[3]
|
||||
master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
|
||||
master = self.make_simple_node(
|
||||
base_dir="{0}/{1}/master".format(module_name, fname),
|
||||
set_replication=True,
|
||||
initdb_params=['--data-checksums'],
|
||||
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
|
||||
pg_options={
|
||||
'ptrack_enable': 'on',
|
||||
'wal_level': 'replica',
|
||||
'max_wal_senders': '2',
|
||||
'checkpoint_timeout': '30'})
|
||||
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'master', master)
|
||||
master.start()
|
||||
master.slow_start()
|
||||
|
||||
self.backup_node(backup_dir, 'master', master, options=['--stream'])
|
||||
|
||||
replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
|
||||
replica = self.make_simple_node(
|
||||
base_dir="{0}/{1}/replica".format(module_name, fname))
|
||||
replica.cleanup()
|
||||
|
||||
self.restore_node(backup_dir, 'master', replica)
|
||||
@ -96,20 +126,43 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
self.add_instance(backup_dir, 'replica', replica)
|
||||
self.set_replica(master, replica, 'replica', synchronous=True)
|
||||
self.set_archiving(backup_dir, 'replica', replica, replica=True)
|
||||
replica.start()
|
||||
replica.slow_start(replica=True)
|
||||
|
||||
# Create table and indexes
|
||||
master.safe_psql(
|
||||
"postgres",
|
||||
"create sequence t_seq; create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
|
||||
"create extension bloom; create sequence t_seq; "
|
||||
"create table t_heap as select i as id, "
|
||||
"md5(i::text) as text, md5(repeat(i::text,10))::tsvector "
|
||||
"as tsvector from generate_series(0,2560) i")
|
||||
|
||||
for i in idx_ptrack:
|
||||
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
|
||||
master.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
|
||||
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
|
||||
master.safe_psql(
|
||||
"postgres",
|
||||
"create index {0} on {1} using {2}({3})".format(
|
||||
i, idx_ptrack[i]['relation'],
|
||||
idx_ptrack[i]['type'], idx_ptrack[i]['column']))
|
||||
|
||||
master.safe_psql('postgres', 'vacuum t_heap')
|
||||
master.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
# Sync master and replica
|
||||
self.wait_until_replica_catch_with_master(master, replica)
|
||||
replica.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
# Make FULL backup to clean every ptrack
|
||||
self.backup_node(
|
||||
backup_dir, 'replica', replica, options=[
|
||||
'-j10', '--master-host=localhost',
|
||||
'--master-db=postgres',
|
||||
'--master-port={0}'.format(master.port)])
|
||||
|
||||
for i in idx_ptrack:
|
||||
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
|
||||
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
|
||||
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])
|
||||
|
||||
for i in idx_ptrack:
|
||||
# get fork size and calculate it in pages
|
||||
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
|
||||
@ -119,20 +172,17 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
|
||||
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
|
||||
|
||||
# Make FULL backup to clean every ptrack
|
||||
self.backup_node(backup_dir, 'replica', replica, options=['-j10',
|
||||
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
|
||||
for i in idx_ptrack:
|
||||
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
|
||||
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
|
||||
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])
|
||||
|
||||
# Delete some rows, vacuum it and make checkpoint
|
||||
master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
|
||||
master.safe_psql('postgres', 'vacuum t_heap')
|
||||
master.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
# Sync master and replica
|
||||
self.wait_until_replica_catch_with_master(master, replica)
|
||||
replica.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
# CHECK PTRACK SANITY
|
||||
success = True
|
||||
for i in idx_ptrack:
|
||||
# get new size of heap and indexes and calculate it in pages
|
||||
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
|
||||
@ -143,10 +193,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
|
||||
# get ptrack for every idx
|
||||
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
|
||||
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
|
||||
replica, idx_ptrack[i]['path'],
|
||||
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
|
||||
|
||||
# compare pages and check ptrack sanity
|
||||
self.check_ptrack_sanity(idx_ptrack[i])
|
||||
if not self.check_ptrack_sanity(idx_ptrack[i]):
|
||||
success = False
|
||||
|
||||
self.assertTrue(
|
||||
success, 'Ptrack has failed to register changes in data files'
|
||||
)
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
|
@ -12,28 +12,48 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
# @unittest.expectedFailure
|
||||
def test_ptrack_vacuum_bits_frozen(self):
|
||||
fname = self.id().split('.')[3]
|
||||
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
node = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
set_replication=True,
|
||||
initdb_params=['--data-checksums'],
|
||||
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
|
||||
pg_options={
|
||||
'ptrack_enable': 'on',
|
||||
'wal_level': 'replica',
|
||||
'max_wal_senders': '2'})
|
||||
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
self.create_tblspace_in_node(node, 'somedata')
|
||||
|
||||
# Create table and indexes
|
||||
res = node.safe_psql(
|
||||
"postgres",
|
||||
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
|
||||
"create extension bloom; create sequence t_seq; "
|
||||
"create table t_heap tablespace somedata "
|
||||
"as select i as id, md5(i::text) as text, "
|
||||
"md5(repeat(i::text,10))::tsvector as tsvector "
|
||||
"from generate_series(0,2560) i")
|
||||
for i in idx_ptrack:
|
||||
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
|
||||
node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
|
||||
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"create index {0} on {1} using {2}({3}) "
|
||||
"tablespace somedata".format(
|
||||
i, idx_ptrack[i]['relation'],
|
||||
idx_ptrack[i]['type'],
|
||||
idx_ptrack[i]['column']))
|
||||
|
||||
node.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node, options=['-j10', '--stream'])
|
||||
|
||||
node.safe_psql('postgres', 'vacuum freeze t_heap')
|
||||
node.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
for i in idx_ptrack:
|
||||
# get size of heap and indexes. size calculated in pages
|
||||
idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
|
||||
@ -43,11 +63,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
|
||||
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
|
||||
|
||||
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
|
||||
|
||||
node.safe_psql('postgres', 'vacuum freeze t_heap')
|
||||
node.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
# CHECK PTRACK SANITY
|
||||
success = True
|
||||
for i in idx_ptrack:
|
||||
# get new size of heap and indexes. size calculated in pages
|
||||
idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
|
||||
@ -58,10 +75,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
|
||||
# get ptrack for every idx
|
||||
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
|
||||
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
|
||||
node, idx_ptrack[i]['path'],
|
||||
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
|
||||
|
||||
# compare pages and check ptrack sanity
|
||||
self.check_ptrack_sanity(idx_ptrack[i])
|
||||
if not self.check_ptrack_sanity(idx_ptrack[i]):
|
||||
success = False
|
||||
|
||||
self.assertTrue(
|
||||
success, 'Ptrack has failed to register changes in data files'
|
||||
)
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
@ -69,18 +92,24 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
# @unittest.skip("skip")
|
||||
def test_ptrack_vacuum_bits_frozen_replica(self):
|
||||
fname = self.id().split('.')[3]
|
||||
master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
|
||||
master = self.make_simple_node(
|
||||
base_dir="{0}/{1}/master".format(module_name, fname),
|
||||
set_replication=True,
|
||||
initdb_params=['--data-checksums'],
|
||||
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
|
||||
pg_options={
|
||||
'ptrack_enable': 'on',
|
||||
'wal_level': 'replica',
|
||||
'max_wal_senders': '2'})
|
||||
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'master', master)
|
||||
master.start()
|
||||
master.slow_start()
|
||||
|
||||
self.backup_node(backup_dir, 'master', master, options=['--stream'])
|
||||
|
||||
replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
|
||||
replica = self.make_simple_node(
|
||||
base_dir="{0}/{1}/replica".format(module_name, fname))
|
||||
replica.cleanup()
|
||||
|
||||
self.restore_node(backup_dir, 'master', replica)
|
||||
@ -88,19 +117,39 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
self.add_instance(backup_dir, 'replica', replica)
|
||||
self.set_replica(master, replica, synchronous=True)
|
||||
self.set_archiving(backup_dir, 'replica', replica, replica=True)
|
||||
replica.start()
|
||||
replica.slow_start(replica=True)
|
||||
|
||||
# Create table and indexes
|
||||
master.safe_psql(
|
||||
"postgres",
|
||||
"create sequence t_seq; create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
|
||||
"create extension bloom; create sequence t_seq; "
|
||||
"create table t_heap as select i as id, "
|
||||
"md5(i::text) as text, md5(repeat(i::text,10))::tsvector "
|
||||
"as tsvector from generate_series(0,2560) i")
|
||||
for i in idx_ptrack:
|
||||
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
|
||||
master.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
|
||||
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
|
||||
master.safe_psql(
|
||||
"postgres",
|
||||
"create index {0} on {1} using {2}({3})".format(
|
||||
i, idx_ptrack[i]['relation'],
|
||||
idx_ptrack[i]['type'],
|
||||
idx_ptrack[i]['column']))
|
||||
|
||||
master.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
# Sync master and replica
|
||||
self.wait_until_replica_catch_with_master(master, replica)
|
||||
replica.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
# Take PTRACK backup to clean every ptrack
|
||||
self.backup_node(
|
||||
backup_dir, 'replica', replica,
|
||||
options=[
|
||||
'-j10',
|
||||
'--master-host=localhost',
|
||||
'--master-db=postgres',
|
||||
'--master-port={0}'.format(master.port)])
|
||||
|
||||
for i in idx_ptrack:
|
||||
# get size of heap and indexes. size calculated in pages
|
||||
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
|
||||
@ -110,13 +159,15 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
|
||||
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
|
||||
|
||||
# Take PTRACK backup to clean every ptrack
|
||||
self.backup_node(backup_dir, 'replica', replica, options=['-j10',
|
||||
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
|
||||
|
||||
master.safe_psql('postgres', 'vacuum freeze t_heap')
|
||||
master.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
# Sync master and replica
|
||||
self.wait_until_replica_catch_with_master(master, replica)
|
||||
replica.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
# CHECK PTRACK SANITY
|
||||
success = True
|
||||
for i in idx_ptrack:
|
||||
# get new size of heap and indexes. size calculated in pages
|
||||
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
|
||||
@ -127,10 +178,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
|
||||
# get ptrack for every idx
|
||||
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
|
||||
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
|
||||
replica, idx_ptrack[i]['path'],
|
||||
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
|
||||
|
||||
# compare pages and check ptrack sanity
|
||||
self.check_ptrack_sanity(idx_ptrack[i])
|
||||
if not self.check_ptrack_sanity(idx_ptrack[i]):
|
||||
success = False
|
||||
|
||||
self.assertTrue(
|
||||
success, 'Ptrack has failed to register changes in data files'
|
||||
)
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
|
@ -12,28 +12,45 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
# @unittest.expectedFailure
|
||||
def test_ptrack_vacuum_bits_visibility(self):
|
||||
fname = self.id().split('.')[3]
|
||||
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
node = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
set_replication=True,
|
||||
initdb_params=['--data-checksums'],
|
||||
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
|
||||
pg_options={
|
||||
'ptrack_enable': 'on',
|
||||
'wal_level': 'replica',
|
||||
'max_wal_senders': '2'})
|
||||
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
self.create_tblspace_in_node(node, 'somedata')
|
||||
|
||||
# Create table and indexes
|
||||
res = node.safe_psql(
|
||||
"postgres",
|
||||
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
|
||||
"create extension bloom; create sequence t_seq; "
|
||||
"create table t_heap tablespace somedata "
|
||||
"as select i as id, md5(i::text) as text, "
|
||||
"md5(repeat(i::text,10))::tsvector as tsvector "
|
||||
"from generate_series(0,2560) i")
|
||||
|
||||
for i in idx_ptrack:
|
||||
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
|
||||
node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
|
||||
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"create index {0} on {1} using {2}({3}) "
|
||||
"tablespace somedata".format(
|
||||
i, idx_ptrack[i]['relation'],
|
||||
idx_ptrack[i]['type'], idx_ptrack[i]['column']))
|
||||
|
||||
node.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node, options=['-j10', '--stream'])
|
||||
|
||||
for i in idx_ptrack:
|
||||
# get size of heap and indexes. size calculated in pages
|
||||
idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
|
||||
@ -43,11 +60,11 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
|
||||
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
|
||||
|
||||
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
|
||||
|
||||
node.safe_psql('postgres', 'vacuum t_heap')
|
||||
node.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
# CHECK PTRACK SANITY
|
||||
success = True
|
||||
for i in idx_ptrack:
|
||||
# get new size of heap and indexes. size calculated in pages
|
||||
idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
|
||||
@ -58,10 +75,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
|
||||
# get ptrack for every idx
|
||||
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
|
||||
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
|
||||
node, idx_ptrack[i]['path'],
|
||||
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
|
||||
|
||||
# compare pages and check ptrack sanity
|
||||
self.check_ptrack_sanity(idx_ptrack[i])
|
||||
if not self.check_ptrack_sanity(idx_ptrack[i]):
|
||||
success = False
|
||||
|
||||
self.assertTrue(
|
||||
success, 'Ptrack has failed to register changes in data files'
|
||||
)
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
|
@ -1,6 +1,7 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
import time


module_name = 'ptrack_vacuum_full'
@ -12,29 +13,44 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
# @unittest.expectedFailure
|
||||
def test_ptrack_vacuum_full(self):
|
||||
fname = self.id().split('.')[3]
|
||||
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
node = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
set_replication=True,
|
||||
initdb_params=['--data-checksums'],
|
||||
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
|
||||
pg_options={
|
||||
'ptrack_enable': 'on',
|
||||
'wal_level': 'replica',
|
||||
'max_wal_senders': '2'})
|
||||
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
self.create_tblspace_in_node(node, 'somedata')
|
||||
|
||||
# Create table and indexes
|
||||
res = node.safe_psql(
|
||||
"postgres",
|
||||
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,127) i")
|
||||
"create extension bloom; create sequence t_seq; "
|
||||
"create table t_heap tablespace somedata "
|
||||
"as select i as id, md5(i::text) as text, "
|
||||
"md5(repeat(i::text,10))::tsvector as tsvector "
|
||||
"from generate_series(0,2560) i")
|
||||
for i in idx_ptrack:
|
||||
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
|
||||
node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
|
||||
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
|
||||
node.safe_psql(
|
||||
"postgres", "create index {0} on {1} "
|
||||
"using {2}({3}) tablespace somedata".format(
|
||||
i, idx_ptrack[i]['relation'],
|
||||
idx_ptrack[i]['type'], idx_ptrack[i]['column']))
|
||||
|
||||
node.safe_psql('postgres', 'vacuum t_heap')
|
||||
node.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node, options=['-j10', '--stream'])
|
||||
|
||||
for i in idx_ptrack:
|
||||
# get size of heap and indexes. size calculated in pages
|
||||
idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
|
||||
@ -44,12 +60,12 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
|
||||
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
|
||||
|
||||
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
|
||||
|
||||
node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
|
||||
node.safe_psql('postgres', 'vacuum full t_heap')
|
||||
node.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
# CHECK PTRACK SANITY
|
||||
success = True
|
||||
for i in idx_ptrack:
|
||||
# get new size of heap and indexes. size calculated in pages
|
||||
idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
|
||||
@ -60,10 +76,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
|
||||
# get ptrack for every idx
|
||||
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
|
||||
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
|
||||
node, idx_ptrack[i]['path'],
|
||||
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
|
||||
|
||||
# compare pages and check ptrack sanity, the most important part
|
||||
self.check_ptrack_sanity(idx_ptrack[i])
|
||||
if not self.check_ptrack_sanity(idx_ptrack[i]):
|
||||
success = False
|
||||
|
||||
self.assertTrue(
|
||||
success, 'Ptrack has failed to register changes in data files'
|
||||
)
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
@ -72,17 +94,23 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
# @unittest.expectedFailure
|
||||
def test_ptrack_vacuum_full_replica(self):
|
||||
fname = self.id().split('.')[3]
|
||||
master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
|
||||
master = self.make_simple_node(
|
||||
base_dir="{0}/{1}/master".format(module_name, fname),
|
||||
set_replication=True,
|
||||
initdb_params=['--data-checksums'],
|
||||
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
|
||||
pg_options={
|
||||
'ptrack_enable': 'on', 'wal_level': 'replica',
|
||||
'max_wal_senders': '2', 'autovacuum': 'off',
|
||||
'checkpoint_timeout': '30s'}
|
||||
)
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'master', master)
|
||||
master.start()
|
||||
master.slow_start()
|
||||
|
||||
self.backup_node(backup_dir, 'master', master, options=['--stream'])
|
||||
replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
|
||||
replica = self.make_simple_node(
|
||||
base_dir="{0}/{1}/replica".format(module_name, fname))
|
||||
replica.cleanup()
|
||||
|
||||
self.restore_node(backup_dir, 'master', replica)
|
||||
@ -90,20 +118,42 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
self.add_instance(backup_dir, 'replica', replica)
|
||||
self.set_replica(master, replica, 'replica', synchronous=True)
|
||||
self.set_archiving(backup_dir, 'replica', replica, replica=True)
|
||||
replica.start()
|
||||
replica.slow_start(replica=True)
|
||||
|
||||
# Create table and indexes
|
||||
master.safe_psql(
|
||||
"postgres",
|
||||
"create sequence t_seq; create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,127) i")
|
||||
"create extension bloom; create sequence t_seq; "
|
||||
"create table t_heap as select i as id, "
|
||||
"md5(i::text) as text, md5(repeat(i::text,10))::tsvector as "
|
||||
"tsvector from generate_series(0,256000) i")
|
||||
for i in idx_ptrack:
|
||||
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
|
||||
master.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
|
||||
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
|
||||
master.safe_psql(
|
||||
"postgres",
|
||||
"create index {0} on {1} using {2}({3})".format(
|
||||
i, idx_ptrack[i]['relation'],
|
||||
idx_ptrack[i]['type'],
|
||||
idx_ptrack[i]['column']))
|
||||
|
||||
master.safe_psql('postgres', 'vacuum t_heap')
|
||||
master.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
# Sync master and replica
|
||||
self.wait_until_replica_catch_with_master(master, replica)
|
||||
replica.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
# Take FULL backup to clean every ptrack
|
||||
self.backup_node(
|
||||
backup_dir, 'replica', replica,
|
||||
options=[
|
||||
'-j10',
|
||||
'--master-host=localhost',
|
||||
'--master-db=postgres',
|
||||
'--master-port={0}'.format(master.port)
|
||||
]
|
||||
)
|
||||
# TODO: check that all ptrack are nullified
|
||||
for i in idx_ptrack:
|
||||
# get size of heap and indexes. size calculated in pages
|
||||
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
|
||||
@ -113,14 +163,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
|
||||
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
|
||||
|
||||
# Take FULL backup to clean every ptrack
|
||||
self.backup_node(backup_dir, 'replica', replica, options=['-j10',
|
||||
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
|
||||
|
||||
master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
|
||||
master.safe_psql('postgres', 'vacuum full t_heap')
|
||||
master.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
# Sync master and replica
|
||||
self.wait_until_replica_catch_with_master(master, replica)
|
||||
replica.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
# CHECK PTRACK SANITY
|
||||
success = True
|
||||
for i in idx_ptrack:
|
||||
# get new size of heap and indexes. size calculated in pages
|
||||
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
|
||||
@ -131,10 +183,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
|
||||
# get ptrack for every idx
|
||||
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
|
||||
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
|
||||
replica, idx_ptrack[i]['path'],
|
||||
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
|
||||
|
||||
# compare pages and check ptrack sanity, the most important part
|
||||
self.check_ptrack_sanity(idx_ptrack[i])
|
||||
if not self.check_ptrack_sanity(idx_ptrack[i]):
|
||||
success = False
|
||||
|
||||
self.assertTrue(
|
||||
success, 'Ptrack has failed to register changes in data files'
|
||||
)
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
|
@ -12,29 +12,44 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
# @unittest.expectedFailure
|
||||
def test_ptrack_vacuum_truncate(self):
|
||||
fname = self.id().split('.')[3]
|
||||
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
node = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node".format(module_name, fname),
|
||||
set_replication=True,
|
||||
initdb_params=['--data-checksums'],
|
||||
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
|
||||
pg_options={
|
||||
'ptrack_enable': 'on',
|
||||
'wal_level': 'replica',
|
||||
'max_wal_senders': '2'})
|
||||
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'node', node)
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
self.create_tblspace_in_node(node, 'somedata')
|
||||
|
||||
# Create table and indexes
|
||||
res = node.safe_psql(
|
||||
"postgres",
|
||||
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
|
||||
"create extension bloom; create sequence t_seq; "
|
||||
"create table t_heap tablespace somedata "
|
||||
"as select i as id, md5(i::text) as text, "
|
||||
"md5(repeat(i::text,10))::tsvector as tsvector "
|
||||
"from generate_series(0,2560) i")
|
||||
for i in idx_ptrack:
|
||||
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
|
||||
node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
|
||||
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
|
||||
node.safe_psql(
|
||||
"postgres", "create index {0} on {1} using {2}({3}) "
|
||||
"tablespace somedata".format(
|
||||
i, idx_ptrack[i]['relation'],
|
||||
idx_ptrack[i]['type'], idx_ptrack[i]['column']))
|
||||
|
||||
node.safe_psql('postgres', 'vacuum t_heap')
|
||||
node.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node, options=['-j10', '--stream'])
|
||||
|
||||
for i in idx_ptrack:
|
||||
# get size of heap and indexes. size calculated in pages
|
||||
idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
|
||||
@ -44,12 +59,12 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
|
||||
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
|
||||
|
||||
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
|
||||
|
||||
node.safe_psql('postgres', 'delete from t_heap where id > 128;')
|
||||
node.safe_psql('postgres', 'vacuum t_heap')
|
||||
node.safe_psql('postgres', 'checkpoint')
|
||||
|
||||
# CHECK PTRACK SANITY
|
||||
success = True
|
||||
for i in idx_ptrack:
|
||||
# get new size of heap and indexes. size calculated in pages
|
||||
idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
|
||||
@ -60,31 +75,43 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
|
||||
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
|
||||
# get ptrack for every idx
|
||||
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
|
||||
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
|
||||
node, idx_ptrack[i]['path'],
|
||||
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
|
||||
|
||||
# compare pages and check ptrack sanity
|
||||
self.check_ptrack_sanity(idx_ptrack[i])
|
||||
if not self.check_ptrack_sanity(idx_ptrack[i]):
|
||||
success = False
|
||||
|
||||
self.assertTrue(
|
||||
success, 'Ptrack has failed to register changes in data files'
|
||||
)
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
|
||||
|
||||
    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_ptrack_vacuum_truncate_replica(self):
        fname = self.id().split('.')[3]
        master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
        master = self.make_simple_node(
            base_dir="{0}/{1}/master".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
            pg_options={
                'ptrack_enable': 'on',
                'wal_level': 'replica',
                'max_wal_senders': '2',
                'checkpoint_timeout': '30'})

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'master', master)
        master.start()
        master.slow_start()

        self.backup_node(backup_dir, 'master', master, options=['--stream'])

        replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
        replica = self.make_simple_node(
            base_dir="{0}/{1}/replica".format(module_name, fname))
        replica.cleanup()

        self.restore_node(backup_dir, 'master', replica)
@ -92,20 +119,38 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
        self.add_instance(backup_dir, 'replica', replica)
        self.set_replica(master, replica, 'replica', synchronous=True)
        self.set_archiving(backup_dir, 'replica', replica, replica=True)
        replica.start()
        replica.slow_start(replica=True)

        # Create table and indexes
        master.safe_psql(
            "postgres",
            "create sequence t_seq; create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
            "create extension bloom; create sequence t_seq; "
            "create table t_heap as select i as id, "
            "md5(i::text) as text, md5(repeat(i::text,10))::tsvector "
            "as tsvector from generate_series(0,2560) i")

        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                master.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
                    i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
                master.safe_psql(
                    "postgres", "create index {0} on {1} "
                    "using {2}({3})".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'], idx_ptrack[i]['column']))

        master.safe_psql('postgres', 'vacuum t_heap')
        master.safe_psql('postgres', 'checkpoint')

        # Take PTRACK backup to clean every ptrack
        self.backup_node(
            backup_dir, 'replica', replica,
            options=[
                '-j10',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port)
                ]
            )

        for i in idx_ptrack:
            # get size of heap and indexes. size calculated in pages
            idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
@ -115,14 +160,12 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
            idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        # Take PTRACK backup to clean every ptrack
        self.backup_node(backup_dir, 'replica', replica, options=['-j10',
            '--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])

        master.safe_psql('postgres', 'delete from t_heap where id > 128;')
        master.safe_psql('postgres', 'vacuum t_heap')
        master.safe_psql('postgres', 'checkpoint')

        # CHECK PTRACK SANITY
        success = True
        for i in idx_ptrack:
            # get new size of heap and indexes. size calculated in pages
            idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
@ -133,10 +176,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
                replica, idx_ptrack[i]['path'],
                [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])

            # compare pages and check ptrack sanity
            self.check_ptrack_sanity(idx_ptrack[i])
            if not self.check_ptrack_sanity(idx_ptrack[i]):
                success = False

        self.assertTrue(
            success, 'Ptrack has failed to register changes in data files'
        )

        # Clean after yourself
        self.del_test_dir(module_name, fname)
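The ptrack_vacuum_truncate hunks above replace the bare per-index call to check_ptrack_sanity with a pattern that records a per-index verdict and asserts only once after the loop, so a single failing index no longer aborts the check before the remaining indexes are inspected. A minimal sketch of that aggregate-then-assert pattern follows; verify_all, items and check_one are illustrative names, not helpers from the test suite.

# Sketch only: keep checking every item, remember failures, assert once.
def verify_all(items, check_one):
    success = True
    for name in items:
        if not check_one(name):  # do not stop at the first mismatch
            success = False
    assert success, 'Ptrack has failed to register changes in data files'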
@ -1241,3 +1241,202 @@ class RestoreTest(ProbackupTest, unittest.TestCase):

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_zags_block_corrupt(self):
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica'}
            )
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.start()

        self.backup_node(backup_dir, 'node', node)

        conn = node.connect()
        with node.connect("postgres") as conn:

            conn.execute(
                "create table tbl(i int)")
            conn.commit()
            conn.execute(
                "create index idx ON tbl (i)")
            conn.commit()
            conn.execute(
                "insert into tbl select i from generate_series(0,400) as i")
            conn.commit()
            conn.execute(
                "select pg_relation_size('idx')")
            conn.commit()
            conn.execute(
                "delete from tbl where i < 100")
            conn.commit()
            conn.execute(
                "explain analyze select i from tbl order by i")
            conn.commit()
            conn.execute(
                "select i from tbl order by i")
            conn.commit()
            conn.execute(
                "create extension pageinspect")
            conn.commit()
            print(conn.execute(
                "select * from bt_page_stats('idx',1)"))
            conn.commit()
            conn.execute(
                "insert into tbl select i from generate_series(0,100) as i")
            conn.commit()
            conn.execute(
                "insert into tbl select i from generate_series(0,100) as i")
            conn.commit()
            conn.execute(
                "insert into tbl select i from generate_series(0,100) as i")
            conn.commit()
            conn.execute(
                "insert into tbl select i from generate_series(0,100) as i")


        node_restored = self.make_simple_node(
            base_dir="{0}/{1}/node_restored".format(module_name, fname),
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica'}
            )

        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored)

        node_restored.append_conf("postgresql.auto.conf", "archive_mode = 'off'")
        node_restored.append_conf("postgresql.auto.conf", "hot_standby = 'on'")
        node_restored.append_conf(
            "postgresql.auto.conf", "port = {0}".format(node_restored.port))

        node_restored.slow_start()

    @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_zags_block_corrupt_1(self):
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            initdb_params=['--data-checksums'],
            pg_options={
                'wal_level': 'replica',
                'autovacuum': 'off',
                'full_page_writes': 'on'}
            )
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.start()

        self.backup_node(backup_dir, 'node', node)

        node.safe_psql('postgres', 'create table tbl(i int)')

        node.safe_psql('postgres', 'create index idx ON tbl (i)')

        node.safe_psql(
            'postgres',
            'insert into tbl select i from generate_series(0,100000) as i')

        print(node.safe_psql(
            'postgres',
            "select pg_relation_size('idx')"))

        node.safe_psql(
            'postgres',
            'delete from tbl where i%2 = 0')

        node.safe_psql(
            'postgres',
            'explain analyze select i from tbl order by i')

        node.safe_psql(
            'postgres',
            'select i from tbl order by i')

        node.safe_psql(
            'postgres',
            'create extension pageinspect')

        print(node.safe_psql(
            'postgres',
            "select * from bt_page_stats('idx',1)"))

        node.safe_psql(
            'postgres',
            'checkpoint')

        node.safe_psql(
            'postgres',
            'insert into tbl select i from generate_series(0,100) as i')

        node.safe_psql(
            'postgres',
            'insert into tbl select i from generate_series(0,100) as i')

        node.safe_psql(
            'postgres',
            'insert into tbl select i from generate_series(0,100) as i')

        node.safe_psql(
            'postgres',
            'insert into tbl select i from generate_series(0,100) as i')

        self.switch_wal_segment(node)

        node_restored = self.make_simple_node(
            base_dir="{0}/{1}/node_restored".format(module_name, fname),
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica'}
            )

        pgdata = self.pgdata_content(node.data_dir)

        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored)

        node_restored.append_conf("postgresql.auto.conf", "archive_mode = 'off'")
        node_restored.append_conf("postgresql.auto.conf", "hot_standby = 'on'")
        node_restored.append_conf(
            "postgresql.auto.conf", "port = {0}".format(node_restored.port))

        node_restored.slow_start()

        while True:
            with open(node_restored.pg_log_file, 'r') as f:
                if 'selected new timeline ID' in f.read():
                    break

        with open(node_restored.pg_log_file, 'r') as f:
            print(f.read())

        pgdata_restored = self.pgdata_content(node_restored.data_dir)

        self.compare_pgdata(pgdata, pgdata_restored)

        # pg_xlogdump_path = self.get_bin_path('pg_xlogdump')

        # pg_xlogdump = self.run_binary(
        # [
        # pg_xlogdump_path, '-b',
        # os.path.join(backup_dir, 'wal', 'node', '000000010000000000000003'),
        # ' | ', 'grep', 'Btree', ''
        # ], async=False)

        if pg_xlogdump.returncode:
            self.assertFalse(
                True,
                'Failed to start pg_wal_dump: {0}'.format(
                    pg_receivexlog.communicate()[1]))
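test_zags_block_corrupt_1 above waits for the restored node by re-reading its log in an unbounded while-True loop until 'selected new timeline ID' appears. A hedged sketch of how such a wait could be bounded is shown below; wait_for_log_line and its timeout and poll defaults are assumptions for illustration, not part of the ProbackupTest helpers.

import time

# Illustrative only: poll a log file for a marker line, but give up after
# a timeout instead of looping forever.
def wait_for_log_line(log_path, marker, timeout=60, poll=1):
    deadline = time.time() + timeout
    while time.time() < deadline:
        with open(log_path, 'r') as f:
            if marker in f.read():
                return True
        time.sleep(poll)
    return False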
@ -28,7 +28,7 @@ class OptionTest(ProbackupTest, unittest.TestCase):
        self.assertEqual(
            self.backup_node(
                backup_dir, 'node', node,
                options=["--log-level-console=panic"]),
                options=["--log-level-console=off"]),
            None
        )
        self.assertIn("OK", self.show_pb(backup_dir, 'node', as_text=True))
@ -56,7 +56,7 @@ class OptionTest(ProbackupTest, unittest.TestCase):
        self.assertEqual(
            self.backup_node(
                backup_dir, 'node', node,
                options=["--log-level-console=panic"]),
                options=["--log-level-console=off"]),
            None
        )
        self.backup_node(backup_dir, 'node', node)
@ -135,7 +135,7 @@ class OptionTest(ProbackupTest, unittest.TestCase):
            backup_id, "backup.control")
        os.remove(file)

        self.assertIn('control file "{0}" doesn\'t exist'.format(file), self.show_pb(backup_dir, 'node', as_text=True))
        self.assertIn('Control file "{0}" doesn\'t exist'.format(file), self.show_pb(backup_dir, 'node', as_text=True))

        # Clean after yourself
        self.del_test_dir(module_name, fname)
@ -165,7 +165,7 @@ class OptionTest(ProbackupTest, unittest.TestCase):
        fd = open(file, 'w')
        fd.close()

        self.assertIn('control file "{0}" is empty'.format(file), self.show_pb(backup_dir, 'node', as_text=True))
        self.assertIn('Control file "{0}" is empty'.format(file), self.show_pb(backup_dir, 'node', as_text=True))

        # Clean after yourself
        self.del_test_dir(module_name, fname)
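The option-test hunks above switch the console log level from panic to off (off fully silences console output) and update the expected control-file messages to the capitalized wording. Because assertIn matches case-sensitively, the expected strings must follow that capitalization exactly; a small self-contained illustration follows, where the path inside the sample message is a placeholder.

import unittest

class ControlFileMessageCase(unittest.TestCase):
    # assertIn is case-sensitive, so the expected text has to match the
    # capitalized wording now produced by pg_probackup.
    def test_capitalized_message(self):
        output = 'Control file "/tmp/backup/backup.control" is empty'
        self.assertIn('Control file', output)
        self.assertNotIn('control file "', output)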
File diff suppressed because it is too large