mirror of https://github.com/postgrespro/pg_probackup.git synced 2025-04-11 11:52:02 +02:00

Merge branch 'master' into issue_90

Grigory Smolkin 2019-08-12 19:17:41 +03:00
commit a0dc029d32
52 changed files with 4762 additions and 1601 deletions

File diff suppressed because it is too large


@ -33,6 +33,8 @@ Regardless of the chosen backup type, all backups taken with `pg_probackup` supp
* `Autonomous backups` stream, via the replication protocol, all the WAL files required to restore the cluster to a consistent state at the time the backup was taken. Even if continuous archiving is not set up, the required WAL segments are included in the backup.
* `Archive backups` rely on continuous archiving.
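For illustration only: a minimal sketch of taking a FULL backup in each WAL delivery mode. The backup catalog path `/mnt/backups` and instance name `node` are placeholders, not part of this README.
```shell
# autonomous (STREAM) backup: the WAL needed for recovery is streamed into the backup itself
pg_probackup backup -B /mnt/backups --instance node -b FULL --stream

# archive backup: relies on continuous archiving being configured for the cluster
pg_probackup backup -B /mnt/backups --instance node -b FULL
```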
## ptrack support
`PTRACK` backup support is provided via the following options:
* vanilla PostgreSQL compiled with ptrack patch. Currently there are patches for [PostgreSQL 9.6](https://gist.githubusercontent.com/gsmol/5b615c971dfd461c76ef41a118ff4d97/raw/e471251983f14e980041f43bea7709b8246f4178/ptrack_9.6.6_v1.5.patch) and [PostgreSQL 10](https://gist.githubusercontent.com/gsmol/be8ee2a132b88463821021fd910d960e/raw/de24f9499f4f314a4a3e5fae5ed4edb945964df8/ptrack_10.1_v1.5.patch)
* Postgres Pro Standard 9.5, 9.6, 10, 11
@ -45,22 +47,26 @@ Regardless of the chosen backup type, all backups taken with `pg_probackup` supp
* Remote mode is in beta stage.
* An incremental chain can span only one timeline. So if you have an incremental chain taken from a replica and that replica gets promoted, you will be forced to take another FULL backup.
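A hedged sketch of the workaround for this timeline limitation (catalog path and instance name are placeholders): after a promotion, start a new chain with a FULL backup before taking further increments.
```shell
# the old incremental chain ends with the old timeline; begin a new chain
pg_probackup backup -B /mnt/backups --instance node -b FULL --stream

# subsequent increments (PAGE, DELTA or PTRACK) can then continue on the new timeline
pg_probackup backup -B /mnt/backups --instance node -b DELTA --stream
```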
## Current release
[2.1.5](https://github.com/postgrespro/pg_probackup/releases/tag/2.1.5)
## Installation and Setup
### Windows Installation
[Installers download link](https://oc.postgrespro.ru/index.php/s/CGsjXlc5NmhRI0L)
Installers are available in release **assets**. [Latest](https://github.com/postgrespro/pg_probackup/releases/tag/2.1.5).
### Linux Installation
```shell
#DEB Ubuntu|Debian Packages
echo "deb [arch=amd64] http://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup.list
wget -O - http://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | apt-key add - && apt-get update
apt-get install pg-probackup-{11,10,9.6,9.5}
apt-get install pg-probackup-{11,10,9.6,9.5}-dbg
sudo echo "deb [arch=amd64] http://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup.list
sudo wget -O - http://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update
sudo apt-get install pg-probackup-{11,10,9.6,9.5}
sudo apt-get install pg-probackup-{11,10,9.6,9.5}-dbg
#DEB-SRC Packages
echo "deb-src [arch=amd64] http://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" >>\
sudo echo "deb-src [arch=amd64] http://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" >>\
/etc/apt/sources.list.d/pg_probackup.list
apt-get source pg-probackup-{11,10,9.6,9.5}
sudo apt-get source pg-probackup-{11,10,9.6,9.5}
#RPM Centos Packages
rpm -ivh http://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-centos.noarch.rpm
@ -81,7 +87,7 @@ yum install pg_probackup-{11,10,9.6,9.5}-debuginfo
yumdownloader --source pg_probackup-{11,10,9.6,9.5}
```
Once you have `pg_probackup` installed, complete [the setup](https://postgrespro.com/docs/postgrespro/current/app-pgprobackup.html#pg-probackup-install-and-setup).
Once you have `pg_probackup` installed, complete [the setup](https://github.com/postgrespro/pg_probackup/blob/master/Documentation.md#installation-and-setup).
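The linked setup boils down to initializing a backup catalog and registering an instance in it; a minimal sketch, with the catalog path, data directory and instance name as placeholders:
```shell
# initialize the backup catalog
pg_probackup init -B /mnt/backups

# register a PostgreSQL instance in the catalog
pg_probackup add-instance -B /mnt/backups -D /var/lib/postgresql/11/main --instance node
```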
## Building from source
### Linux
@ -104,17 +110,14 @@ SET PATH=%PATH%;C:\msys64\usr\bin
gen_probackup_project.pl C:\path_to_postgresql_source_tree
```
## Current release
[2.1.3](https://github.com/postgrespro/pg_probackup/releases/tag/2.1.3)
## Documentation
Currently the latest documentation can be found at [github](https://github.com/postgrespro/pg_probackup/blob/master/Documentation.md) and [Postgres Pro Enterprise documentation](https://postgrespro.com/docs/postgrespro/current/app-pgprobackup).
Currently the latest documentation can be found at [github](https://postgrespro.github.io/pg_probackup).
Slightly outdated documentation can be found at [Postgres Pro Enterprise documentation](https://postgrespro.com/docs/postgrespro/current/app-pgprobackup).
## Licence
## License
This module available under the [license](LICENSE) similar to [PostgreSQL](https://www.postgresql.org/about/licence/).
This module available under the [license](LICENSE) similar to [PostgreSQL](https://www.postgresql.org/about/license/).
## Feedback


@ -1,3 +0,0 @@
CALL "C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall" amd64
SET PERL5LIB=.
perl gen_probackup_project.pl C:\Shared\Postgresql\myPostgres\11\postgrespro


@ -1 +0,0 @@
perl win32build96.pl "C:\PgPro96" "C:\PgProject\pg96ee\postgrespro\src"


@ -5,7 +5,7 @@ our $pgsrc;
our $currpath;
BEGIN {
# path to the pg_pprobackup dir
# path to the pg_probackup dir
$currpath = File::Basename::dirname(Cwd::abs_path($0));
use Cwd;
use File::Basename;
@ -155,7 +155,8 @@ sub build_pgprobackup
'restore.c',
'show.c',
'util.c',
'validate.c'
'validate.c',
'checkdb.c'
);
$probackup->AddFiles(
"$currpath/src/utils",
@ -166,7 +167,8 @@ sub build_pgprobackup
'logger.c',
'parray.c',
'pgut.c',
'thread.c'
'thread.c',
'remote.c'
);
$probackup->AddFile("$pgsrc/src/backend/access/transam/xlogreader.c");
$probackup->AddFile("$pgsrc/src/backend/utils/hash/pg_crc.c");


@ -28,7 +28,7 @@
/*
* Macro needed to parse ptrack.
* NOTE Keep those values syncronised with definitions in ptrack.h
* NOTE Keep those values synchronized with definitions in ptrack.h
*/
#define PTRACK_BITS_PER_HEAPBLOCK 1
#define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / PTRACK_BITS_PER_HEAPBLOCK)
@ -39,7 +39,7 @@ static XLogRecPtr stop_stream_lsn = InvalidXLogRecPtr;
/*
* How long we should wait for streaming end in seconds.
* Retreived as checkpoint_timeout + checkpoint_timeout * 0.1
* Retrieved as checkpoint_timeout + checkpoint_timeout * 0.1
*/
static uint32 stream_stop_timeout = 0;
/* Time in which we started to wait for streaming end */
@ -78,10 +78,6 @@ static int is_ptrack_enable = false;
bool is_ptrack_support = false;
bool exclusive_backup = false;
/* PostgreSQL server version from "backup_conn" */
static int server_version = 0;
static char server_version_str[100] = "";
/* Is pg_start_backup() was executed */
static bool backup_in_progress = false;
/* Is pg_stop_backup() was sent */
@ -94,23 +90,24 @@ static void backup_cleanup(bool fatal, void *userdata);
static void *backup_files(void *arg);
static void do_backup_instance(PGconn *backup_conn);
static void do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo);
static void pg_start_backup(const char *label, bool smooth, pgBackup *backup,
PGconn *backup_conn, PGconn *master_conn);
PGNodeInfo *nodeInfo, PGconn *backup_conn, PGconn *master_conn);
static void pg_switch_wal(PGconn *conn);
static void pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn);
static void pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn, PGNodeInfo *nodeInfo);
static int checkpoint_timeout(PGconn *backup_conn);
//static void backup_list_file(parray *files, const char *root, )
static XLogRecPtr wait_wal_lsn(XLogRecPtr lsn, bool is_start_lsn,
bool wait_prev_segment);
static void wait_replica_wal_lsn(XLogRecPtr lsn, bool is_start_backup, PGconn *backup_conn);
static void make_pagemap_from_ptrack(parray* files, PGconn* backup_conn);
static void *StreamLog(void *arg);
static void IdentifySystem(StreamThreadArg *stream_thread_arg);
static void check_external_for_tablespaces(parray *external_list,
PGconn *backup_conn);
static parray *get_database_map(PGconn *pg_startbackup_conn);
/* Ptrack functions */
static void pg_ptrack_clear(PGconn *backup_conn);
@ -128,7 +125,8 @@ static XLogRecPtr get_last_ptrack_lsn(PGconn *backup_conn);
/* Check functions */
static bool pg_checksum_enable(PGconn *conn);
static bool pg_is_in_recovery(PGconn *conn);
static void check_server_version(PGconn *conn);
static bool pg_is_superuser(PGconn *conn);
static void check_server_version(PGconn *conn, PGNodeInfo *nodeInfo);
static void confirm_block_size(PGconn *conn, const char *name, int blcksz);
static void set_cfs_datafiles(parray *files, const char *root, char *relative, size_t i);
@ -142,7 +140,7 @@ backup_stopbackup_callback(bool fatal, void *userdata)
if (backup_in_progress)
{
elog(WARNING, "backup in progress, stop backup");
pg_stop_backup(NULL, pg_startbackup_conn); /* don't care stop_lsn on error case */
pg_stop_backup(NULL, pg_startbackup_conn, NULL); /* don't care about stop_lsn in case of error */
}
}
@ -151,7 +149,7 @@ backup_stopbackup_callback(bool fatal, void *userdata)
* Move files from 'pgdata' to a subdirectory in 'backup_path'.
*/
static void
do_backup_instance(PGconn *backup_conn)
do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo)
{
int i;
char database_path[MAXPGPATH];
@ -169,6 +167,7 @@ do_backup_instance(PGconn *backup_conn)
parray *prev_backup_filelist = NULL;
parray *backup_list = NULL;
parray *external_dirs = NULL;
parray *database_map = NULL;
pgFile *pg_control = NULL;
PGconn *master_conn = NULL;
@ -198,10 +197,11 @@ do_backup_instance(PGconn *backup_conn)
/* get list of backups already taken */
backup_list = catalog_get_backup_list(INVALID_BACKUP_ID);
prev_backup = catalog_get_last_data_backup(backup_list, current.tli);
prev_backup = catalog_get_last_data_backup(backup_list, current.tli, current.start_time);
if (prev_backup == NULL)
elog(ERROR, "Valid backup on current timeline is not found. "
"Create new FULL backup before an incremental one.");
elog(ERROR, "Valid backup on current timeline %X is not found. "
"Create new FULL backup before an incremental one.",
current.tli);
pgBackupGetPath(prev_backup, prev_backup_filelist_path,
lengthof(prev_backup_filelist_path), DATABASE_FILE_LIST);
@ -254,10 +254,12 @@ do_backup_instance(PGconn *backup_conn)
else
pg_startbackup_conn = backup_conn;
pg_start_backup(label, smooth_checkpoint, &current,
backup_conn, pg_startbackup_conn);
pg_start_backup(label, smooth_checkpoint, &current, nodeInfo, backup_conn, pg_startbackup_conn);
/* For incremental backup check that start_lsn is not from the past */
/* For incremental backup check that start_lsn is not from the past
* Though it will not save us if PostgreSQL instance is actually
* restored STREAM backup.
*/
if (current.backup_mode != BACKUP_MODE_FULL &&
prev_backup->start_lsn > current.start_lsn)
elog(ERROR, "Current START LSN %X/%X is lower than START LSN %X/%X of previous backup %s. "
@ -293,30 +295,10 @@ do_backup_instance(PGconn *backup_conn)
instance_config.conn_opt.pgport,
instance_config.conn_opt.pgdatabase,
instance_config.conn_opt.pguser);
/* sanity */
IdentifySystem(&stream_thread_arg);
if (!CheckServerVersionForStreaming(stream_thread_arg.conn))
{
PQfinish(stream_thread_arg.conn);
/*
* Error message already written in CheckServerVersionForStreaming().
* There's no hope of recovering from a version mismatch, so don't
* retry.
*/
elog(ERROR, "Cannot continue backup because stream connect has failed.");
}
/*
* Identify server, obtaining start LSN position and current timeline ID
* at the same time, necessary if not valid data can be found in the
* existing output directory.
*/
if (!RunIdentifySystem(stream_thread_arg.conn, NULL, NULL, NULL, NULL))
{
PQfinish(stream_thread_arg.conn);
elog(ERROR, "Cannot continue backup because stream connect has failed.");
}
/* By default there are some error */
/* By default there are some error */
stream_thread_arg.ret = 1;
/* we must use startpos as start_lsn from start_backup */
stream_thread_arg.startpos = current.start_lsn;
@ -333,6 +315,12 @@ do_backup_instance(PGconn *backup_conn)
dir_list_file(backup_files_list, instance_config.pgdata,
true, true, false, 0, FIO_DB_HOST);
/*
* Get database_map (name to oid) for use in partial restore feature.
* It's possible that we fail and database_map will be NULL.
*/
database_map = get_database_map(pg_startbackup_conn);
/*
* Append to backup list all files and directories
* from external directory option
@ -474,7 +462,7 @@ do_backup_instance(PGconn *backup_conn)
/* Run threads */
thread_interrupted = false;
elog(INFO, "Start transfering data files");
elog(INFO, "Start transferring data files");
for (i = 0; i < num_threads; i++)
{
backup_files_arg *arg = &(threads_args[i]);
@ -491,7 +479,7 @@ do_backup_instance(PGconn *backup_conn)
backup_isok = false;
}
if (backup_isok)
elog(INFO, "Data files are transfered");
elog(INFO, "Data files are transferred");
else
elog(ERROR, "Data files transferring failed");
@ -516,7 +504,7 @@ do_backup_instance(PGconn *backup_conn)
}
/* Notify end of backup */
pg_stop_backup(&current, pg_startbackup_conn);
pg_stop_backup(&current, pg_startbackup_conn, nodeInfo);
/* In case of backup from replica >= 9.6 we must fix minRecPoint,
* First we must find pg_control in backup_files_list.
@ -526,7 +514,7 @@ do_backup_instance(PGconn *backup_conn)
char pg_control_path[MAXPGPATH];
snprintf(pg_control_path, sizeof(pg_control_path), "%s/%s",
instance_config.pgdata, "global/pg_control");
instance_config.pgdata, XLOG_CONTROL_FILE);
for (i = 0; i < parray_num(backup_files_list); i++)
{
@ -584,6 +572,15 @@ do_backup_instance(PGconn *backup_conn)
parray_free(xlog_files_list);
}
/* write database map to file and add it to control file */
if (database_map)
{
write_database_map(&current, database_map, backup_files_list);
/* cleanup */
parray_walk(database_map, db_map_entry_free);
parray_free(database_map);
}
/* Print the list of files to backup catalog */
write_backup_filelist(&current, backup_files_list, instance_config.pgdata,
external_dirs);
@ -635,11 +632,12 @@ pgdata_basic_setup(ConnectionOptions conn_opt, PGNodeInfo *nodeInfo)
confirm_block_size(cur_conn, "wal_block_size", XLOG_BLCKSZ);
nodeInfo->block_size = BLCKSZ;
nodeInfo->wal_block_size = XLOG_BLCKSZ;
nodeInfo->is_superuser = pg_is_superuser(cur_conn);
current.from_replica = pg_is_in_recovery(cur_conn);
/* Confirm that this server version is supported */
check_server_version(cur_conn);
check_server_version(cur_conn, nodeInfo);
if (pg_checksum_enable(cur_conn))
current.checksum_version = 1;
@ -656,11 +654,12 @@ pgdata_basic_setup(ConnectionOptions conn_opt, PGNodeInfo *nodeInfo)
"pg_probackup have no way to detect data block corruption without them. "
"Reinitialize PGDATA with option '--data-checksums'.");
StrNCpy(current.server_version, server_version_str,
sizeof(current.server_version));
if (nodeInfo->is_superuser)
elog(WARNING, "Current PostgreSQL role is superuser. "
"It is not recommended to run backup or checkdb as superuser.");
StrNCpy(nodeInfo->server_version, server_version_str,
sizeof(nodeInfo->server_version));
StrNCpy(current.server_version, nodeInfo->server_version_str,
sizeof(current.server_version));
return cur_conn;
}
@ -672,16 +671,60 @@ int
do_backup(time_t start_time, bool no_validate)
{
PGconn *backup_conn = NULL;
PGNodeInfo nodeInfo;
char pretty_data_bytes[20];
/* Initialize PGInfonode */
pgNodeInit(&nodeInfo);
if (!instance_config.pgdata)
elog(ERROR, "required parameter not specified: PGDATA "
"(-D, --pgdata)");
/* Update backup status and other metainfo. */
current.status = BACKUP_STATUS_RUNNING;
current.start_time = start_time;
StrNCpy(current.program_version, PROGRAM_VERSION,
sizeof(current.program_version));
current.compress_alg = instance_config.compress_alg;
current.compress_level = instance_config.compress_level;
/* Save list of external directories */
if (instance_config.external_dir_str &&
pg_strcasecmp(instance_config.external_dir_str, "none") != 0)
{
current.external_dir_str = instance_config.external_dir_str;
}
elog(INFO, "Backup start, pg_probackup version: %s, instance: %s, backup ID: %s, backup mode: %s, "
"wal-method: %s, remote: %s, compress-algorithm: %s, compress-level: %i",
PROGRAM_VERSION, instance_name, base36enc(start_time), pgBackupGetBackupMode(&current),
current.stream ? "STREAM" : "ARCHIVE", IsSshProtocol() ? "true" : "false",
deparse_compress_alg(current.compress_alg), current.compress_level);
/* Create backup directory and BACKUP_CONTROL_FILE */
if (pgBackupCreateDir(&current))
elog(ERROR, "Cannot create backup directory");
if (!lock_backup(&current))
elog(ERROR, "Cannot lock backup %s directory",
base36enc(current.start_time));
write_backup(&current);
/* set the error processing function for the backup process */
pgut_atexit_push(backup_cleanup, NULL);
elog(LOG, "Backup destination is initialized");
/*
* setup backup_conn, do some compatibility checks and
* fill basic info about instance
*/
backup_conn = pgdata_basic_setup(instance_config.conn_opt,
&(current.nodeInfo));
backup_conn = pgdata_basic_setup(instance_config.conn_opt, &nodeInfo);
if (current.from_replica)
elog(INFO, "Backup %s is going to be taken from standby", base36enc(start_time));
/*
* Ensure that backup directory was initialized for the same PostgreSQL
* instance we opened connection to. And that target backup database PGDATA
@ -692,14 +735,9 @@ do_backup(time_t start_time, bool no_validate)
/* below perform checks specific for backup command */
#if PG_VERSION_NUM >= 110000
if (!RetrieveWalSegSize(backup_conn))
elog(ERROR, "Failed to retreive wal_segment_size");
elog(ERROR, "Failed to retrieve wal_segment_size");
#endif
current.compress_alg = instance_config.compress_alg;
current.compress_level = instance_config.compress_level;
current.stream = stream_wal;
is_ptrack_support = pg_ptrack_support(backup_conn);
if (is_ptrack_support)
{
@ -722,34 +760,8 @@ do_backup(time_t start_time, bool no_validate)
if (instance_config.master_conn_opt.pghost == NULL)
elog(ERROR, "Options for connection to master must be provided to perform backup from replica");
/* Start backup. Update backup status. */
current.status = BACKUP_STATUS_RUNNING;
current.start_time = start_time;
StrNCpy(current.program_version, PROGRAM_VERSION,
sizeof(current.program_version));
/* Save list of external directories */
if (instance_config.external_dir_str &&
pg_strcasecmp(instance_config.external_dir_str, "none") != 0)
{
current.external_dir_str = instance_config.external_dir_str;
}
/* Create backup directory and BACKUP_CONTROL_FILE */
if (pgBackupCreateDir(&current))
elog(ERROR, "Cannot create backup directory");
if (!lock_backup(&current))
elog(ERROR, "Cannot lock backup %s directory",
base36enc(current.start_time));
write_backup(&current);
elog(LOG, "Backup destination is initialized");
/* set the error processing function for the backup process */
pgut_atexit_push(backup_cleanup, NULL);
/* backup data */
do_backup_instance(backup_conn);
do_backup_instance(backup_conn, &nodeInfo);
pgut_atexit_pop(backup_cleanup, NULL);
/* compute size of wal files of this backup stored in the archive */
@ -765,11 +777,12 @@ do_backup(time_t start_time, bool no_validate)
current.status = BACKUP_STATUS_DONE;
write_backup(&current);
//elog(LOG, "Backup completed. Total bytes : " INT64_FORMAT "",
// current.data_bytes);
if (!no_validate)
pgBackupValidate(&current);
pgBackupValidate(&current, NULL);
/* Notify user about backup size */
pretty_size(current.data_bytes, pretty_data_bytes, lengthof(pretty_data_bytes));
elog(INFO, "Backup %s real size: %s", base36enc(current.start_time), pretty_data_bytes);
if (current.status == BACKUP_STATUS_OK ||
current.status == BACKUP_STATUS_DONE)
@ -791,34 +804,35 @@ do_backup(time_t start_time, bool no_validate)
* Confirm that this server version is supported
*/
static void
check_server_version(PGconn *conn)
check_server_version(PGconn *conn, PGNodeInfo *nodeInfo)
{
PGresult *res;
/* confirm server version */
server_version = PQserverVersion(conn);
nodeInfo->server_version = PQserverVersion(conn);
if (server_version == 0)
elog(ERROR, "Unknown server version %d", server_version);
if (nodeInfo->server_version == 0)
elog(ERROR, "Unknown server version %d", nodeInfo->server_version);
if (server_version < 100000)
sprintf(server_version_str, "%d.%d",
server_version / 10000,
(server_version / 100) % 100);
if (nodeInfo->server_version < 100000)
sprintf(nodeInfo->server_version_str, "%d.%d",
nodeInfo->server_version / 10000,
(nodeInfo->server_version / 100) % 100);
else
sprintf(server_version_str, "%d",
server_version / 10000);
sprintf(nodeInfo->server_version_str, "%d",
nodeInfo->server_version / 10000);
if (server_version < 90500)
if (nodeInfo->server_version < 90500)
elog(ERROR,
"server version is %s, must be %s or higher",
server_version_str, "9.5");
nodeInfo->server_version_str, "9.5");
if (current.from_replica && server_version < 90600)
if (current.from_replica && nodeInfo->server_version < 90600)
elog(ERROR,
"server version is %s, must be %s or higher for backup from replica",
server_version_str, "9.6");
nodeInfo->server_version_str, "9.6");
/* TODO: search pg_proc for pgpro_edition before calling */
res = pgut_execute_extended(conn, "SELECT pgpro_edition()",
0, NULL, true, true);
@ -831,29 +845,29 @@ check_server_version(PGconn *conn)
/* It seems we connected to PostgreSQL (not Postgres Pro) */
elog(ERROR, "%s was built with Postgres Pro %s %s, "
"but connection is made with PostgreSQL %s",
PROGRAM_NAME, PG_MAJORVERSION, PGPRO_EDITION, server_version_str);
else if (strcmp(server_version_str, PG_MAJORVERSION) != 0 &&
PROGRAM_NAME, PG_MAJORVERSION, PGPRO_EDITION, nodeInfo->server_version_str);
else if (strcmp(nodeInfo->server_version_str, PG_MAJORVERSION) != 0 &&
strcmp(PQgetvalue(res, 0, 0), PGPRO_EDITION) != 0)
elog(ERROR, "%s was built with Postgres Pro %s %s, "
"but connection is made with Postgres Pro %s %s",
PROGRAM_NAME, PG_MAJORVERSION, PGPRO_EDITION,
server_version_str, PQgetvalue(res, 0, 0));
nodeInfo->server_version_str, PQgetvalue(res, 0, 0));
#else
if (PQresultStatus(res) != PGRES_FATAL_ERROR)
/* It seems we connected to Postgres Pro (not PostgreSQL) */
elog(ERROR, "%s was built with PostgreSQL %s, "
"but connection is made with Postgres Pro %s %s",
PROGRAM_NAME, PG_MAJORVERSION,
server_version_str, PQgetvalue(res, 0, 0));
else if (strcmp(server_version_str, PG_MAJORVERSION) != 0)
nodeInfo->server_version_str, PQgetvalue(res, 0, 0));
else if (strcmp(nodeInfo->server_version_str, PG_MAJORVERSION) != 0)
elog(ERROR, "%s was built with PostgreSQL %s, but connection is made with %s",
PROGRAM_NAME, PG_MAJORVERSION, server_version_str);
PROGRAM_NAME, PG_MAJORVERSION, nodeInfo->server_version_str);
#endif
PQclear(res);
/* Do exclusive backup only for PostgreSQL 9.5 */
exclusive_backup = server_version < 90600 ||
exclusive_backup = nodeInfo->server_version < 90600 ||
current.backup_mode == BACKUP_MODE_DIFF_PTRACK;
}
@ -888,6 +902,7 @@ check_system_identifiers(PGconn *conn, char *pgdata)
elog(ERROR, "Backup data directory was initialized for system id " UINT64_FORMAT ", "
"but connected instance system id is " UINT64_FORMAT,
instance_config.system_identifier, system_id_conn);
if (system_id_pgdata != instance_config.system_identifier)
elog(ERROR, "Backup data directory was initialized for system id " UINT64_FORMAT ", "
"but target backup directory system id is " UINT64_FORMAT,
@ -923,7 +938,7 @@ confirm_block_size(PGconn *conn, const char *name, int blcksz)
*/
static void
pg_start_backup(const char *label, bool smooth, pgBackup *backup,
PGconn *backup_conn, PGconn *pg_startbackup_conn)
PGNodeInfo *nodeInfo, PGconn *backup_conn, PGconn *pg_startbackup_conn)
{
PGresult *res;
const char *params[2];
@ -964,11 +979,14 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup,
PQclear(res);
if (current.backup_mode == BACKUP_MODE_DIFF_PAGE &&
(!(backup->from_replica && !exclusive_backup)))
!backup->from_replica &&
!(nodeInfo->server_version < 90600 &&
!nodeInfo->is_superuser))
/*
* Switch to a new WAL segment. It is necessary to get archived WAL
* segment, which includes start LSN of current backup.
* Don`t do this for replica backups unless it`s PG 9.5
* Don`t do this for replica backups and for PG 9.5 if pguser is not superuser
* (because in 9.5 only superuser can switch WAL)
*/
pg_switch_wal(conn);
@ -983,16 +1001,11 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup,
else if (!stream_wal)
/* ...for others wait for previous segment */
wait_wal_lsn(backup->start_lsn, true, true);
/* In case of backup from replica for PostgreSQL 9.5
* wait for start_lsn to be replayed by replica
*/
if (backup->from_replica && exclusive_backup)
wait_replica_wal_lsn(backup->start_lsn, true, backup_conn);
}
/*
* Switch to a new WAL segment. It should be called only for master.
* For PG 9.5 it should be called only if pguser is superuser.
*/
static void
pg_switch_wal(PGconn *conn)
@ -1004,9 +1017,9 @@ pg_switch_wal(PGconn *conn)
PQclear(res);
#if PG_VERSION_NUM >= 100000
res = pgut_execute(conn, "SELECT * FROM pg_catalog.pg_switch_wal()", 0, NULL);
res = pgut_execute(conn, "SELECT pg_catalog.pg_switch_wal()", 0, NULL);
#else
res = pgut_execute(conn, "SELECT * FROM pg_catalog.pg_switch_xlog()", 0, NULL);
res = pgut_execute(conn, "SELECT pg_catalog.pg_switch_xlog()", 0, NULL);
#endif
PQclear(res);
@ -1054,6 +1067,66 @@ pg_ptrack_support(PGconn *backup_conn)
return true;
}
/*
* Fill 'datname to Oid' map
*
* This function can fail to get the map for legal reasons, e.g. missing
* permissions on pg_database during `backup`.
* As long as user do not use partial restore feature it`s fine.
*
* To avoid breaking a backward compatibility don't throw an ERROR,
* throw a warning instead of an error and return NULL.
* Caller is responsible for checking the result.
*/
parray *
get_database_map(PGconn *conn)
{
PGresult *res;
parray *database_map = NULL;
int i;
/*
* Do not include template0 and template1 to the map
* as default databases that must always be restored.
*/
res = pgut_execute_extended(conn,
"SELECT oid, datname FROM pg_catalog.pg_database "
"WHERE datname NOT IN ('template1', 'template0')",
0, NULL, true, true);
/* Don't error out, simply return NULL. See comment above. */
if (PQresultStatus(res) != PGRES_TUPLES_OK)
{
PQclear(res);
elog(WARNING, "Failed to get database map: %s",
PQerrorMessage(conn));
return NULL;
}
/* Construct database map */
for (i = 0; i < PQntuples(res); i++)
{
char *datname = NULL;
db_map_entry *db_entry = (db_map_entry *) pgut_malloc(sizeof(db_map_entry));
/* get Oid */
db_entry->dbOid = atoi(PQgetvalue(res, i, 0));
/* get datname */
datname = PQgetvalue(res, i, 1);
db_entry->datname = pgut_malloc(strlen(datname) + 1);
strcpy(db_entry->datname, datname);
if (database_map == NULL)
database_map = parray_new();
parray_append(database_map, db_entry);
}
return database_map;
}
/* Check if ptrack is enabled in target instance */
static bool
pg_ptrack_enable(PGconn *backup_conn)
@ -1079,13 +1152,13 @@ pg_checksum_enable(PGconn *conn)
res_db = pgut_execute(conn, "SHOW data_checksums", 0, NULL);
if (strcmp(PQgetvalue(res_db, 0, 0), "on") != 0)
if (strcmp(PQgetvalue(res_db, 0, 0), "on") == 0)
{
PQclear(res_db);
return false;
return true;
}
PQclear(res_db);
return true;
return false;
}
/* Check if target instance is replica */
@ -1105,6 +1178,24 @@ pg_is_in_recovery(PGconn *conn)
return false;
}
/* Check if current PostgreSQL role is superuser */
static bool
pg_is_superuser(PGconn *conn)
{
PGresult *res;
res = pgut_execute(conn, "SELECT pg_catalog.current_setting('is_superuser')", 0, NULL);
if (strcmp(PQgetvalue(res, 0, 0), "on") == 0)
{
PQclear(res);
return true;
}
PQclear(res);
return false;
}
/* Clear ptrack files in all databases of the instance we connected to */
static void
pg_ptrack_clear(PGconn *backup_conn)
@ -1338,7 +1429,7 @@ wait_wal_lsn(XLogRecPtr lsn, bool is_start_lsn, bool wait_prev_segment)
tli = get_current_timeline(false);
/* Compute the name of the WAL file containig requested LSN */
/* Compute the name of the WAL file containing requested LSN */
GetXLogSegNo(lsn, targetSegNo, instance_config.xlog_seg_size);
if (wait_prev_segment)
targetSegNo--;
@ -1478,80 +1569,12 @@ wait_wal_lsn(XLogRecPtr lsn, bool is_start_lsn, bool wait_prev_segment)
}
}
/*
* Wait for target 'lsn' on replica instance from master.
*/
static void
wait_replica_wal_lsn(XLogRecPtr lsn, bool is_start_backup,
PGconn *backup_conn)
{
uint32 try_count = 0;
while (true)
{
XLogRecPtr replica_lsn;
/*
* For lsn from pg_start_backup() we need it to be replayed on replica's
* data.
*/
if (is_start_backup)
{
replica_lsn = get_checkpoint_location(backup_conn);
}
/*
* For lsn from pg_stop_backup() we need it only to be received by
* replica and fsync()'ed on WAL segment.
*/
else
{
PGresult *res;
uint32 lsn_hi;
uint32 lsn_lo;
#if PG_VERSION_NUM >= 100000
res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_last_wal_receive_lsn()",
0, NULL);
#else
res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_last_xlog_receive_location()",
0, NULL);
#endif
/* Extract LSN from result */
XLogDataFromLSN(PQgetvalue(res, 0, 0), &lsn_hi, &lsn_lo);
/* Calculate LSN */
replica_lsn = ((uint64) lsn_hi) << 32 | lsn_lo;
PQclear(res);
}
/* target lsn was replicated */
if (replica_lsn >= lsn)
break;
sleep(1);
if (interrupted)
elog(ERROR, "Interrupted during waiting for target LSN");
try_count++;
/* Inform user if target lsn is absent in first attempt */
if (try_count == 1)
elog(INFO, "Wait for target LSN %X/%X to be received by replica",
(uint32) (lsn >> 32), (uint32) lsn);
if (instance_config.replica_timeout > 0 &&
try_count > instance_config.replica_timeout)
elog(ERROR, "Target LSN %X/%X could not be recevied by replica "
"in %d seconds",
(uint32) (lsn >> 32), (uint32) lsn,
instance_config.replica_timeout);
}
}
/*
* Notify end of backup to PostgreSQL server.
*/
static void
pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn)
pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn,
PGNodeInfo *nodeInfo)
{
PGconn *conn;
PGresult *res;
@ -1587,27 +1610,22 @@ pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn)
PQclear(res);
/* Create restore point
* only if it`s backup from master, or exclusive replica(wich connects to master)
* Only if backup is from master.
* For PG 9.5 create restore point only if pguser is superuser.
*/
if (backup != NULL && (!current.from_replica || (current.from_replica && exclusive_backup)))
if (backup != NULL && !current.from_replica &&
!(nodeInfo->server_version < 90600 &&
!nodeInfo->is_superuser))
{
const char *params[1];
char name[1024];
if (!current.from_replica)
snprintf(name, lengthof(name), "pg_probackup, backup_id %s",
base36enc(backup->start_time));
else
snprintf(name, lengthof(name), "pg_probackup, backup_id %s. Replica Backup",
base36enc(backup->start_time));
snprintf(name, lengthof(name), "pg_probackup, backup_id %s",
base36enc(backup->start_time));
params[0] = name;
res = pgut_execute(conn, "SELECT pg_catalog.pg_create_restore_point($1)",
1, params);
/* Extract timeline and LSN from the result */
XLogDataFromLSN(PQgetvalue(res, 0, 0), &lsn_hi, &lsn_lo);
/* Calculate LSN */
//restore_lsn = ((uint64) lsn_hi) << 32 | lsn_lo;
PQclear(res);
}
@ -1688,7 +1706,11 @@ pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn)
while (1)
{
if (!PQconsumeInput(conn) || PQisBusy(conn))
if (!PQconsumeInput(conn))
elog(ERROR, "pg_stop backup() failed: %s",
PQerrorMessage(conn));
if (PQisBusy(conn))
{
pg_stop_backup_timeout++;
sleep(1);
@ -1754,6 +1776,9 @@ pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn)
char *xlog_path,
stream_xlog_path[MAXPGPATH];
elog(WARNING, "Invalid stop_backup_lsn value %X/%X",
(uint32) (stop_backup_lsn >> 32), (uint32) (stop_backup_lsn));
if (stream_wal)
{
pgBackupGetPath2(backup, stream_xlog_path,
@ -1884,10 +1909,6 @@ pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn)
char *xlog_path,
stream_xlog_path[MAXPGPATH];
/* Wait for stop_lsn to be received by replica */
/* XXX Do we need this? */
// if (current.from_replica)
// wait_replica_wal_lsn(stop_backup_lsn, false);
/*
* Wait for stop_lsn to be archived or streamed.
* We wait for stop_lsn in stream mode just in case.
@ -1924,7 +1945,7 @@ pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn)
}
/*
* Retreive checkpoint_timeout GUC value in seconds.
* Retrieve checkpoint_timeout GUC value in seconds.
*/
static int
checkpoint_timeout(PGconn *backup_conn)
@ -2422,7 +2443,7 @@ make_pagemap_from_ptrack(parray *files, PGconn *backup_conn)
if (ptrack_nonparsed != NULL)
{
/*
* pg_ptrack_get_and_clear() returns ptrack with VARHDR cutted out.
* pg_ptrack_get_and_clear() returns ptrack with VARHDR cut out.
* Compute the beginning of the ptrack map related to this segment
*
* HEAPBLOCKS_PER_BYTE. Number of heap pages one ptrack byte can track: 8
@ -2552,7 +2573,6 @@ StreamLog(void *arg)
stream_arg->startpos -= stream_arg->startpos % instance_config.xlog_seg_size;
/* Initialize timeout */
stream_stop_timeout = 0;
stream_stop_begin = 0;
#if PG_VERSION_NUM >= 100000
@ -2572,7 +2592,7 @@ StreamLog(void *arg)
/*
* Start the replication
*/
elog(LOG, _("started streaming WAL at %X/%X (timeline %u)"),
elog(LOG, "started streaming WAL at %X/%X (timeline %u)",
(uint32) (stream_arg->startpos >> 32), (uint32) stream_arg->startpos,
stream_arg->starttli);
@ -2613,13 +2633,13 @@ StreamLog(void *arg)
#endif
}
#else
if(ReceiveXlogStream(stream_arg->conn, stream_arg->startpos, stream_arg->starttli, NULL,
(char *) stream_arg->basedir, stop_streaming,
standby_message_timeout, NULL, false, false) == false)
if(ReceiveXlogStream(stream_arg->conn, stream_arg->startpos, stream_arg->starttli,
NULL, (char *) stream_arg->basedir, stop_streaming,
standby_message_timeout, NULL, false, false) == false)
elog(ERROR, "Problem in receivexlog");
#endif
elog(LOG, _("finished streaming WAL at %X/%X (timeline %u)"),
elog(LOG, "finished streaming WAL at %X/%X (timeline %u)",
(uint32) (stop_stream_lsn >> 32), (uint32) stop_stream_lsn, stream_arg->starttli);
stream_arg->ret = 0;
@ -2787,3 +2807,62 @@ check_external_for_tablespaces(parray *external_list, PGconn *backup_conn)
}
}
}
/*
* Run IDENTIFY_SYSTEM through a given connection and
* check system identifier and timeline are matching
*/
void
IdentifySystem(StreamThreadArg *stream_thread_arg)
{
PGresult *res;
uint64 stream_conn_sysidentifier = 0;
char *stream_conn_sysidentifier_str;
TimeLineID stream_conn_tli = 0;
if (!CheckServerVersionForStreaming(stream_thread_arg->conn))
{
PQfinish(stream_thread_arg->conn);
/*
* Error message already written in CheckServerVersionForStreaming().
* There's no hope of recovering from a version mismatch, so don't
* retry.
*/
elog(ERROR, "Cannot continue backup because stream connect has failed.");
}
/*
* Identify server, obtain server system identifier and timeline
*/
res = pgut_execute(stream_thread_arg->conn, "IDENTIFY_SYSTEM", 0, NULL);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
{
elog(WARNING,"Could not send replication command \"%s\": %s",
"IDENTIFY_SYSTEM", PQerrorMessage(stream_thread_arg->conn));
PQfinish(stream_thread_arg->conn);
elog(ERROR, "Cannot continue backup because stream connect has failed.");
}
stream_conn_sysidentifier_str = PQgetvalue(res, 0, 0);
stream_conn_tli = atoi(PQgetvalue(res, 0, 1));
/* Additional sanity, primary for PG 9.5,
* where system id can be obtained only via "IDENTIFY SYSTEM"
*/
if (!parse_uint64(stream_conn_sysidentifier_str, &stream_conn_sysidentifier, 0))
elog(ERROR, "%s is not system_identifier", stream_conn_sysidentifier_str);
if (stream_conn_sysidentifier != instance_config.system_identifier)
elog(ERROR, "System identifier mismatch. Connected PostgreSQL instance has system id: "
"" UINT64_FORMAT ". Expected: " UINT64_FORMAT ".",
stream_conn_sysidentifier, instance_config.system_identifier);
if (stream_conn_tli != current.tli)
elog(ERROR, "Timeline identifier mismatch. "
"Connected PostgreSQL instance has timeline id: %X. Expected: %X.",
stream_conn_tli, current.tli);
PQclear(res);
}


@ -260,7 +260,7 @@ lock_backup(pgBackup *backup)
fio_unlink(lock_file, FIO_BACKUP_HOST);
errno = save_errno;
elog(ERROR, "Culd not write lock file \"%s\": %s",
elog(ERROR, "Could not write lock file \"%s\": %s",
lock_file, strerror(errno));
}
@ -441,22 +441,88 @@ catalog_lock_backup_list(parray *backup_list, int from_idx, int to_idx)
}
/*
* Find the last completed backup on given timeline
* Find the latest valid child of latest valid FULL backup on given timeline
*/
pgBackup *
catalog_get_last_data_backup(parray *backup_list, TimeLineID tli)
catalog_get_last_data_backup(parray *backup_list, TimeLineID tli, time_t current_start_time)
{
int i;
pgBackup *backup = NULL;
pgBackup *full_backup = NULL;
pgBackup *tmp_backup = NULL;
char *invalid_backup_id;
/* backup_list is sorted in order of descending ID */
for (i = 0; i < parray_num(backup_list); i++)
{
backup = (pgBackup *) parray_get(backup_list, (size_t) i);
pgBackup *backup = (pgBackup *) parray_get(backup_list, i);
if ((backup->backup_mode == BACKUP_MODE_FULL &&
(backup->status == BACKUP_STATUS_OK ||
backup->status == BACKUP_STATUS_DONE)) && backup->tli == tli)
{
full_backup = backup;
break;
}
}
/* Failed to find valid FULL backup to fulfill ancestor role */
if (!full_backup)
return NULL;
elog(LOG, "Latest valid FULL backup: %s",
base36enc(full_backup->start_time));
/* FULL backup is found, lets find his latest child */
for (i = 0; i < parray_num(backup_list); i++)
{
pgBackup *backup = (pgBackup *) parray_get(backup_list, i);
/* only valid descendants are acceptable for evaluation */
if ((backup->status == BACKUP_STATUS_OK ||
backup->status == BACKUP_STATUS_DONE) && backup->tli == tli)
return backup;
backup->status == BACKUP_STATUS_DONE))
{
switch (scan_parent_chain(backup, &tmp_backup))
{
/* broken chain */
case 0:
invalid_backup_id = base36enc_dup(tmp_backup->parent_backup);
elog(WARNING, "Backup %s has missing parent: %s. Cannot be a parent",
base36enc(backup->start_time), invalid_backup_id);
pg_free(invalid_backup_id);
continue;
/* chain is intact, but at least one parent is invalid */
case 1:
invalid_backup_id = base36enc_dup(tmp_backup->start_time);
elog(WARNING, "Backup %s has invalid parent: %s. Cannot be a parent",
base36enc(backup->start_time), invalid_backup_id);
pg_free(invalid_backup_id);
continue;
/* chain is ok */
case 2:
/* Yes, we could call is_parent() earlier - after choosing the ancestor,
* but this way we have an opportunity to detect and report all possible
* anomalies.
*/
if (is_parent(full_backup->start_time, backup, true))
{
elog(INFO, "Parent backup: %s",
base36enc(backup->start_time));
return backup;
}
}
}
/* skip yourself */
else if (backup->start_time == current_start_time)
continue;
else
{
elog(WARNING, "Backup %s has status: %s. Cannot be a parent.",
base36enc(backup->start_time), status2str(backup->status));
}
}
return NULL;
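The selection above relies on the statuses and parent links recorded in the backup catalog; a sketch of inspecting them before taking an increment (catalog path and instance name are placeholders):
```shell
# list backups with their modes, statuses and parent relationships
pg_probackup show -B /mnt/backups --instance node

# an incremental backup attaches to the latest valid child of the latest valid FULL backup
pg_probackup backup -B /mnt/backups --instance node -b PAGE
```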
@ -678,13 +744,15 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root,
len = sprintf(line, "{\"path\":\"%s\", \"size\":\"" INT64_FORMAT "\", "
"\"mode\":\"%u\", \"is_datafile\":\"%u\", "
"\"is_cfs\":\"%u\", \"crc\":\"%u\", "
"\"compress_alg\":\"%s\", \"external_dir_num\":\"%d\"",
"\"compress_alg\":\"%s\", \"external_dir_num\":\"%d\", "
"\"dbOid\":\"%u\"",
path, file->write_size, file->mode,
file->is_datafile ? 1 : 0,
file->is_cfs ? 1 : 0,
file->crc,
deparse_compress_alg(file->compress_alg),
file->external_dir_num);
file->external_dir_num,
file->dbOid);
if (file->is_datafile)
len += sprintf(line+len, ",\"segno\":\"%d\"", file->segno);
@ -956,7 +1024,7 @@ parse_compress_alg(const char *arg)
len = strlen(arg);
if (len == 0)
elog(ERROR, "compress algrorithm is empty");
elog(ERROR, "compress algorithm is empty");
if (pg_strncasecmp("zlib", arg, len) == 0)
return ZLIB_COMPRESS;
@ -987,6 +1055,22 @@ deparse_compress_alg(int alg)
return NULL;
}
/*
* Fill PGNodeInfo struct with default values.
*/
void
pgNodeInit(PGNodeInfo *node)
{
node->block_size = 0;
node->wal_block_size = 0;
node->checksum_version = 0;
node->is_superuser = false;
node->server_version = 0;
node->server_version_str[0] = '\0';
}
/*
* Fill pgBackup struct with default values.
*/
@ -1149,7 +1233,7 @@ find_parent_full_backup(pgBackup *current_backup)
}
/*
* Interate over parent chain and look for any problems.
* Iterate over parent chain and look for any problems.
* Return 0 if chain is broken.
* result_backup must contain oldest existing backup after missing backup.
* we have no way to know if there are multiple missing backups.
@ -1180,7 +1264,7 @@ scan_parent_chain(pgBackup *current_backup, pgBackup **result_backup)
target_backup = target_backup->parent_backup_link;
}
/* Prevous loop will skip FULL backup because his parent_backup_link is NULL */
/* Previous loop will skip FULL backup because his parent_backup_link is NULL */
if (target_backup->backup_mode == BACKUP_MODE_FULL &&
(target_backup->status != BACKUP_STATUS_OK &&
target_backup->status != BACKUP_STATUS_DONE))


@ -79,8 +79,9 @@ typedef struct pg_indexEntry
{
Oid indexrelid;
char *name;
char *namespace;
bool heapallindexed_is_supported;
/* schema where amcheck extention is located */
/* schema where amcheck extension is located */
char *amcheck_nspname;
/* lock for synchronization of parallel threads */
volatile pg_atomic_flag lock;
@ -98,6 +99,8 @@ pg_indexEntry_free(void *index)
if (index_ptr->name)
free(index_ptr->name);
if (index_ptr->name)
free(index_ptr->namespace);
if (index_ptr->amcheck_nspname)
free(index_ptr->amcheck_nspname);
@ -324,7 +327,7 @@ check_indexes(void *arg)
if (progress)
elog(INFO, "Thread [%d]. Progress: (%d/%d). Amchecking index '%s.%s'",
arguments->thread_num, i + 1, n_indexes,
ind->amcheck_nspname, ind->name);
ind->namespace, ind->name);
if (arguments->conn_arg.conn == NULL)
{
@ -362,7 +365,7 @@ get_index_list(const char *dbname, bool first_db_with_amcheck,
PGconn *db_conn)
{
PGresult *res;
char *nspname = NULL;
char *amcheck_nspname = NULL;
int i;
bool heapallindexed_is_supported = false;
parray *index_list = NULL;
@ -391,8 +394,8 @@ get_index_list(const char *dbname, bool first_db_with_amcheck,
return NULL;
}
nspname = pgut_malloc(strlen(PQgetvalue(res, 0, 1)) + 1);
strcpy(nspname, PQgetvalue(res, 0, 1));
amcheck_nspname = pgut_malloc(strlen(PQgetvalue(res, 0, 1)) + 1);
strcpy(amcheck_nspname, PQgetvalue(res, 0, 1));
/* heapallindexed_is_supported is database specific */
if (strcmp(PQgetvalue(res, 0, 2), "1.0") != 0 &&
@ -405,7 +408,7 @@ get_index_list(const char *dbname, bool first_db_with_amcheck,
PQgetvalue(res, 0, 2), PQgetvalue(res, 0, 1));
if (!heapallindexed_is_supported && heapallindexed)
elog(WARNING, "Extension '%s' verion %s in schema '%s'"
elog(WARNING, "Extension '%s' version %s in schema '%s'"
"do not support 'heapallindexed' option",
PQgetvalue(res, 0, 0), PQgetvalue(res, 0, 2),
PQgetvalue(res, 0, 1));
@ -419,24 +422,28 @@ get_index_list(const char *dbname, bool first_db_with_amcheck,
if (first_db_with_amcheck)
{
res = pgut_execute(db_conn, "SELECT cls.oid, cls.relname "
"FROM pg_index idx "
"JOIN pg_class cls ON idx.indexrelid=cls.oid "
"JOIN pg_am am ON cls.relam=am.oid "
"WHERE am.amname='btree' AND cls.relpersistence != 't'",
res = pgut_execute(db_conn, "SELECT cls.oid, cls.relname, nmspc.nspname "
"FROM pg_catalog.pg_index idx "
"LEFT JOIN pg_catalog.pg_class cls ON idx.indexrelid=cls.oid "
"LEFT JOIN pg_catalog.pg_namespace nmspc ON cls.relnamespace=nmspc.oid "
"LEFT JOIN pg_catalog.pg_am am ON cls.relam=am.oid "
"WHERE am.amname='btree' AND cls.relpersistence != 't' "
"ORDER BY nmspc.nspname DESC",
0, NULL);
}
else
{
res = pgut_execute(db_conn, "SELECT cls.oid, cls.relname "
"FROM pg_index idx "
"JOIN pg_class cls ON idx.indexrelid=cls.oid "
"JOIN pg_am am ON cls.relam=am.oid "
"LEFT JOIN pg_tablespace tbl "
"ON cls.reltablespace=tbl.oid "
"AND tbl.spcname <> 'pg_global' "
"WHERE am.amname='btree' AND cls.relpersistence != 't'",
res = pgut_execute(db_conn, "SELECT cls.oid, cls.relname, nmspc.nspname "
"FROM pg_catalog.pg_index idx "
"LEFT JOIN pg_catalog.pg_class cls ON idx.indexrelid=cls.oid "
"LEFT JOIN pg_catalog.pg_namespace nmspc ON cls.relnamespace=nmspc.oid "
"LEFT JOIN pg_catalog.pg_am am ON cls.relam=am.oid "
"WHERE am.amname='btree' AND cls.relpersistence != 't' AND "
"(cls.reltablespace IN "
"(SELECT oid from pg_catalog.pg_tablespace where spcname <> 'pg_global') "
"OR cls.reltablespace = 0) "
"ORDER BY nmspc.nspname DESC",
0, NULL);
}
@ -445,15 +452,24 @@ get_index_list(const char *dbname, bool first_db_with_amcheck,
{
pg_indexEntry *ind = (pg_indexEntry *) pgut_malloc(sizeof(pg_indexEntry));
char *name = NULL;
char *namespace = NULL;
/* index oid */
ind->indexrelid = atoi(PQgetvalue(res, i, 0));
/* index relname */
name = PQgetvalue(res, i, 1);
ind->name = pgut_malloc(strlen(name) + 1);
strcpy(ind->name, name); /* enough buffer size guaranteed */
/* index namespace */
namespace = PQgetvalue(res, i, 2);
ind->namespace = pgut_malloc(strlen(namespace) + 1);
strcpy(ind->namespace, namespace); /* enough buffer size guaranteed */
ind->heapallindexed_is_supported = heapallindexed_is_supported;
ind->amcheck_nspname = pgut_malloc(strlen(nspname) + 1);
strcpy(ind->amcheck_nspname, nspname);
ind->amcheck_nspname = pgut_malloc(strlen(amcheck_nspname) + 1);
strcpy(ind->amcheck_nspname, amcheck_nspname);
pg_atomic_clear_flag(&ind->lock);
if (index_list == NULL)
@ -509,7 +525,7 @@ amcheck_one_index(check_indexes_arg *arguments,
{
elog(WARNING, "Thread [%d]. Amcheck failed in database '%s' for index: '%s.%s': %s",
arguments->thread_num, arguments->conn_opt.pgdatabase,
ind->amcheck_nspname, ind->name, PQresultErrorMessage(res));
ind->namespace, ind->name, PQresultErrorMessage(res));
pfree(params[0]);
pfree(query);
@ -519,7 +535,7 @@ amcheck_one_index(check_indexes_arg *arguments,
else
elog(LOG, "Thread [%d]. Amcheck succeeded in database '%s' for index: '%s.%s'",
arguments->thread_num,
arguments->conn_opt.pgdatabase, ind->amcheck_nspname, ind->name);
arguments->conn_opt.pgdatabase, ind->namespace, ind->name);
pfree(params[0]);
pfree(query);
@ -633,7 +649,7 @@ do_amcheck(ConnectionOptions conn_opt, PGconn *conn)
if (check_isok)
elog(INFO, "Amcheck succeeded for database '%s'", dbname);
else
elog(WARNING, "Amcheck failed for database %s", dbname);
elog(WARNING, "Amcheck failed for database '%s'", dbname);
parray_walk(index_list, pg_indexEntry_free);
parray_free(index_list);
@ -674,6 +690,9 @@ do_checkdb(bool need_amcheck,
PGNodeInfo nodeInfo;
PGconn *cur_conn;
/* Initialize PGInfonode */
pgNodeInit(&nodeInfo);
if (skip_block_validation && !need_amcheck)
elog(ERROR, "Option '--skip-block-validation' must be used with '--amcheck' option");


@ -159,7 +159,7 @@ page_may_be_compressed(Page page, CompressAlg alg, uint32 backup_version)
return false;
}
#endif
/* otherwize let's try to decompress the page */
/* otherwise let's try to decompress the page */
return true;
}
return false;
@ -396,7 +396,7 @@ prepare_page(ConnectionArgs *arguments,
{
/*
* We need to copy the page that was successfully
* retreieved from ptrack into our output "page" parameter.
* retrieved from ptrack into our output "page" parameter.
* We must set checksum here, because it is outdated
* in the block recieved from shared buffers.
*/
@ -482,7 +482,7 @@ compress_and_backup_page(pgFile *file, BlockNumber blknum,
compressed_page, header.compressed_size);
write_buffer_size += MAXALIGN(header.compressed_size);
}
/* Nonpositive value means that compression failed. Write it as is. */
/* Non-positive value means that compression failed. Write it as is. */
else
{
header.compressed_size = BLCKSZ;
@ -754,7 +754,7 @@ restore_data_file(const char *to_path, pgFile *file, bool allow_truncate,
DataPage page;
int32 uncompressed_size = 0;
/* File didn`t changed. Nothig to copy */
/* File didn`t changed. Nothing to copy */
if (file->write_size == BYTES_INVALID)
break;
@ -887,7 +887,7 @@ restore_data_file(const char *to_path, pgFile *file, bool allow_truncate,
* DELTA backup have no knowledge about truncated blocks as PAGE or PTRACK do
* But during DELTA backup we read every file in PGDATA and thus DELTA backup
* knows exact size of every file at the time of backup.
* So when restoring file from DELTA backup we, knowning it`s size at
* So when restoring file from DELTA backup we, knowing it`s size at
* a time of a backup, can truncate file to this size.
*/
if (allow_truncate && file->n_blocks != BLOCKNUM_INVALID && !need_truncate)
@ -1068,6 +1068,39 @@ copy_file(fio_location from_location, const char *to_root,
return true;
}
/*
* Create empty file, used for partial restore
*/
bool
create_empty_file(fio_location from_location, const char *to_root,
fio_location to_location, pgFile *file)
{
char to_path[MAXPGPATH];
FILE *out;
/* open file for write */
join_path_components(to_path, to_root, file->rel_path);
out = fio_fopen(to_path, PG_BINARY_W, to_location);
if (out == NULL)
{
elog(ERROR, "cannot open destination file \"%s\": %s",
to_path, strerror(errno));
}
/* update file permission */
if (fio_chmod(to_path, file->mode, to_location) == -1)
{
fio_fclose(out);
elog(ERROR, "cannot change mode of \"%s\": %s", to_path,
strerror(errno));
}
if (fio_fclose(out))
elog(ERROR, "cannot close \"%s\": %s", to_path, strerror(errno));
return true;
}
/*
* Validate given page.
*


@ -116,7 +116,7 @@ do_delete(time_t backup_id)
*
* Invalid backups handled in Oracle style, so invalid backups are ignored
* for the purpose of retention fulfillment,
* i.e. CORRUPT full backup do not taken in account when deteremine
* i.e. CORRUPT full backup do not taken in account when determine
* which FULL backup should be keeped for redundancy obligation(only valid do),
* but if invalid backup is not guarded by retention - it is removed
*/
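For context, a sketch of how these retention settings are usually driven from the command line; the values and names are placeholders, not part of this change.
```shell
# keep two FULL backup chains and everything younger than seven days
pg_probackup set-config -B /mnt/backups --instance node --retention-redundancy=2 --retention-window=7

# preview, then apply, the retention policy
pg_probackup delete -B /mnt/backups --instance node --delete-expired --merge-expired --dry-run
pg_probackup delete -B /mnt/backups --instance node --delete-expired --merge-expired --delete-wal
```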
@ -175,6 +175,8 @@ int do_retention(void)
if (delete_wal && !dry_run)
do_retention_wal();
/* TODO: consider dry-run flag */
if (!backup_merged)
elog(INFO, "There are no backups to merge by retention policy");
@ -203,6 +205,8 @@ do_retention_internal(parray *backup_list, parray *to_keep_list, parray *to_purg
int i;
time_t current_time;
parray *redundancy_full_backup_list = NULL;
/* For retention calculation */
uint32 n_full_backups = 0;
int cur_full_backup_num = 0;
@ -221,15 +225,27 @@ do_retention_internal(parray *backup_list, parray *to_keep_list, parray *to_purg
{
pgBackup *backup = (pgBackup *) parray_get(backup_list, i);
/* Consider only valid backups for Redundancy */
/* Consider only valid FULL backups for Redundancy */
if (instance_config.retention_redundancy > 0 &&
backup->backup_mode == BACKUP_MODE_FULL &&
(backup->status == BACKUP_STATUS_OK ||
backup->status == BACKUP_STATUS_DONE))
{
n_full_backups++;
/* Add every FULL backup that satisfy Redundancy policy to separate list */
if (n_full_backups <= instance_config.retention_redundancy)
{
if (!redundancy_full_backup_list)
redundancy_full_backup_list = parray_new();
parray_append(redundancy_full_backup_list, backup);
}
}
}
/* Sort list of full backups to keep */
if (redundancy_full_backup_list)
parray_qsort(redundancy_full_backup_list, pgBackupCompareIdDesc);
}
if (instance_config.retention_window > 0)
@ -242,8 +258,21 @@ do_retention_internal(parray *backup_list, parray *to_keep_list, parray *to_purg
for (i = (int) parray_num(backup_list) - 1; i >= 0; i--)
{
bool redundancy_keep = false;
time_t backup_time = 0;
pgBackup *backup = (pgBackup *) parray_get(backup_list, (size_t) i);
/* check if backup`s FULL ancestor is in redundancy list */
if (redundancy_full_backup_list)
{
pgBackup *full_backup = find_parent_full_backup(backup);
if (full_backup && parray_bsearch(redundancy_full_backup_list,
full_backup,
pgBackupCompareIdDesc))
redundancy_keep = true;
}
/* Remember the serial number of latest valid FULL backup */
if (backup->backup_mode == BACKUP_MODE_FULL &&
(backup->status == BACKUP_STATUS_OK ||
@ -252,11 +281,17 @@ do_retention_internal(parray *backup_list, parray *to_keep_list, parray *to_purg
cur_full_backup_num++;
}
/* Check if backup in needed by retention policy
* TODO: consider that ERROR backup most likely to have recovery_time == 0
/* Invalid and running backups most likely to have recovery_time == 0,
* so in this case use start_time instead.
*/
if ((days_threshold == 0 || (days_threshold > backup->recovery_time)) &&
(instance_config.retention_redundancy <= (n_full_backups - cur_full_backup_num)))
if (backup->recovery_time)
backup_time = backup->recovery_time;
else
backup_time = backup->start_time;
/* Check if backup in needed by retention policy */
if ((days_threshold == 0 || (days_threshold > backup_time)) &&
(instance_config.retention_redundancy == 0 || !redundancy_keep))
{
/* This backup is not guarded by retention
*
@ -319,7 +354,7 @@ do_retention_internal(parray *backup_list, parray *to_keep_list, parray *to_purg
backup->parent_backup_link,
pgBackupCompareIdDesc))
{
/* make keep list a bit sparse */
/* make keep list a bit more compact */
parray_append(to_keep_list, backup);
continue;
}
@ -344,6 +379,7 @@ do_retention_internal(parray *backup_list, parray *to_keep_list, parray *to_purg
else
actual_window = (current_time - backup->recovery_time)/(60 * 60 * 24);
/* TODO: add ancestor(chain full backup) ID */
elog(INFO, "Backup %s, mode: %s, status: %s. Redundancy: %i/%i, Time Window: %ud/%ud. %s",
base36enc(backup->start_time),
pgBackupGetBackupMode(backup),
@ -416,7 +452,7 @@ do_retention_merge(parray *backup_list, parray *to_keep_list, parray *to_purge_l
continue;
}
/* FULL backup in purge list, thanks to sparsing of keep_list current backup is
/* FULL backup in purge list, thanks to compacting of keep_list current backup is
* final target for merge, but there could be intermediate incremental
* backups from purge_list.
*/
@ -462,7 +498,7 @@ do_retention_merge(parray *backup_list, parray *to_keep_list, parray *to_purge_l
* 2 PAGE1
* 3 FULL
*
* Сonsequentially merge incremental backups from PAGE1 to PAGE3
* Consequentially merge incremental backups from PAGE1 to PAGE3
* into FULL.
*/
@ -593,6 +629,7 @@ do_retention_wal(void)
XLogRecPtr oldest_lsn = InvalidXLogRecPtr;
TimeLineID oldest_tli = 0;
bool backup_list_is_empty = false;
int i;
/* Get list of backups. */
backup_list = catalog_get_backup_list(INVALID_BACKUP_ID);
@ -601,14 +638,17 @@ do_retention_wal(void)
backup_list_is_empty = true;
/* Save LSN and Timeline to remove unnecessary WAL segments */
if (!backup_list_is_empty)
for (i = (int) parray_num(backup_list) - 1; i >= 0; i--)
{
pgBackup *backup = NULL;
/* Get LSN and TLI of oldest alive backup */
backup = (pgBackup *) parray_get(backup_list, parray_num(backup_list) -1);
pgBackup *backup = (pgBackup *) parray_get(backup_list, i);
oldest_tli = backup->tli;
oldest_lsn = backup->start_lsn;
/* Get LSN and TLI of the oldest backup with valid start_lsn and tli */
if (backup->tli > 0 && !XLogRecPtrIsInvalid(backup->start_lsn))
{
oldest_tli = backup->tli;
oldest_lsn = backup->start_lsn;
break;
}
}
/* Be paranoid */

src/dir.c

@ -249,6 +249,10 @@ delete_file:
}
}
/*
* Read the file to compute its CRC.
* As a handy side effect, we return filesize via bytes_read parameter.
*/
pg_crc32
pgFileGetCRC(const char *file_path, bool use_crc32c, bool raise_on_deleted,
size_t *bytes_read, fio_location location)
@ -441,6 +445,30 @@ BlackListCompare(const void *str1, const void *str2)
return strcmp(*(char **) str1, *(char **) str2);
}
/* Compare two Oids */
int
pgCompareOid(const void *f1, const void *f2)
{
Oid *v1 = *(Oid **) f1;
Oid *v2 = *(Oid **) f2;
if (*v1 > *v2)
return 1;
else if (*v1 < *v2)
return -1;
else
return 0;}
void
db_map_entry_free(void *entry)
{
db_map_entry *m = (db_map_entry *) entry;
free(m->datname);
free(entry);
}
/*
* List files, symbolic links and directories in the directory "root" and add
* pgFile objects to "files". We add "root" to "files" if add_root is true.
@ -1075,7 +1103,7 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba
}
/*
* Read names of symbolik names of tablespaces with links to directories from
* Read names of symbolic names of tablespaces with links to directories from
* tablespace_map or tablespace_map.txt.
*/
void
@ -1445,7 +1473,8 @@ dir_read_file_list(const char *root, const char *external_prefix,
external_dir_num,
crc,
segno,
n_blocks;
n_blocks,
dbOid; /* used for partial restore */
pgFile *file;
get_control_value(buf, "path", path, NULL, true);
@ -1456,6 +1485,7 @@ dir_read_file_list(const char *root, const char *external_prefix,
get_control_value(buf, "crc", NULL, &crc, true);
get_control_value(buf, "compress_alg", compress_alg_string, NULL, false);
get_control_value(buf, "external_dir_num", NULL, &external_dir_num, false);
get_control_value(buf, "dbOid", NULL, &dbOid, false);
if (external_dir_num && external_prefix)
{
@ -1478,6 +1508,7 @@ dir_read_file_list(const char *root, const char *external_prefix,
file->crc = (pg_crc32) crc;
file->compress_alg = parse_compress_alg(compress_alg_string);
file->external_dir_num = external_dir_num;
file->dbOid = dbOid ? dbOid : 0;
/*
* Optional fields
@ -1568,7 +1599,7 @@ pgFileSize(const char *path)
}
/*
* Construct parray containing remmaped external directories paths
* Construct parray containing remapped external directories paths
* from string like /path1:/path2
*/
parray *
@ -1643,3 +1674,115 @@ backup_contains_external(const char *dir, parray *dirs_list)
search_result = parray_bsearch(dirs_list, dir, BlackListCompare);
return search_result != NULL;
}
/*
* Print database_map
*/
void
print_database_map(FILE *out, parray *database_map)
{
int i;
for (i = 0; i < parray_num(database_map); i++)
{
db_map_entry *db_entry = (db_map_entry *) parray_get(database_map, i);
fio_fprintf(out, "{\"dbOid\":\"%u\", \"datname\":\"%s\"}\n",
db_entry->dbOid, db_entry->datname);
}
}
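For illustration only (the Oids and datnames below are hypothetical), the resulting database_map file therefore contains one such record per database:
{"dbOid":"13297", "datname":"postgres"}
{"dbOid":"16384", "datname":"app_db"}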
/*
* Create file 'database_map' and add its meta to backup_files_list
* NULL check for database_map must be done by the caller.
*/
void
write_database_map(pgBackup *backup, parray *database_map, parray *backup_files_list)
{
FILE *fp;
pgFile *file;
char path[MAXPGPATH];
char database_map_path[MAXPGPATH];
pgBackupGetPath(backup, path, lengthof(path), DATABASE_DIR);
join_path_components(database_map_path, path, DATABASE_MAP);
fp = fio_fopen(database_map_path, PG_BINARY_W, FIO_BACKUP_HOST);
if (fp == NULL)
elog(ERROR, "Cannot open file list \"%s\": %s", path,
strerror(errno));
print_database_map(fp, database_map);
if (fio_fflush(fp) || fio_fclose(fp))
{
fio_unlink(database_map_path, FIO_BACKUP_HOST);
elog(ERROR, "Cannot write file list \"%s\": %s",
database_map_path, strerror(errno));
}
/* Add metadata to backup_content.control */
file = pgFileNew(database_map_path, DATABASE_MAP, true, 0,
FIO_BACKUP_HOST);
pfree(file->path);
file->path = strdup(DATABASE_MAP);
file->crc = pgFileGetCRC(database_map_path, true, false,
&file->read_size, FIO_BACKUP_HOST);
file->write_size = file->read_size;
parray_append(backup_files_list, file);
}
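Because the database_map entry is appended to backup_files_list with its CRC and sizes filled in, it is written into backup_content.control and validated like any other backup file; get_dbOid_exclude_list() in restore.c later relies on that stored CRC to verify the map before a partial restore.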
/*
* Read database map; return NULL if database_map is empty or missing
*/
parray *
read_database_map(pgBackup *backup)
{
FILE *fp;
parray *database_map;
char buf[MAXPGPATH];
char path[MAXPGPATH];
char database_map_path[MAXPGPATH];
pgBackupGetPath(backup, path, lengthof(path), DATABASE_DIR);
join_path_components(database_map_path, path, DATABASE_MAP);
fp = fio_open_stream(database_map_path, FIO_BACKUP_HOST);
if (fp == NULL)
{
/* It is NOT ok for database_map to be missing at this point, so
* we should error here.
* It's the caller's job to error out if database_map turns out to be empty.
*/
elog(ERROR, "Cannot open \"%s\": %s", database_map_path, strerror(errno));
}
database_map = parray_new();
while (fgets(buf, lengthof(buf), fp))
{
char datname[MAXPGPATH];
int64 dbOid;
db_map_entry *db_entry = (db_map_entry *) pgut_malloc(sizeof(db_map_entry));
get_control_value(buf, "dbOid", NULL, &dbOid, true);
get_control_value(buf, "datname", datname, NULL, true);
db_entry->dbOid = dbOid;
db_entry->datname = pgut_strdup(datname);
parray_append(database_map, db_entry);
}
fio_close_stream(fp);
/* Return NULL if file is empty */
if (parray_num(database_map) == 0)
{
parray_free(database_map);
return NULL;
}
return database_map;
}

View File

@ -143,6 +143,7 @@ help_pg_probackup(void)
printf(_(" [-T OLDDIR=NEWDIR] [--progress]\n"));
printf(_(" [--external-mapping=OLDDIR=NEWDIR]\n"));
printf(_(" [--skip-external-dirs]\n"));
printf(_(" [--db-include | --db-exclude]\n"));
printf(_(" [--remote-proto] [--remote-host]\n"));
printf(_(" [--remote-port] [--remote-path] [--remote-user]\n"));
printf(_(" [--ssh-options]\n"));
@ -155,6 +156,7 @@ help_pg_probackup(void)
printf(_(" [--recovery-target-timeline=timeline]\n"));
printf(_(" [--recovery-target-name=target-name]\n"));
printf(_(" [--skip-block-validation]\n"));
printf(_(" [--db-include | --db-exclude]\n"));
printf(_(" [--help]\n"));
printf(_("\n %s checkdb [-B backup-path] [--instance=instance_name]\n"), PROGRAM_NAME);
@ -359,6 +361,7 @@ help_restore(void)
printf(_(" [-T OLDDIR=NEWDIR] [--progress]\n"));
printf(_(" [--external-mapping=OLDDIR=NEWDIR]\n"));
printf(_(" [--skip-external-dirs]\n"));
printf(_(" [--db-include dbname | --db-exclude dbname]\n"));
printf(_(" [--remote-proto] [--remote-host]\n"));
printf(_(" [--remote-port] [--remote-path] [--remote-user]\n"));
printf(_(" [--ssh-options]\n\n"));
@ -397,6 +400,10 @@ help_restore(void)
printf(_(" relocate the external directory from OLDDIR to NEWDIR\n"));
printf(_(" --skip-external-dirs do not restore all external directories\n"));
printf(_("\n Partial restore options:\n"));
printf(_(" --db-include dbname restore only specified databases\n"));
printf(_(" --db-exclude dbname do not restore specified databases\n"));
printf(_("\n Logging options:\n"));
printf(_(" --log-level-console=log-level-console\n"));
printf(_(" level for console logging (default: info)\n"));
@ -440,6 +447,7 @@ help_validate(void)
printf(_(" [--recovery-target-timeline=timeline]\n"));
printf(_(" [--recovery-target-name=target-name]\n"));
printf(_(" [--skip-block-validation]\n\n"));
printf(_(" [--db-include dbname | --db-exclude dbname]\n"));
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
printf(_(" --instance=instance_name name of the instance\n"));
@ -458,6 +466,10 @@ help_validate(void)
printf(_(" the named restore point to which recovery will proceed\n"));
printf(_(" --skip-block-validation set to validate only file-level checksum\n"));
printf(_("\n Partial validation options:\n"));
printf(_(" --db-include dbname validate only files of specified databases\n"));
printf(_(" --db-exclude dbname do not validate files of specified databases\n"));
printf(_("\n Logging options:\n"));
printf(_(" --log-level-console=log-level-console\n"));
printf(_(" level for console logging (default: info)\n"));
@ -540,7 +552,7 @@ help_show(void)
printf(_(" [--format=format]\n\n"));
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
printf(_(" --instance=instance_name show info about specific intstance\n"));
printf(_(" --instance=instance_name show info about specific instance\n"));
printf(_(" -i, --backup-id=backup-id show info about specific backups\n"));
printf(_(" --format=format show format=PLAIN|JSON\n\n"));
}

View File

@ -146,7 +146,7 @@ do_merge(time_t backup_id)
merge_backups(full_backup, from_backup);
}
pgBackupValidate(full_backup);
pgBackupValidate(full_backup, NULL);
if (full_backup->status == BACKUP_STATUS_CORRUPT)
elog(ERROR, "Merging of backup %s failed", base36enc(backup_id));
@ -160,6 +160,7 @@ do_merge(time_t backup_id)
/*
* Merge two backups data files using threads.
* - to_backup - FULL, from_backup - incremental.
* - move instance files from from_backup to to_backup
* - remove unnecessary directories and files from to_backup
* - update metadata of from_backup, it becomes a FULL backup
@ -197,7 +198,7 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup)
if (to_backup->status == BACKUP_STATUS_OK ||
to_backup->status == BACKUP_STATUS_DONE)
{
pgBackupValidate(to_backup);
pgBackupValidate(to_backup, NULL);
if (to_backup->status == BACKUP_STATUS_CORRUPT)
elog(ERROR, "Interrupt merging");
}
@ -210,7 +211,7 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup)
from_backup->status == BACKUP_STATUS_DONE ||
from_backup->status == BACKUP_STATUS_MERGING ||
from_backup->status == BACKUP_STATUS_DELETING);
pgBackupValidate(from_backup);
pgBackupValidate(from_backup, NULL);
if (from_backup->status == BACKUP_STATUS_CORRUPT)
elog(ERROR, "Interrupt merging");
@ -269,7 +270,7 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup)
false);
/*
* Rename external directoties in to_backup (if exists)
* Rename external directories in to_backup (if exists)
* according to numeration of external dirs in from_backup.
*/
if (to_external)
@ -355,9 +356,12 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup)
pgFile *file = (pgFile *) parray_get(files, i);
if (S_ISDIR(file->mode))
{
to_backup->data_bytes += 4096;
continue;
}
/* Count the amount of the data actually copied */
else if (S_ISREG(file->mode))
if (file->write_size > 0)
to_backup->data_bytes += file->write_size;
}
/* compute size of wal files of this backup stored in the archive */
@ -381,7 +385,7 @@ delete_source_backup:
/*
* Delete files which are not in from_backup file list.
*/
parray_qsort(files, pgFileComparePathDesc);
parray_qsort(files, pgFileComparePathWithExternalDesc);
for (i = 0; i < parray_num(to_files); i++)
{
pgFile *file = (pgFile *) parray_get(to_files, i);
@ -394,7 +398,7 @@ delete_source_backup:
continue;
}
if (parray_bsearch(files, file, pgFileComparePathDesc) == NULL)
if (parray_bsearch(files, file, pgFileComparePathWithExternalDesc) == NULL)
{
char to_file_path[MAXPGPATH];
char *prev_path;
@ -507,7 +511,7 @@ merge_files(void *arg)
(!file->is_datafile || file->is_cfs))
{
elog(VERBOSE, "Skip merging file \"%s\", the file didn't change",
file->path);
file->rel_path);
/*
* If the file wasn't changed, retrieve its
@ -529,6 +533,10 @@ merge_files(void *arg)
}
}
/* TODO optimization: file from incremental backup has size 0, then
* just truncate the file from FULL backup
*/
/* We need to make full path, file object has relative path */
if (file->external_dir_num)
{
@ -560,46 +568,53 @@ merge_files(void *arg)
if (to_backup->compress_alg == PGLZ_COMPRESS ||
to_backup->compress_alg == ZLIB_COMPRESS)
{
char merge_to_file_path[MAXPGPATH];
char tmp_file_path[MAXPGPATH];
char *prev_path;
snprintf(merge_to_file_path, MAXPGPATH, "%s_merge", to_file_path);
snprintf(tmp_file_path, MAXPGPATH, "%s_tmp", to_file_path);
/* Start the magic */
/*
* Merge files:
* - if target file exists restore and decompress it to the temp
* path
* - if to_file in FULL backup exists, restore and decompress it to to_file_merge
* - decompress source file if necessary and merge it with the
* target decompressed file
* - compress result file
* target decompressed file in to_file_merge.
* - compress result file to to_file_tmp
* - rename to_file_tmp to to_file
*/
/*
* We need to decompress target file if it exists.
* We need to decompress target file in FULL backup if it exists.
*/
if (to_file)
{
elog(VERBOSE, "Merge target and source files into the temporary path \"%s\"",
tmp_file_path);
merge_to_file_path);
// TODO: truncate merge_to_file_path just in case?
/*
* file->path points to the file in from_root directory. But we
* need the file in directory to_root.
* file->path is relative, to_file_path - is absolute.
* Substitute them.
*/
prev_path = to_file->path;
to_file->path = to_file_path;
/* Decompress target file into temporary one */
restore_data_file(tmp_file_path, to_file, false, false,
restore_data_file(merge_to_file_path, to_file, false, false,
parse_program_version(to_backup->program_version));
to_file->path = prev_path;
}
else
elog(VERBOSE, "Restore source file into the temporary path \"%s\"",
tmp_file_path);
merge_to_file_path);
/* TODO: Optimize merge of new files */
/* Merge source file with target file */
restore_data_file(tmp_file_path, file,
restore_data_file(merge_to_file_path, file,
from_backup->backup_mode == BACKUP_MODE_DIFF_DELTA,
false,
parse_program_version(from_backup->program_version));
@ -609,12 +624,12 @@ merge_files(void *arg)
/* Again we need to change path */
prev_path = file->path;
file->path = tmp_file_path;
file->path = merge_to_file_path;
/* backup_data_file() requires file size to calculate nblocks */
file->size = pgFileSize(file->path);
/* Now we can compress the file */
backup_data_file(NULL, /* We shouldn't need 'arguments' here */
to_file_path, file,
tmp_file_path, file,
to_backup->start_lsn,
to_backup->backup_mode,
to_backup->compress_alg,
@ -623,10 +638,15 @@ merge_files(void *arg)
file->path = prev_path;
/* We can remove temporary file now */
if (unlink(tmp_file_path))
/* rename temp file */
if (rename(tmp_file_path, to_file_path) == -1)
elog(ERROR, "Could not rename file \"%s\" to \"%s\": %s",
tmp_file_path, to_file_path, strerror(errno));
/* We can remove temporary file */
if (unlink(merge_to_file_path))
elog(ERROR, "Could not remove temporary file \"%s\": %s",
tmp_file_path, strerror(errno));
merge_to_file_path, strerror(errno));
}
/*
* Otherwise merging algorithm is simpler.
@ -676,8 +696,8 @@ merge_files(void *arg)
elog(VERBOSE, "Merged file \"%s\": " INT64_FORMAT " bytes",
file->path, file->write_size);
else
elog(ERROR, "Merge of file \"%s\" failed. Invalid size: " INT64_FORMAT " bytes",
file->path, file->write_size);
elog(ERROR, "Merge of file \"%s\" failed. Invalid size: %i",
file->path, BYTES_INVALID);
/* Restore relative path */
file->path = prev_file_path;

View File

@ -228,7 +228,7 @@ static XLogRecPtr wal_target_lsn = InvalidXLogRecPtr;
* Read WAL from the archive directory, from 'startpoint' to 'endpoint' on the
* given timeline. Collect data blocks touched by the WAL records into a page map.
*
* Pagemap extracting is processed using threads. Eeach thread reads single WAL
* Pagemap extracting is processed using threads. Each thread reads single WAL
* file.
*/
void
@ -510,9 +510,18 @@ wal_contains_lsn(const char *archivedir, XLogRecPtr target_lsn,
xlogreader = InitXLogPageRead(&reader_data, archivedir, target_tli,
wal_seg_size, false, false, true);
if (xlogreader == NULL)
elog(ERROR, "Out of memory");
xlogreader->system_identifier = instance_config.system_identifier;
res = XLogReadRecord(xlogreader, target_lsn, &errormsg) != NULL;
/* Didn't find 'target_lsn' and there is no error, return false */
if (errormsg)
elog(WARNING, "Could not read WAL record at %X/%X: %s",
(uint32) (target_lsn >> 32), (uint32) (target_lsn), errormsg);
CleanupXLogPageRead(xlogreader);
XLogReaderFree(xlogreader);
@ -551,6 +560,11 @@ get_last_wal_lsn(const char *archivedir, XLogRecPtr start_lsn,
xlogreader = InitXLogPageRead(&reader_data, archivedir, tli, wal_seg_size,
false, false, true);
if (xlogreader == NULL)
elog(ERROR, "Out of memory");
xlogreader->system_identifier = instance_config.system_identifier;
/*
* Calculate startpoint. Decide: we should use 'start_lsn' or offset 0.
*/
@ -1477,7 +1491,7 @@ extractPageInfo(XLogReaderState *record, XLogReaderData *reader_data,
if (!XLogRecGetBlockTag(record, block_id, &rnode, &forknum, &blkno))
continue;
/* We only care about the main fork; others are copied in toto */
/* We only care about the main fork; others are copied as is */
if (forknum != MAIN_FORKNUM)
continue;

View File

@ -89,6 +89,7 @@ static char *target_name = NULL;
static char *target_action = NULL;
static pgRecoveryTarget *recovery_target_options = NULL;
static pgRestoreParams *restore_params = NULL;
bool restore_as_replica = false;
bool no_validate = false;
@ -96,6 +97,10 @@ bool no_validate = false;
bool skip_block_validation = false;
bool skip_external_dirs = false;
/* array for datnames, provided via db-include and db-exclude */
static parray *datname_exclude_list = NULL;
static parray *datname_include_list = NULL;
/* checkdb options */
bool need_amcheck = false;
bool heapallindexed = false;
@ -133,6 +138,9 @@ static void opt_show_format(ConfigOption *opt, const char *arg);
static void compress_init(void);
static void opt_datname_exclude_list(ConfigOption *opt, const char *arg);
static void opt_datname_include_list(ConfigOption *opt, const char *arg);
/*
* Short name should be non-printable ASCII character.
*/
@ -171,6 +179,8 @@ static ConfigOption cmd_options[] =
{ 'b', 143, "no-validate", &no_validate, SOURCE_CMD_STRICT },
{ 'b', 154, "skip-block-validation", &skip_block_validation, SOURCE_CMD_STRICT },
{ 'b', 156, "skip-external-dirs", &skip_external_dirs, SOURCE_CMD_STRICT },
{ 'f', 158, "db-include", opt_datname_include_list, SOURCE_CMD_STRICT },
{ 'f', 159, "db-exclude", opt_datname_exclude_list, SOURCE_CMD_STRICT },
/* checkdb options */
{ 'b', 195, "amcheck", &need_amcheck, SOURCE_CMD_STRICT },
{ 'b', 196, "heapallindexed", &heapallindexed, SOURCE_CMD_STRICT },
@ -304,7 +314,7 @@ main(int argc, char *argv[])
uint32 agent_version = parse_program_version(remote_agent);
elog(agent_version < AGENT_PROTOCOL_VERSION ? ERROR : WARNING,
"Agent version %s doesn't match master pg_probackup version %s",
remote_agent, PROGRAM_VERSION);
PROGRAM_VERSION, remote_agent);
}
fio_communicate(STDIN_FILENO, STDOUT_FILENO);
return 0;
@ -323,11 +333,11 @@ main(int argc, char *argv[])
|| strcmp(argv[1], "-V") == 0)
{
#ifdef PGPRO_VERSION
fprintf(stderr, "%s %s (Postgres Pro %s %s)\n",
fprintf(stdout, "%s %s (Postgres Pro %s %s)\n",
PROGRAM_NAME, PROGRAM_VERSION,
PGPRO_VERSION, PGPRO_EDITION);
#else
fprintf(stderr, "%s %s (PostgreSQL %s)\n",
fprintf(stdout, "%s %s (PostgreSQL %s)\n",
PROGRAM_NAME, PROGRAM_VERSION, PG_VERSION);
#endif
exit(0);
@ -490,7 +500,7 @@ main(int argc, char *argv[])
/* Usually checkdb for file logging requires log_directory
* to be specified explicitly, but if backup_dir and instance name are provided,
* checkdb can use the tusual default values or values from config
* checkdb can use the usual default values or values from config
*/
if (backup_subcmd == CHECKDB_CMD &&
(instance_config.logger.log_level_file != LOG_OFF &&
@ -589,9 +599,39 @@ main(int argc, char *argv[])
target_inclusive, target_tli, target_lsn,
(target_stop != NULL) ? target_stop :
(target_immediate) ? "immediate" : NULL,
target_name, target_action, no_validate);
target_name, target_action);
/* keep all params in one structure */
restore_params = pgut_new(pgRestoreParams);
restore_params->is_restore = (backup_subcmd == RESTORE_CMD);
restore_params->no_validate = no_validate;
restore_params->restore_as_replica = restore_as_replica;
restore_params->skip_block_validation = skip_block_validation;
restore_params->skip_external_dirs = skip_external_dirs;
restore_params->partial_db_list = NULL;
restore_params->partial_restore_type = NONE;
/* handle partial restore parameters */
if (datname_exclude_list && datname_include_list)
elog(ERROR, "You cannot specify '--db-include' and '--db-exclude' together");
if (datname_exclude_list)
{
restore_params->partial_restore_type = EXCLUDE;
restore_params->partial_db_list = datname_exclude_list;
}
else if (datname_include_list)
{
restore_params->partial_restore_type = INCLUDE;
restore_params->partial_db_list = datname_include_list;
}
}
/* sanity */
if (backup_subcmd == VALIDATE_CMD && restore_params->no_validate)
elog(ERROR, "You cannot specify \"--no-validate\" option with the \"%s\" command",
command_name);
if (num_threads < 1)
num_threads = 1;
@ -612,17 +652,10 @@ main(int argc, char *argv[])
return do_init();
case BACKUP_CMD:
{
const char *backup_mode;
time_t start_time;
time_t start_time = time(NULL);
start_time = time(NULL);
backup_mode = deparse_backup_mode(current.backup_mode);
current.stream = stream_wal;
elog(INFO, "Backup start, pg_probackup version: %s, backup ID: %s, backup mode: %s, instance: %s, stream: %s, remote %s",
PROGRAM_VERSION, base36enc(start_time), backup_mode, instance_name,
stream_wal ? "true" : "false", IsSshProtocol() ? "true" : "false");
/* sanity */
if (current.backup_mode == BACKUP_MODE_INVALID)
elog(ERROR, "required parameter not specified: BACKUP_MODE "
@ -632,25 +665,32 @@ main(int argc, char *argv[])
}
case RESTORE_CMD:
return do_restore_or_validate(current.backup_id,
recovery_target_options,
true);
recovery_target_options,
restore_params);
case VALIDATE_CMD:
if (current.backup_id == 0 && target_time == 0 && target_xid == 0)
if (current.backup_id == 0 && target_time == 0 && target_xid == 0 && !target_lsn)
{
/* sanity */
if (datname_exclude_list || datname_include_list)
elog(ERROR, "You must specify parameter (-i, --backup-id) for partial validation");
return do_validate_all();
}
else
/* PITR validation and, optionally, partial validation */
return do_restore_or_validate(current.backup_id,
recovery_target_options,
false);
restore_params);
case SHOW_CMD:
return do_show(current.backup_id);
case DELETE_CMD:
if (delete_expired && backup_id_string)
elog(ERROR, "You cannot specify --delete-expired and --backup-id options together");
elog(ERROR, "You cannot specify --delete-expired and (-i, --backup-id) options together");
if (merge_expired && backup_id_string)
elog(ERROR, "You cannot specify --merge-expired and --backup-id options together");
elog(ERROR, "You cannot specify --merge-expired and (-i, --backup-id) options together");
if (!delete_expired && !merge_expired && !delete_wal && !backup_id_string)
elog(ERROR, "You must specify at least one of the delete options: "
"--expired |--wal |--merge-expired |--delete-invalid |--backup_id");
"--delete-expired |--delete-wal |--merge-expired |(-i, --backup-id)");
if (!backup_id_string)
return do_retention();
else
@ -741,3 +781,40 @@ compress_init(void)
elog(ERROR, "Multithread backup does not support pglz compression");
}
}
/* Construct array of datnames, provided by user via db-exclude option */
void
opt_datname_exclude_list(ConfigOption *opt, const char *arg)
{
char *dbname = NULL;
if (!datname_exclude_list)
datname_exclude_list = parray_new();
dbname = pgut_malloc(strlen(arg) + 1);
/* TODO add sanity for database name */
strcpy(dbname, arg);
parray_append(datname_exclude_list, dbname);
}
/* Construct array of datnames, provided by user via db-include option */
void
opt_datname_include_list(ConfigOption *opt, const char *arg)
{
char *dbname = NULL;
if (!datname_include_list)
datname_include_list = parray_new();
dbname = pgut_malloc(strlen(arg) + 1);
if (strcmp(dbname, "tempate0") == 0 ||
strcmp(dbname, "tempate1") == 0)
elog(ERROR, "Databases 'template0' and 'template1' cannot be used for partial restore or validation");
strcpy(dbname, arg);
parray_append(datname_include_list, dbname);
}
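Both handlers accumulate their arguments, so the options can be repeated on the command line (for example, --db-include db1 --db-include db2); main() rejects mixing --db-include with --db-exclude and stores whichever list was given in restore_params->partial_db_list together with the matching PartialRestoreType.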

View File

@ -61,13 +61,14 @@ extern const char *PROGRAM_EMAIL;
#define PG_BLACK_LIST "black_list"
#define PG_TABLESPACE_MAP_FILE "tablespace_map"
#define EXTERNAL_DIR "external_directories/externaldir"
#define DATABASE_MAP "database_map"
/* Timeout defaults */
#define PARTIAL_WAL_TIMER 60
#define ARCHIVE_TIMEOUT_DEFAULT 300
#define REPLICA_TIMEOUT_DEFAULT 300
/* Direcotry/File permission */
/* Directory/File permission */
#define DIR_PERMISSION (0700)
#define FILE_PERMISSION (0600)
@ -85,6 +86,19 @@ extern const char *PROGRAM_EMAIL;
#define XRecOffIsNull(xlrp) \
((xlrp) % XLOG_BLCKSZ == 0)
typedef struct db_map_entry
{
Oid dbOid;
char *datname;
} db_map_entry;
typedef enum PartialRestoreType
{
NONE,
INCLUDE,
EXCLUDE,
} PartialRestoreType;
typedef enum CompressAlg
{
NOT_DEFINED_COMPRESS = 0,
@ -189,8 +203,8 @@ typedef enum ShowFormat
#define BYTES_INVALID (-1) /* file didn't change since previous backup; DELTA backups do not rely on it */
#define FILE_NOT_FOUND (-2) /* file disappeared during backup */
#define BLOCKNUM_INVALID (-1)
#define PROGRAM_VERSION "2.1.3"
#define AGENT_PROTOCOL_VERSION 20103
#define PROGRAM_VERSION "2.1.5"
#define AGENT_PROTOCOL_VERSION 20105
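For reference, the agent protocol number appears to encode the version as major*10000 + minor*100 + patch, so 2.1.3 maps to 20103 and 2.1.5 to 20105; presumably this is what parse_program_version() produces when the master compares its version with the remote agent's.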
typedef struct ConnectionOptions
@ -249,9 +263,11 @@ typedef struct PGNodeInfo
uint32 block_size;
uint32 wal_block_size;
uint32 checksum_version;
bool is_superuser;
int server_version;
char server_version_str[100];
char program_version[100];
char server_version[100];
} PGNodeInfo;
typedef struct pgBackup pgBackup;
@ -263,7 +279,7 @@ struct pgBackup
time_t backup_id; /* Identifier of the backup.
* Currently it's the same as start_time */
BackupStatus status; /* Status - one of BACKUP_STATUS_xxx above*/
TimeLineID tli; /* timeline of start and stop baskup lsns */
TimeLineID tli; /* timeline of start and stop backup lsns */
XLogRecPtr start_lsn; /* backup's starting transaction log location */
XLogRecPtr stop_lsn; /* backup's finishing transaction log location */
time_t start_time; /* since this moment backup has status
@ -291,7 +307,6 @@ struct pgBackup
int compress_level;
/* Fields needed for compatibility check */
PGNodeInfo nodeInfo;
uint32 block_size;
uint32 wal_block_size;
uint32 checksum_version;
@ -329,9 +344,22 @@ typedef struct pgRecoveryTarget
const char *target_stop;
const char *target_name;
const char *target_action;
bool no_validate;
} pgRecoveryTarget;
/* Options needed for restore and validate commands */
typedef struct pgRestoreParams
{
bool is_restore;
bool no_validate;
bool restore_as_replica;
bool skip_external_dirs;
bool skip_block_validation;
/* options for partial restore */
PartialRestoreType partial_restore_type;
parray *partial_db_list;
} pgRestoreParams;
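pgRestoreParams is filled once in main() (see the RESTORE/VALIDATE handling above) and passed down through do_restore_or_validate(), restore_backup(), create_recovery_conf() and pgBackupValidate(), replacing the restore-option globals restore_as_replica and skip_external_dirs (skip_block_validation remains a global only for checkdb).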
typedef struct
{
const char *from_root;
@ -437,11 +465,6 @@ extern char* remote_agent;
extern bool is_ptrack_support;
extern bool exclusive_backup;
/* restore options */
extern bool restore_as_replica;
extern bool skip_block_validation;
extern bool skip_external_dirs;
/* delete options */
extern bool delete_wal;
extern bool delete_expired;
@ -460,6 +483,7 @@ extern ShowFormat show_format;
/* checkdb options */
extern bool heapallindexed;
extern bool skip_block_validation;
/* current settings */
extern pgBackup current;
@ -487,7 +511,7 @@ extern char *pg_ptrack_get_block(ConnectionArgs *arguments,
/* in restore.c */
extern int do_restore_or_validate(time_t target_backup_id,
pgRecoveryTarget *rt,
bool is_restore);
pgRestoreParams *params);
extern bool satisfy_timeline(const parray *timelines, const pgBackup *backup);
extern bool satisfy_recovery_target(const pgBackup *backup,
const pgRecoveryTarget *rt);
@ -495,12 +519,17 @@ extern pgRecoveryTarget *parseRecoveryTargetOptions(
const char *target_time, const char *target_xid,
const char *target_inclusive, TimeLineID target_tli, const char* target_lsn,
const char *target_stop, const char *target_name,
const char *target_action, bool no_validate);
const char *target_action);
extern parray *get_dbOid_exclude_list(pgBackup *backup, parray *files,
parray *datname_list, PartialRestoreType partial_restore_type);
/* in merge.c */
extern void do_merge(time_t backup_id);
extern void merge_backups(pgBackup *backup, pgBackup *next_backup);
extern parray *read_database_map(pgBackup *backup);
/* in init.c */
extern int do_init(void);
extern int do_add_instance(void);
@ -538,7 +567,7 @@ extern void help_pg_probackup(void);
extern void help_command(char *command);
/* in validate.c */
extern void pgBackupValidate(pgBackup* backup);
extern void pgBackupValidate(pgBackup* backup, pgRestoreParams *params);
extern int do_validate_all(void);
/* in catalog.c */
@ -554,7 +583,8 @@ extern parray *catalog_get_backup_list(time_t requested_backup_id);
extern void catalog_lock_backup_list(parray *backup_list, int from_idx,
int to_idx);
extern pgBackup *catalog_get_last_data_backup(parray *backup_list,
TimeLineID tli);
TimeLineID tli,
time_t current_start_time);
extern void pgBackupWriteControl(FILE *out, pgBackup *backup);
extern void write_backup_filelist(pgBackup *backup, parray *files,
const char *root, parray *external_list);
@ -564,6 +594,7 @@ extern void pgBackupGetPath(const pgBackup *backup, char *path, size_t len,
extern void pgBackupGetPath2(const pgBackup *backup, char *path, size_t len,
const char *subdir1, const char *subdir2);
extern int pgBackupCreateDir(pgBackup *backup);
extern void pgNodeInit(PGNodeInfo *node);
extern void pgBackupInit(pgBackup *backup);
extern void pgBackupFree(void *backup);
extern int pgBackupCompareId(const void *f1, const void *f2);
@ -603,6 +634,11 @@ extern void check_tablespace_mapping(pgBackup *backup);
extern void check_external_dir_mapping(pgBackup *backup);
extern char *get_external_remap(char *current_dir);
extern void print_database_map(FILE *out, parray *database_list);
extern void write_database_map(pgBackup *backup, parray *database_list,
parray *backup_file_list);
extern void db_map_entry_free(void *map);
extern void print_file_list(FILE *out, const parray *files, const char *root,
const char *external_prefix, parray *external_list);
extern parray *dir_read_file_list(const char *root, const char *external_prefix,
@ -636,6 +672,7 @@ extern int pgFileComparePathDesc(const void *f1, const void *f2);
extern int pgFileComparePathWithExternalDesc(const void *f1, const void *f2);
extern int pgFileCompareLinked(const void *f1, const void *f2);
extern int pgFileCompareSize(const void *f1, const void *f2);
extern int pgCompareOid(const void *f1, const void *f2);
/* in data.c */
extern bool check_data_file(ConnectionArgs* arguments, pgFile* file, uint32 checksum_version);
@ -651,6 +688,8 @@ extern void restore_data_file(const char *to_path,
uint32 backup_version);
extern bool copy_file(fio_location from_location, const char *to_root,
fio_location to_location, pgFile *file, bool missing_ok);
extern bool create_empty_file(fio_location from_location, const char *to_root,
fio_location to_location, pgFile *file);
extern bool check_file_pages(pgFile *file, XLogRecPtr stop_lsn,
uint32 checksum_version, uint32 backup_version);
@ -697,6 +736,8 @@ extern bool parse_page(Page page, XLogRecPtr *lsn);
int32 do_compress(void* dst, size_t dst_size, void const* src, size_t src_size,
CompressAlg alg, int level, const char **errormsg);
extern void pretty_size(int64 size, char *buf, size_t len);
extern PGconn *pgdata_basic_setup(ConnectionOptions conn_opt, PGNodeInfo *nodeInfo);
extern void check_system_identifiers(PGconn *conn, char *pgdata);

View File

@ -25,6 +25,8 @@ typedef struct
char *external_prefix;
parray *dest_external_dirs;
parray *dest_files;
parray *dbOid_exclude_list;
bool skip_external_dirs;
/*
* Return value from the thread.
@ -34,19 +36,65 @@ typedef struct
} restore_files_arg;
static void restore_backup(pgBackup *backup, parray *dest_external_dirs,
parray *dest_files);
parray *dest_files, parray *dbOid_exclude_list,
pgRestoreParams *params);
static void create_recovery_conf(time_t backup_id,
pgRecoveryTarget *rt,
pgBackup *backup);
pgBackup *backup,
pgRestoreParams *params);
static parray *read_timeline_history(TimeLineID targetTLI);
static void *restore_files(void *arg);
static void set_orphan_status(parray *backups, pgBackup *parent_backup);
/*
* Iterate over backup list to find all ancestors of the broken parent_backup
* and update their status to BACKUP_STATUS_ORPHAN
*/
static void
set_orphan_status(parray *backups, pgBackup *parent_backup)
{
/* chain is intact, but at least one parent is invalid */
char *parent_backup_id;
int j;
/* parent_backup_id is a human-readable backup ID */
parent_backup_id = base36enc_dup(parent_backup->start_time);
for (j = 0; j < parray_num(backups); j++)
{
pgBackup *backup = (pgBackup *) parray_get(backups, j);
if (is_parent(parent_backup->start_time, backup, false))
{
if (backup->status == BACKUP_STATUS_OK ||
backup->status == BACKUP_STATUS_DONE)
{
write_backup_status(backup, BACKUP_STATUS_ORPHAN);
elog(WARNING,
"Backup %s is orphaned because his parent %s has status: %s",
base36enc(backup->start_time),
parent_backup_id,
status2str(parent_backup->status));
}
else
{
elog(WARNING, "Backup %s has parent %s with status: %s",
base36enc(backup->start_time), parent_backup_id,
status2str(parent_backup->status));
}
}
}
pg_free(parent_backup_id);
}
/*
* Entry point of pg_probackup RESTORE and VALIDATE subcommands.
*/
int
do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
bool is_restore)
pgRestoreParams *params)
{
int i = 0;
int j = 0;
@ -56,10 +104,11 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
pgBackup *dest_backup = NULL;
pgBackup *base_full_backup = NULL;
pgBackup *corrupted_backup = NULL;
char *action = is_restore ? "Restore":"Validate";
char *action = params->is_restore ? "Restore":"Validate";
parray *parent_chain = NULL;
parray *dbOid_exclude_list = NULL;
if (is_restore)
if (params->is_restore)
{
if (instance_config.pgdata == NULL)
elog(ERROR,
@ -91,10 +140,16 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
/*
* [PGPRO-1164] If BACKUP_ID is not provided for restore command,
* we must find the first valid(!) backup.
* If target_backup_id is not provided, we can be sure that
* PITR for restore or validate is requested.
* So we can assume that the user is more interested in recovery to a specific
* point in time and NOT interested in revalidation of invalid backups.
* Based on these assumptions we should choose only OK and DONE backups
* as candidates for validate and restore.
*/
if (is_restore &&
target_backup_id == INVALID_BACKUP_ID &&
if (target_backup_id == INVALID_BACKUP_ID &&
(current_backup->status != BACKUP_STATUS_OK &&
current_backup->status != BACKUP_STATUS_DONE))
{
@ -127,7 +182,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
if ((current_backup->status == BACKUP_STATUS_ORPHAN ||
current_backup->status == BACKUP_STATUS_CORRUPT ||
current_backup->status == BACKUP_STATUS_RUNNING)
&& !rt->no_validate)
&& !params->no_validate)
elog(WARNING, "Backup %s has status: %s",
base36enc(current_backup->start_time), status2str(current_backup->status));
else
@ -160,7 +215,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
if (!satisfy_recovery_target(current_backup, rt))
{
if (target_backup_id != INVALID_BACKUP_ID)
elog(ERROR, "target backup %s does not satisfy restore options",
elog(ERROR, "Requested backup %s does not satisfy restore options",
base36enc(target_backup_id));
else
/* Try to find another backup that satisfies target options */
@ -175,8 +230,16 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
}
}
/* TODO: Show latest possible target */
if (dest_backup == NULL)
elog(ERROR, "Backup satisfying target options is not found.");
{
/* Failed to find target backup */
if (target_backup_id)
elog(ERROR, "Requested backup %s is not found.", base36enc(target_backup_id));
else
elog(ERROR, "Backup satisfying target options is not found.");
/* TODO: check if user asked PITR or just restore of latest backup */
}
/* If we already found dest_backup, look for full backup. */
if (dest_backup->backup_mode == BACKUP_MODE_FULL)
@ -193,7 +256,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
* and orphanize all its descendants
*/
char *missing_backup_id;
time_t missing_backup_start_time;
time_t missing_backup_start_time;
missing_backup_start_time = tmp_backup->parent_backup;
missing_backup_id = base36enc_dup(tmp_backup->parent_backup);
@ -229,38 +292,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
else if (result == 1)
{
/* chain is intact, but at least one parent is invalid */
char *parent_backup_id;
/* parent_backup_id contain human-readable backup ID of oldest invalid backup */
parent_backup_id = base36enc_dup(tmp_backup->start_time);
for (j = 0; j < parray_num(backups); j++)
{
pgBackup *backup = (pgBackup *) parray_get(backups, j);
if (is_parent(tmp_backup->start_time, backup, false))
{
if (backup->status == BACKUP_STATUS_OK ||
backup->status == BACKUP_STATUS_DONE)
{
write_backup_status(backup, BACKUP_STATUS_ORPHAN);
elog(WARNING,
"Backup %s is orphaned because his parent %s has status: %s",
base36enc(backup->start_time),
parent_backup_id,
status2str(tmp_backup->status));
}
else
{
elog(WARNING, "Backup %s has parent %s with status: %s",
base36enc(backup->start_time), parent_backup_id,
status2str(tmp_backup->status));
}
}
}
pg_free(parent_backup_id);
set_orphan_status(backups, tmp_backup);
tmp_backup = find_parent_full_backup(dest_backup);
/* sanity */
@ -286,12 +318,12 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
* Ensure that directories provided in tablespace mapping are valid
* i.e. empty or not exist.
*/
if (is_restore)
if (params->is_restore)
{
check_tablespace_mapping(dest_backup);
/* no point in checking external directories if their restore is not resquested */
if (!skip_external_dirs)
/* no point in checking external directories if their restore is not requested */
if (!params->skip_external_dirs)
check_external_dir_mapping(dest_backup);
}
@ -315,7 +347,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
parray_append(parent_chain, base_full_backup);
/* for validation or restore with enabled validation */
if (!is_restore || !rt->no_validate)
if (!params->is_restore || !params->no_validate)
{
if (dest_backup->backup_mode != BACKUP_MODE_FULL)
elog(INFO, "Validating parents for backup %s", base36enc(dest_backup->start_time));
@ -330,7 +362,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
/* Do not interrupt, validate the next backup */
if (!lock_backup(tmp_backup))
{
if (is_restore)
if (params->is_restore)
elog(ERROR, "Cannot lock backup %s directory",
base36enc(tmp_backup->start_time));
else
@ -341,7 +373,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
}
}
pgBackupValidate(tmp_backup);
pgBackupValidate(tmp_backup, params);
/* After pgBackupValidate() only following backup
* states are possible: ERROR, RUNNING, CORRUPT and OK.
* Validate WAL only for OK, because there is no point
@ -369,32 +401,9 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
rt->target_xid, rt->target_lsn,
base_full_backup->tli, instance_config.xlog_seg_size);
}
/* Orphinize every OK descendant of corrupted backup */
/* Orphanize every OK descendant of corrupted backup */
else
{
char *corrupted_backup_id;
corrupted_backup_id = base36enc_dup(corrupted_backup->start_time);
for (j = 0; j < parray_num(backups); j++)
{
pgBackup *backup = (pgBackup *) parray_get(backups, j);
if (is_parent(corrupted_backup->start_time, backup, false))
{
if (backup->status == BACKUP_STATUS_OK ||
backup->status == BACKUP_STATUS_DONE)
{
write_backup_status(backup, BACKUP_STATUS_ORPHAN);
elog(WARNING, "Backup %s is orphaned because his parent %s has status: %s",
base36enc(backup->start_time),
corrupted_backup_id,
status2str(corrupted_backup->status));
}
}
}
free(corrupted_backup_id);
}
set_orphan_status(backups, corrupted_backup);
}
/*
@ -404,7 +413,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
if (dest_backup->status == BACKUP_STATUS_OK ||
dest_backup->status == BACKUP_STATUS_DONE)
{
if (rt->no_validate)
if (params->no_validate)
elog(WARNING, "Backup %s is used without validation.", base36enc(dest_backup->start_time));
else
elog(INFO, "Backup %s is valid.", base36enc(dest_backup->start_time));
@ -420,7 +429,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
/* We ensured that all backups are valid, now restore if required
* TODO: before restore - lock entire parent chain
*/
if (is_restore)
if (params->is_restore)
{
parray *dest_external_dirs = NULL;
parray *dest_files;
@ -437,6 +446,21 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
FIO_BACKUP_HOST);
parray_qsort(dest_files, pgFileCompareRelPathWithExternal);
/*
* Get a list of dbOids to skip if the user requested a partial restore.
* It is important that we do this after(!) validation so
* database_map can be trusted.
* NOTE: database_map could be missing for legitimate reasons, e.g. missing
* permissions on pg_database during `backup`; as long as the user
* does not request a partial restore, that is OK.
*
* If partial restore is requested and database map doesn't exist,
* throw an error.
*/
if (params->partial_db_list)
dbOid_exclude_list = get_dbOid_exclude_list(dest_backup, dest_files, params->partial_db_list,
params->partial_restore_type);
/*
* Restore dest_backup internal directories.
*/
@ -448,7 +472,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
/*
* Restore dest_backup external directories.
*/
if (dest_backup->external_dir_str && !skip_external_dirs)
if (dest_backup->external_dir_str && !params->skip_external_dirs)
{
dest_external_dirs = make_external_directory_list(
dest_backup->external_dir_str,
@ -462,7 +486,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
}
/*
* At least restore backups files starting from the parent backup.
* Restore backups files starting from the parent backup.
*/
for (i = parray_num(parent_chain) - 1; i >= 0; i--)
{
@ -478,10 +502,10 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
* Backup was locked during validation if no-validate wasn't
* specified.
*/
if (rt->no_validate && !lock_backup(backup))
if (params->no_validate && !lock_backup(backup))
elog(ERROR, "Cannot lock backup directory");
restore_backup(backup, dest_external_dirs, dest_files);
restore_backup(backup, dest_external_dirs, dest_files, dbOid_exclude_list, params);
}
if (dest_external_dirs != NULL)
@ -491,7 +515,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
parray_free(dest_files);
/* Create recovery.conf with given recovery target parameters */
create_recovery_conf(target_backup_id, rt, dest_backup);
create_recovery_conf(target_backup_id, rt, dest_backup, params);
}
/* cleanup */
@ -508,7 +532,9 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
* Restore one backup.
*/
void
restore_backup(pgBackup *backup, parray *dest_external_dirs, parray *dest_files)
restore_backup(pgBackup *backup, parray *dest_external_dirs,
parray *dest_files, parray *dbOid_exclude_list,
pgRestoreParams *params)
{
char timestamp[100];
char database_path[MAXPGPATH];
@ -568,7 +594,7 @@ restore_backup(pgBackup *backup, parray *dest_external_dirs, parray *dest_files)
/*
* If the entry was an external directory, create it in the backup.
*/
if (!skip_external_dirs &&
if (!params->skip_external_dirs &&
file->external_dir_num && S_ISDIR(file->mode) &&
/* Do not create unnecessary external directories */
parray_bsearch(dest_files, file, pgFileCompareRelPathWithExternal))
@ -615,6 +641,8 @@ restore_backup(pgBackup *backup, parray *dest_external_dirs, parray *dest_files)
arg->external_prefix = external_prefix;
arg->dest_external_dirs = dest_external_dirs;
arg->dest_files = dest_files;
arg->dbOid_exclude_list = dbOid_exclude_list;
arg->skip_external_dirs = params->skip_external_dirs;
/* By default there are some error */
threads_args[i].ret = 1;
@ -671,11 +699,35 @@ restore_files(void *arg)
if (interrupted || thread_interrupted)
elog(ERROR, "Interrupted during restore database");
/* Directories were created before */
if (S_ISDIR(file->mode))
continue;
if (progress)
elog(INFO, "Progress: (%d/%lu). Process file %s ",
i + 1, (unsigned long) parray_num(arguments->files),
file->rel_path);
/* Only files from pgdata can be skipped by partial restore */
if (arguments->dbOid_exclude_list && file->external_dir_num == 0)
{
/* Check if the file belongs to the database we exclude */
if (parray_bsearch(arguments->dbOid_exclude_list,
&file->dbOid, pgCompareOid))
{
/*
* We cannot simply skip the file, because it may lead to
* failure during WAL redo; hence, create empty file.
*/
create_empty_file(FIO_BACKUP_HOST,
instance_config.pgdata, FIO_DB_HOST, file);
elog(VERBOSE, "Exclude file due to partial restore: \"%s\"",
file->rel_path);
continue;
}
}
/*
* For PAGE and PTRACK backups skip datafiles which haven't changed
* since previous backup and thus were not backed up.
@ -696,10 +748,6 @@ restore_files(void *arg)
}
}
/* Directories were created before */
if (S_ISDIR(file->mode))
continue;
/* Do not restore tablespace_map file */
if (path_is_prefix_of_path(PG_TABLESPACE_MAP_FILE, file->rel_path))
{
@ -707,8 +755,16 @@ restore_files(void *arg)
continue;
}
/* Do not restore database_map file */
if ((file->external_dir_num == 0) &&
strcmp(DATABASE_MAP, file->rel_path) == 0)
{
elog(VERBOSE, "Skip database_map");
continue;
}
/* Do no restore external directory file if a user doesn't want */
if (skip_external_dirs && file->external_dir_num > 0)
if (arguments->skip_external_dirs && file->external_dir_num > 0)
continue;
/* Skip unnecessary file */
@ -722,8 +778,9 @@ restore_files(void *arg)
* block and have BackupPageHeader meta information, so we cannot just
* copy the file from backup.
*/
elog(VERBOSE, "Restoring file %s, is_datafile %i, is_cfs %i",
elog(VERBOSE, "Restoring file \"%s\", is_datafile %i, is_cfs %i",
file->path, file->is_datafile?1:0, file->is_cfs?1:0);
if (file->is_datafile && !file->is_cfs)
{
char to_path[MAXPGPATH];
@ -769,7 +826,8 @@ restore_files(void *arg)
static void
create_recovery_conf(time_t backup_id,
pgRecoveryTarget *rt,
pgBackup *backup)
pgBackup *backup,
pgRestoreParams *params)
{
char path[MAXPGPATH];
FILE *fp;
@ -782,14 +840,14 @@ create_recovery_conf(time_t backup_id,
(rt->time_string || rt->xid_string || rt->lsn_string) || target_latest;
/* No need to generate recovery.conf at all. */
if (!(need_restore_conf || restore_as_replica))
if (!(need_restore_conf || params->restore_as_replica))
return;
elog(LOG, "----------------------------------------");
elog(LOG, "creating recovery.conf");
snprintf(path, lengthof(path), "%s/recovery.conf", instance_config.pgdata);
fp = fio_fopen(path, "wt", FIO_DB_HOST);
fp = fio_fopen(path, "w", FIO_DB_HOST);
if (fp == NULL)
elog(ERROR, "cannot open recovery.conf \"%s\": %s", path,
strerror(errno));
@ -835,7 +893,7 @@ create_recovery_conf(time_t backup_id,
fio_fprintf(fp, "recovery_target_action = '%s'\n", rt->target_action);
}
if (restore_as_replica)
if (params->restore_as_replica)
{
fio_fprintf(fp, "standby_mode = 'on'\n");
@ -954,6 +1012,7 @@ read_timeline_history(TimeLineID targetTLI)
return result;
}
/* TODO: do not ignore timelines. What if requested target located in different timeline? */
bool
satisfy_recovery_target(const pgBackup *backup, const pgRecoveryTarget *rt)
{
@ -969,6 +1028,7 @@ satisfy_recovery_target(const pgBackup *backup, const pgRecoveryTarget *rt)
return true;
}
/* TODO description */
bool
satisfy_timeline(const parray *timelines, const pgBackup *backup)
{
@ -998,8 +1058,7 @@ parseRecoveryTargetOptions(const char *target_time,
const char *target_lsn,
const char *target_stop,
const char *target_name,
const char *target_action,
bool no_validate)
const char *target_action)
{
bool dummy_bool;
/*
@ -1023,7 +1082,7 @@ parseRecoveryTargetOptions(const char *target_time,
if (parse_time(target_time, &dummy_time, false))
rt->target_time = dummy_time;
else
elog(ERROR, "Invalid value for --recovery-target-time option %s",
elog(ERROR, "Invalid value for '--recovery-target-time' option %s",
target_time);
}
@ -1041,7 +1100,7 @@ parseRecoveryTargetOptions(const char *target_time,
#endif
rt->target_xid = dummy_xid;
else
elog(ERROR, "Invalid value for --recovery-target-xid option %s",
elog(ERROR, "Invalid value for '--recovery-target-xid' option %s",
target_xid);
}
@ -1054,7 +1113,7 @@ parseRecoveryTargetOptions(const char *target_time,
if (parse_lsn(target_lsn, &dummy_lsn))
rt->target_lsn = dummy_lsn;
else
elog(ERROR, "Invalid value of --ecovery-target-lsn option %s",
elog(ERROR, "Invalid value of '--recovery-target-lsn' option %s",
target_lsn);
}
@ -1064,7 +1123,7 @@ parseRecoveryTargetOptions(const char *target_time,
if (parse_bool(target_inclusive, &dummy_bool))
rt->target_inclusive = dummy_bool;
else
elog(ERROR, "Invalid value for --recovery-target-inclusive option %s",
elog(ERROR, "Invalid value for '--recovery-target-inclusive' option %s",
target_inclusive);
}
@ -1073,15 +1132,13 @@ parseRecoveryTargetOptions(const char *target_time,
{
if ((strcmp(target_stop, "immediate") != 0)
&& (strcmp(target_stop, "latest") != 0))
elog(ERROR, "Invalid value for --recovery-target option %s",
elog(ERROR, "Invalid value for '--recovery-target' option %s",
target_stop);
recovery_target_specified++;
rt->target_stop = target_stop;
}
rt->no_validate = no_validate;
if (target_name)
{
recovery_target_specified++;
@ -1093,7 +1150,7 @@ parseRecoveryTargetOptions(const char *target_time,
if ((strcmp(target_action, "pause") != 0)
&& (strcmp(target_action, "promote") != 0)
&& (strcmp(target_action, "shutdown") != 0))
elog(ERROR, "Invalid value for --recovery-target-action option %s",
elog(ERROR, "Invalid value for '--recovery-target-action' option %s",
target_action);
rt->target_action = target_action;
@ -1118,3 +1175,137 @@ parseRecoveryTargetOptions(const char *target_time,
return rt;
}
/*
* Return array of dbOids of databases that should not be restored
* Regardless of which option the user used, db-include or db-exclude,
* we always convert it into an exclude list.
*/
parray *
get_dbOid_exclude_list(pgBackup *backup, parray *files,
parray *datname_list, PartialRestoreType partial_restore_type)
{
int i;
int j;
parray *database_map = NULL;
parray *dbOid_exclude_list = NULL;
pgFile *database_map_file = NULL;
pg_crc32 crc;
char path[MAXPGPATH];
char database_map_path[MAXPGPATH];
/* make sure that database_map is in backup_content.control */
for (i = 0; i < parray_num(files); i++)
{
pgFile *file = (pgFile *) parray_get(files, i);
if ((file->external_dir_num == 0) &&
strcmp(DATABASE_MAP, file->name) == 0)
{
database_map_file = file;
break;
}
}
if (!database_map_file)
elog(ERROR, "Backup %s doesn't contain a database_map, partial restore is impossible.",
base36enc(backup->start_time));
pgBackupGetPath(backup, path, lengthof(path), DATABASE_DIR);
join_path_components(database_map_path, path, DATABASE_MAP);
/* check database_map CRC */
crc = pgFileGetCRC(database_map_path, true, true, NULL, FIO_LOCAL_HOST);
if (crc != database_map_file->crc)
elog(ERROR, "Invalid CRC of backup file \"%s\" : %X. Expected %X",
database_map_file->path, crc, database_map_file->crc);
/* get database_map from file */
database_map = read_database_map(backup);
/* partial restore requested but database_map is missing */
if (!database_map)
elog(ERROR, "Backup %s has empty or mangled database_map, partial restore is impossible.",
base36enc(backup->start_time));
/*
* So we have a list of datnames and a database_map for it.
* We must construct a list of dbOids to exclude.
*/
if (partial_restore_type == INCLUDE)
{
/* For 'include', keep dbOid of every datname NOT specified by user */
for (i = 0; i < parray_num(datname_list); i++)
{
bool found_match = false;
char *datname = (char *) parray_get(datname_list, i);
for (j = 0; j < parray_num(database_map); j++)
{
db_map_entry *db_entry = (db_map_entry *) parray_get(database_map, j);
/* got a match */
if (strcmp(db_entry->datname, datname) == 0)
{
found_match = true;
/* for db-include we must exclude db_entry from database_map */
parray_remove(database_map, j);
j--;
}
}
/* If specified datname is not found in database_map, error out */
if (!found_match)
elog(ERROR, "Failed to find a database '%s' in database_map of backup %s",
datname, base36enc(backup->start_time));
}
/* At this moment only databases to exclude are left in the map */
for (j = 0; j < parray_num(database_map); j++)
{
db_map_entry *db_entry = (db_map_entry *) parray_get(database_map, j);
if (!dbOid_exclude_list)
dbOid_exclude_list = parray_new();
parray_append(dbOid_exclude_list, &db_entry->dbOid);
}
}
else if (partial_restore_type == EXCLUDE)
{
/* For exclude, job is easier - find dbOid for every specified datname */
for (i = 0; i < parray_num(datname_list); i++)
{
bool found_match = false;
char *datname = (char *) parray_get(datname_list, i);
for (j = 0; j < parray_num(database_map); j++)
{
db_map_entry *db_entry = (db_map_entry *) parray_get(database_map, j);
/* got a match */
if (strcmp(db_entry->datname, datname) == 0)
{
found_match = true;
/* for db-exclude we must add dbOid to exclude list */
if (!dbOid_exclude_list)
dbOid_exclude_list = parray_new();
parray_append(dbOid_exclude_list, &db_entry->dbOid);
}
}
/* If specified datname is not found in database_map, error out */
if (!found_match)
elog(ERROR, "Failed to find a database '%s' in database_map of backup %s",
datname, base36enc(backup->start_time));
}
}
/* extra sanity: ensure that list is not empty */
if (!dbOid_exclude_list || parray_num(dbOid_exclude_list) < 1)
elog(ERROR, "Failed to find a match in database_map of backup %s for partial restore",
base36enc(backup->start_time));
/* sort dbOid array in ASC order */
parray_qsort(dbOid_exclude_list, pgCompareOid);
return dbOid_exclude_list;
}
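A worked example with hypothetical names and Oids: if database_map lists postgres (13297), app_db (16384) and reports (16390), then --db-include app_db leaves {13297, 16390} in the exclude list, whereas --db-exclude app_db yields {16384}. In both cases the array is sorted with pgCompareOid so that restore_files() can binary-search it for each file's dbOid.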

View File

@ -118,7 +118,7 @@ do_show(time_t requested_backup_id)
return show_backup(requested_backup_id);
}
static void
void
pretty_size(int64 size, char *buf, size_t len)
{
int exp = 0;

View File

@ -153,7 +153,7 @@ get_current_timeline(bool safe)
size_t size;
/* First fetch file... */
buffer = slurpFile(instance_config.pgdata, "global/pg_control", &size,
buffer = slurpFile(instance_config.pgdata, XLOG_CONTROL_FILE, &size,
safe, FIO_DB_HOST);
if (safe && buffer == NULL)
return 0;
@ -196,7 +196,7 @@ get_checkpoint_location(PGconn *conn)
size_t size;
ControlFileData ControlFile;
buffer = fetchFile(conn, "global/pg_control", &size);
buffer = slurpFile(instance_config.pgdata, XLOG_CONTROL_FILE, &size, false, FIO_DB_HOST);
digestControlFile(&ControlFile, buffer, size);
pg_free(buffer);
@ -212,7 +212,7 @@ get_system_identifier(const char *pgdata_path)
size_t size;
/* First fetch file... */
buffer = slurpFile(pgdata_path, "global/pg_control", &size, false, FIO_DB_HOST);
buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, false, FIO_DB_HOST);
if (buffer == NULL)
return 0;
digestControlFile(&ControlFile, buffer, size);
@ -246,7 +246,7 @@ get_remote_system_identifier(PGconn *conn)
size_t size;
ControlFileData ControlFile;
buffer = fetchFile(conn, "global/pg_control", &size);
buffer = slurpFile(instance_config.pgdata, XLOG_CONTROL_FILE, &size, false, FIO_DB_HOST);
digestControlFile(&ControlFile, buffer, size);
pg_free(buffer);
@ -263,9 +263,7 @@ get_xlog_seg_size(char *pgdata_path)
size_t size;
/* First fetch file... */
buffer = slurpFile(pgdata_path, "global/pg_control", &size, false, FIO_DB_HOST);
if (buffer == NULL)
return 0;
buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, false, FIO_DB_HOST);
digestControlFile(&ControlFile, buffer, size);
pg_free(buffer);
@ -283,7 +281,7 @@ get_data_checksum_version(bool safe)
size_t size;
/* First fetch file... */
buffer = slurpFile(instance_config.pgdata, "global/pg_control", &size,
buffer = slurpFile(instance_config.pgdata, XLOG_CONTROL_FILE, &size,
safe, FIO_DB_HOST);
if (buffer == NULL)
return 0;
@ -301,9 +299,8 @@ get_pgcontrol_checksum(const char *pgdata_path)
size_t size;
/* First fetch file... */
buffer = slurpFile(pgdata_path, "global/pg_control", &size, false, FIO_BACKUP_HOST);
if (buffer == NULL)
return 0;
buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, false, FIO_BACKUP_HOST);
digestControlFile(&ControlFile, buffer, size);
pg_free(buffer);
@ -325,9 +322,6 @@ set_min_recovery_point(pgFile *file, const char *backup_path,
/* First fetch file content */
buffer = slurpFile(instance_config.pgdata, XLOG_CONTROL_FILE, &size, false, FIO_DB_HOST);
if (buffer == NULL)
elog(ERROR, "ERROR");
digestControlFile(&ControlFile, buffer, size);
elog(LOG, "Current minRecPoint %X/%X",

View File

@ -20,6 +20,7 @@ static __thread unsigned long fio_fdset = 0;
static __thread void* fio_stdin_buffer;
static __thread int fio_stdout = 0;
static __thread int fio_stdin = 0;
static __thread int fio_stderr = 0;
fio_location MyLocation;
@ -37,11 +38,30 @@ typedef struct
/* Convert FIO pseudo handle to index in file descriptor array */
#define fio_fileno(f) (((size_t)f - 1) | FIO_PIPE_MARKER)
/* Use specified file descriptors as stding/stdout for FIO functions */
void fio_redirect(int in, int out)
/* Use specified file descriptors as stdin/stdout for FIO functions */
void fio_redirect(int in, int out, int err)
{
fio_stdin = in;
fio_stdout = out;
fio_stderr = err;
}
void fio_error(int rc, int size, char const* file, int line)
{
if (remote_agent)
{
fprintf(stderr, "%s:%d: proceeds %d bytes instead of %d: %s\n", file, line, rc, size, rc >= 0 ? "end of data" : strerror(errno));
exit(EXIT_FAILURE);
}
else
{
char buf[PRINTF_BUF_SIZE];
int err_size = read(fio_stderr, buf, sizeof(buf));
if (err_size > 0)
elog(ERROR, "Agent error: %s", buf);
else
elog(ERROR, "Communication error: %s", rc >= 0 ? "end of data" : strerror(errno));
}
}
/* Check if file descriptor is local or remote (created by FIO) */
@ -726,7 +746,7 @@ int fio_access(char const* path, int mode, fio_location location)
}
}
/* Create symbolink link */
/* Create symbolic link */
int fio_symlink(char const* target, char const* link_path, fio_location location)
{
if (fio_is_remote(location))
@ -822,7 +842,7 @@ int fio_mkdir(char const* path, int mode, fio_location location)
}
}
/* Checnge file mode */
/* Change file mode */
int fio_chmod(char const* path, int mode, fio_location location)
{
if (fio_is_remote(location))
@ -954,7 +974,7 @@ fio_gzread(gzFile f, void *buf, unsigned size)
while (1)
{
if (gz->strm.avail_in != 0) /* If there is some data in receiver buffer, then decmpress it */
if (gz->strm.avail_in != 0) /* If there is some data in receiver buffer, then decompress it */
{
rc = inflate(&gz->strm, Z_NO_FLUSH);
if (rc == Z_STREAM_END)
@ -1021,7 +1041,7 @@ fio_gzwrite(gzFile f, void const* buf, unsigned size)
{
rc = deflate(&gz->strm, Z_NO_FLUSH);
Assert(rc == Z_OK);
gz->strm.next_out = gz->buf; /* Reset pointer to the beginning of bufer */
gz->strm.next_out = gz->buf; /* Reset pointer to the beginning of buffer */
}
else
{
@ -1429,7 +1449,7 @@ void fio_communicate(int in, int out)
case FIO_UNLINK: /* Remove file or directory (TODO: Win32) */
SYS_CHECK(remove_file_or_dir(buf));
break;
case FIO_MKDIR: /* Create direcory */
case FIO_MKDIR: /* Create directory */
hdr.size = 0;
hdr.arg = dir_create_dir(buf, hdr.arg);
IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr));

View File

@ -49,7 +49,7 @@ typedef enum
#define PAGE_CHECKSUM_MISMATCH (-256)
#define SYS_CHECK(cmd) do if ((cmd) < 0) { fprintf(stderr, "%s:%d: (%s) %s\n", __FILE__, __LINE__, #cmd, strerror(errno)); exit(EXIT_FAILURE); } while (0)
#define IO_CHECK(cmd, size) do { int _rc = (cmd); if (_rc != (size)) { if (remote_agent) { fprintf(stderr, "%s:%d: proceeds %d bytes instead of %d: %s\n", __FILE__, __LINE__, _rc, (int)(size), _rc >= 0 ? "end of data" : strerror(errno)); exit(EXIT_FAILURE); } else elog(ERROR, "Communication error: %s", _rc >= 0 ? "end of data" : strerror(errno)); } } while (0)
#define IO_CHECK(cmd, size) do { int _rc = (cmd); if (_rc != (size)) fio_error(_rc, size, __FILE__, __LINE__); } while (0)
typedef struct
{
@ -64,7 +64,7 @@ extern fio_location MyLocation;
/* Check if FILE handle is local or remote (created by FIO) */
#define fio_is_remote_file(file) ((size_t)(file) <= FIO_FDMAX)
extern void fio_redirect(int in, int out);
extern void fio_redirect(int in, int out, int err);
extern void fio_communicate(int in, int out);
extern FILE* fio_fopen(char const* name, char const* mode, fio_location location);
@ -77,6 +77,7 @@ extern int fio_fseek(FILE* f, off_t offs);
extern int fio_ftruncate(FILE* f, off_t size);
extern int fio_fclose(FILE* f);
extern int fio_ffstat(FILE* f, struct stat* st);
extern void fio_error(int rc, int size, char const* file, int line);
struct pgFile;
extern int fio_send_pages(FILE* in, FILE* out, struct pgFile *file, XLogRecPtr horizonLsn,

View File

@ -15,7 +15,7 @@
/* members of struct parray are hidden from client. */
struct parray
{
void **data; /* poiter array, expanded if necessary */
void **data; /* pointer array, expanded if necessary */
size_t alloced; /* number of elements allocated */
size_t used; /* number of elements in use */
};
@ -97,7 +97,7 @@ parray_insert(parray *array, size_t index, void *elem)
}
/*
* Concatinate two parray.
* Concatenate two parray.
* parray_concat() appends the copy of the content of src to the end of dest.
*/
parray *

View File

@ -206,6 +206,9 @@ pgut_get_conninfo_string(PGconn *conn)
return connstr;
}
/* TODO: it would be better to use PQconnectdbParams, as psql does.
 * That would allow setting application_name for pg_probackup.
*/
PGconn *
pgut_connect(const char *host, const char *port,
const char *dbname, const char *username)
@ -419,7 +422,7 @@ pgut_execute_parallel(PGconn* conn,
}
if (!PQconsumeInput(conn))
elog(ERROR, "query failed: %squery was: %s",
elog(ERROR, "query failed: %s query was: %s",
PQerrorMessage(conn), query);
/* query is not done */
@ -754,7 +757,7 @@ on_interrupt(void)
interrupted = true;
/*
* User promts password, call on_cleanup() byhand. Unless we do that we will
* User is prompted for a password, so call on_cleanup() by hand. Unless we do that we will
* get stuck forever until a user enters a password.
*/
if (in_password)

View File

@ -70,9 +70,16 @@ static void kill_child(void)
void wait_ssh(void)
{
/*
* We need to wait for termination of the SSH process to eliminate zombies.
* There is no waitpid() function on Windows, but there are also no zombie
* processes caused by the lack of wait/waitpid, so just skip waitpid on Windows.
*/
#ifndef WIN32
int status;
waitpid(child_pid, &status, 0);
elog(LOG, "SSH process %d is terminated with status %d", child_pid, status);
#endif
}
#ifdef WIN32
@ -103,6 +110,7 @@ bool launch_agent(void)
int ssh_argc;
int outfd[2];
int infd[2];
int errfd[2];
ssh_argc = 0;
#ifdef WIN32
@ -126,11 +134,9 @@ bool launch_agent(void)
if (instance_config.remote.ssh_options != NULL) {
ssh_argc = split_options(ssh_argc, ssh_argv, MAX_CMDLINE_OPTIONS, pg_strdup(instance_config.remote.ssh_options));
}
if (num_threads > 1)
{
ssh_argv[ssh_argc++] = "-o";
ssh_argv[ssh_argc++] = "PasswordAuthentication=no";
}
ssh_argv[ssh_argc++] = "-o";
ssh_argv[ssh_argc++] = "PasswordAuthentication=no";
ssh_argv[ssh_argc++] = "-o";
ssh_argv[ssh_argc++] = "Compression=no";
@ -190,20 +196,25 @@ bool launch_agent(void)
#else
SYS_CHECK(pipe(infd));
SYS_CHECK(pipe(outfd));
SYS_CHECK(pipe(errfd));
SYS_CHECK(child_pid = fork());
if (child_pid == 0) { /* child */
SYS_CHECK(close(STDIN_FILENO));
SYS_CHECK(close(STDOUT_FILENO));
SYS_CHECK(close(STDERR_FILENO));
SYS_CHECK(dup2(outfd[0], STDIN_FILENO));
SYS_CHECK(dup2(infd[1], STDOUT_FILENO));
SYS_CHECK(dup2(errfd[1], STDERR_FILENO));
SYS_CHECK(close(infd[0]));
SYS_CHECK(close(infd[1]));
SYS_CHECK(close(outfd[0]));
SYS_CHECK(close(outfd[1]));
SYS_CHECK(close(errfd[0]));
SYS_CHECK(close(errfd[1]));
if (execvp(ssh_argv[0], ssh_argv) < 0)
return false;
@ -212,9 +223,10 @@ bool launch_agent(void)
elog(LOG, "Spawn agent %d version %s", child_pid, PROGRAM_VERSION);
SYS_CHECK(close(infd[1])); /* These are being used by the child */
SYS_CHECK(close(outfd[0]));
SYS_CHECK(close(errfd[1]));
/*atexit(kill_child);*/
fio_redirect(infd[0], outfd[1]); /* write to stdout */
fio_redirect(infd[0], outfd[1], errfd[0]); /* write to stdout */
}
return true;
}

View File

@ -30,6 +30,7 @@ typedef struct
uint32 checksum_version;
uint32 backup_version;
BackupMode backup_mode;
parray *dbOid_exclude_list;
/*
* Return value from the thread.
@ -42,7 +43,7 @@ typedef struct
* Validate backup files.
*/
void
pgBackupValidate(pgBackup *backup)
pgBackupValidate(pgBackup *backup, pgRestoreParams *params)
{
char base_path[MAXPGPATH];
char external_prefix[MAXPGPATH];
@ -54,6 +55,7 @@ pgBackupValidate(pgBackup *backup)
pthread_t *threads;
validate_files_arg *threads_args;
int i;
parray *dbOid_exclude_list = NULL;
/* Check backup version */
if (parse_program_version(backup->program_version) > parse_program_version(PROGRAM_VERSION))
@ -105,6 +107,10 @@ pgBackupValidate(pgBackup *backup)
pgBackupGetPath(backup, path, lengthof(path), DATABASE_FILE_LIST);
files = dir_read_file_list(base_path, external_prefix, path, FIO_BACKUP_HOST);
if (params && params->partial_db_list)
dbOid_exclude_list = get_dbOid_exclude_list(backup, files, params->partial_db_list,
params->partial_restore_type);
/* setup threads */
for (i = 0; i < parray_num(files); i++)
{
@ -130,6 +136,7 @@ pgBackupValidate(pgBackup *backup)
arg->stop_lsn = backup->stop_lsn;
arg->checksum_version = backup->checksum_version;
arg->backup_version = parse_program_version(backup->program_version);
arg->dbOid_exclude_list = dbOid_exclude_list;
/* By default there are some error */
threads_args[i].ret = 1;
@ -193,6 +200,19 @@ pgBackupValidateFiles(void *arg)
if (!S_ISREG(file->mode))
continue;
/*
* If in partial validate, check if the file belongs to the database
* we exclude. Only files from pgdata can be skipped.
*/
if (arguments->dbOid_exclude_list && file->external_dir_num == 0
&& parray_bsearch(arguments->dbOid_exclude_list,
&file->dbOid, pgCompareOid))
{
elog(VERBOSE, "Skip file validation due to partial restore: \"%s\"",
file->rel_path);
continue;
}
/*
* Currently we don't compute checksums for
* cfs_compressed data files, so skip them.
@ -371,7 +391,7 @@ do_validate_all(void)
/* TODO: Probably we should have different exit code for every condition
* and their combinations:
* 0 - all backups are valid
* 1 - some backups are corrup
* 1 - some backups are corrupt
* 2 - some backups were skipped due to concurrent locks
* 3 - some backups are corrupt and some are skipped due to concurrent locks
*/
@ -498,7 +518,7 @@ do_validate_instance(void)
continue;
}
/* Validate backup files */
pgBackupValidate(current_backup);
pgBackupValidate(current_backup, NULL);
/* Validate corresponding WAL files */
if (current_backup->status == BACKUP_STATUS_OK)
@ -547,7 +567,7 @@ do_validate_instance(void)
/* For every OK backup we try to revalidate all its ORPHAN descendants. */
if (current_backup->status == BACKUP_STATUS_OK)
{
/* revalidate all ORPHAN descendats
/* revalidate all ORPHAN descendants
* be very careful not to miss a missing backup
* for every backup we must check that it is a descendant of current_backup
*/
@ -592,8 +612,8 @@ do_validate_instance(void)
skipped_due_to_lock = true;
continue;
}
/* Revaliate backup files*/
pgBackupValidate(backup);
/* Revalidate backup files*/
pgBackupValidate(backup, NULL);
if (backup->status == BACKUP_STATUS_OK)
{

View File

@ -1,11 +1,11 @@
[see wiki](https://confluence.postgrespro.ru/display/DEV/pg_probackup)
```
Note: For now these are works on Linix and "kinda" works on Windows
Note: For now these tests work on Linux and "kinda" work on Windows
```
```
Windows Note: For tablespaceses tests to work on Windows, you should explicitly(!) grant current user full access to tmp_dirs
Windows Note: For tablespace tests to work on Windows, you should explicitly(!) grant the current user full access to tmp_dirs
```
@ -23,12 +23,15 @@ Enable compatibility tests:
Specify path to pg_probackup binary file. By default tests use <Path to Git repository>/pg_probackup/
export PGPROBACKUPBIN=<path to pg_probackup>
Remote backup depends on key authentithication to local machine via ssh as current user.
Remote backup depends on key authentication to local machine via ssh as current user.
export PGPROBACKUP_SSH_REMOTE=ON
Run the suite of basic tests:
export PG_PROBACKUP_TEST_BASIC=ON
Run ptrack tests:
export PG_PROBACKUP_PTRACK=ON
Usage:
pip install testgres
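
Putting the variables above together, a minimal sketch of a typical local run might look like the following; the binary path and the unittest invocation are assumptions, adjust them to your checkout:
```
pip install testgres
export PGPROBACKUPBIN=/path/to/pg_probackup   # assumption: the binary you built for testing
export PG_PROBACKUP_TEST_BASIC=ON             # run only the basic subset
export PG_PROBACKUP_PTRACK=ON                 # include ptrack tests (requires ptrack support)
export PGPROBACKUP_SSH_REMOTE=ON              # only if key-based ssh to localhost is configured
python -m unittest -v tests                   # run from the repository root
```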

View File

@ -16,6 +16,10 @@ def load_tests(loader, tests, pattern):
if os.environ['PG_PROBACKUP_TEST_BASIC'] == 'ON':
loader.testMethodPrefix = 'test_basic'
if 'PG_PROBACKUP_PTRACK' in os.environ:
if os.environ['PG_PROBACKUP_PTRACK'] == 'ON':
suite.addTests(loader.loadTestsFromModule(ptrack))
# suite.addTests(loader.loadTestsFromModule(auth_test))
suite.addTests(loader.loadTestsFromModule(archive))
suite.addTests(loader.loadTestsFromModule(backup))
@ -37,7 +41,6 @@ def load_tests(loader, tests, pattern):
suite.addTests(loader.loadTestsFromModule(merge))
suite.addTests(loader.loadTestsFromModule(option))
suite.addTests(loader.loadTestsFromModule(page))
# suite.addTests(loader.loadTestsFromModule(ptrack))
suite.addTests(loader.loadTestsFromModule(pgpro560))
suite.addTests(loader.loadTestsFromModule(pgpro589))
suite.addTests(loader.loadTestsFromModule(pgpro2068))

View File

@ -47,7 +47,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
backup_dir, 'node', node)
node.slow_start()
# Recreate backup calagoue
# Recreate backup catalog
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
@ -228,10 +228,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'checkpoint_timeout': '30s'}
)
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
@ -242,7 +240,6 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
backup_dir, 'node', node,
options=[
"--archive-timeout=60",
"--stream",
"--log-level-file=info"],
gdb=True)
@ -255,27 +252,105 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
gdb.continue_execution_until_exit()
log_file = os.path.join(backup_dir, 'log/pg_probackup.log')
log_file = os.path.join(backup_dir, 'log', 'pg_probackup.log')
with open(log_file, 'r') as f:
log_content = f.read()
self.assertNotIn(
"ERROR: pg_stop_backup doesn't answer",
log_content,
"pg_stop_backup timeouted")
# in PG <= 9.6 pg_stop_backup always waits
if self.get_version(node) < 100000:
self.assertIn(
"ERROR: pg_stop_backup doesn't answer in 60 seconds, cancel it",
log_content)
else:
self.assertIn(
"ERROR: Switched WAL segment 000000010000000000000002 "
"could not be archived in 60 seconds",
log_content)
log_file = os.path.join(node.logs_dir, 'postgresql.log')
with open(log_file, 'r') as f:
log_content = f.read()
self.assertNotIn(
'FailedAssertion',
log_content,
'PostgreSQL crashed because of a failed assert')
self.assertNotIn(
'FailedAssertion',
log_content,
'PostgreSQL crashed because of a failed assert')
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_arhive_push_file_exists(self):
def test_pgpro434_4(self):
"""
Check pg_stop_backup_timeout, needed backup_timeout
Fixed in commit d84d79668b0c139 and assert fixed by ptrack 1.7
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
gdb = self.backup_node(
backup_dir, 'node', node,
options=[
"--archive-timeout=60",
"--log-level-file=info"],
gdb=True)
gdb.set_breakpoint('pg_stop_backup')
gdb.run_until_break()
node.append_conf(
'postgresql.auto.conf', "archive_command = 'exit 1'")
node.reload()
os.environ["PGAPPNAME"] = "foo"
pid = node.safe_psql(
"postgres",
"SELECT pid "
"FROM pg_stat_activity "
"WHERE application_name = 'pg_probackup'").rstrip()
os.environ["PGAPPNAME"] = "pg_probackup"
postgres_gdb = self.gdb_attach(pid)
postgres_gdb.set_breakpoint('do_pg_stop_backup')
postgres_gdb.continue_execution_until_running()
gdb.continue_execution_until_exit()
# gdb._execute('detach')
log_file = os.path.join(backup_dir, 'log', 'pg_probackup.log')
with open(log_file, 'r') as f:
log_content = f.read()
self.assertIn(
"ERROR: pg_stop_backup doesn't answer in 60 seconds, cancel it",
log_content)
log_file = os.path.join(node.logs_dir, 'postgresql.log')
with open(log_file, 'r') as f:
log_content = f.read()
self.assertNotIn(
'FailedAssertion',
log_content,
'PostgreSQL crashed because of a failed assert')
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_archive_push_file_exists(self):
"""Archive-push if file exists"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@ -284,8 +359,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'checkpoint_timeout': '30s'}
)
'checkpoint_timeout': '30s'})
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
@ -311,17 +386,33 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
"from generate_series(0,100500) i")
log_file = os.path.join(node.logs_dir, 'postgresql.log')
self.switch_wal_segment(node)
sleep(1)
with open(log_file, 'r') as f:
log_content = f.read()
self.assertTrue(
'LOG: archive command failed with exit code 1' in log_content and
'DETAIL: The failed archive command was:' in log_content and
'INFO: pg_probackup archive-push from' in log_content and
'ERROR: WAL segment ' in log_content and
'{0}" already exists.'.format(filename) in log_content,
'Expecting error messages about failed archive_command'
)
self.assertFalse('pg_probackup archive-push completed successfully' in log_content)
self.assertIn(
'LOG: archive command failed with exit code 1',
log_content)
self.assertIn(
'DETAIL: The failed archive command was:',
log_content)
self.assertIn(
'INFO: pg_probackup archive-push from',
log_content)
self.assertIn(
'ERROR: WAL segment ',
log_content)
self.assertIn(
'already exists.',
log_content)
self.assertNotIn(
'pg_probackup archive-push completed successfully', log_content)
if self.get_version(node) < 100000:
wal_src = os.path.join(
@ -342,15 +433,16 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
with open(log_file, 'r') as f:
log_content = f.read()
self.assertTrue(
'pg_probackup archive-push completed successfully' in log_content,
'Expecting messages about successfull execution archive_command')
self.assertIn(
'pg_probackup archive-push completed successfully',
log_content)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_arhive_push_file_exists_overwrite(self):
def test_archive_push_file_exists_overwrite(self):
"""Archive-push if file exists"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@ -359,8 +451,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'checkpoint_timeout': '30s'}
)
'checkpoint_timeout': '30s'})
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
@ -386,16 +478,23 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
"from generate_series(0,100500) i")
log_file = os.path.join(node.logs_dir, 'postgresql.log')
self.switch_wal_segment(node)
sleep(1)
with open(log_file, 'r') as f:
log_content = f.read()
self.assertTrue(
'LOG: archive command failed with exit code 1' in log_content and
'DETAIL: The failed archive command was:' in log_content and
'INFO: pg_probackup archive-push from' in log_content and
'{0}" already exists.'.format(filename) in log_content,
'Expecting error messages about failed archive_command'
)
self.assertFalse('pg_probackup archive-push completed successfully' in log_content)
self.assertIn(
'LOG: archive command failed with exit code 1', log_content)
self.assertIn(
'DETAIL: The failed archive command was:', log_content)
self.assertIn(
'INFO: pg_probackup archive-push from', log_content)
self.assertIn(
'{0}" already exists.'.format(filename), log_content)
self.assertNotIn(
'pg_probackup archive-push completed successfully', log_content)
self.set_archiving(backup_dir, 'node', node, overwrite=True)
node.reload()
@ -413,7 +512,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_archive_push_partial_file_exists(self):
"""Archive-push if stale .partial file exists"""
"""Archive-push if stale '.partial' file exists"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -438,10 +537,16 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
"postgres",
"INSERT INTO t1 VALUES (1) RETURNING (xmin)").rstrip()
filename_orig = node.safe_psql(
"postgres",
"SELECT file_name "
"FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip()
if self.get_version(node) < 100000:
filename_orig = node.safe_psql(
"postgres",
"SELECT file_name "
"FROM pg_xlogfile_name_offset(pg_current_xlog_location());").rstrip()
else:
filename_orig = node.safe_psql(
"postgres",
"SELECT file_name "
"FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip()
# form up path to next .partial WAL segment
wals_dir = os.path.join(backup_dir, 'wal', 'node')
@ -508,10 +613,16 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
"postgres",
"create table t2()")
filename_orig = node.safe_psql(
"postgres",
"SELECT file_name "
"FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip()
if self.get_version(node) < 100000:
filename_orig = node.safe_psql(
"postgres",
"SELECT file_name "
"FROM pg_xlogfile_name_offset(pg_current_xlog_location());").rstrip()
else:
filename_orig = node.safe_psql(
"postgres",
"SELECT file_name "
"FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip()
# form up path to next .partial WAL segment
wals_dir = os.path.join(backup_dir, 'wal', 'node')

View File

@ -24,9 +24,9 @@ except ImportError:
class SimpleAuthTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_backup_via_unpriviledged_user(self):
def test_backup_via_unprivileged_user(self):
"""
Make node, create unpriviledged user, try to
Make node, create unprivileged user, try to
run a backups without EXECUTE rights on
certain functions
"""

View File

@ -2,6 +2,7 @@ import unittest
import os
from time import sleep
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
import shutil
module_name = 'backup'
@ -14,6 +15,9 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# PGPRO-707
def test_backup_modes_archive(self):
"""standart backup modes with ARCHIVE WAL method"""
if not self.ptrack:
return unittest.skip('Skipped because ptrack support is disabled')
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@ -108,6 +112,9 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_incremental_backup_without_full(self):
"""page-level backup without validated full backup"""
if not self.ptrack:
return unittest.skip('Skipped because ptrack support is disabled')
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@ -130,7 +137,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertIn(
"ERROR: Valid backup on current timeline is not found. "
"ERROR: Valid backup on current timeline 1 is not found. "
"Create new FULL backup before an incremental one.",
e.message,
"\n Unexpected Error Message: {0}\n CMD: {1}".format(
@ -148,7 +155,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertIn(
"ERROR: Valid backup on current timeline is not found. "
"ERROR: Valid backup on current timeline 1 is not found. "
"Create new FULL backup before an incremental one.",
e.message,
"\n Unexpected Error Message: {0}\n CMD: {1}".format(
@ -167,8 +174,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on'})
initdb_params=['--data-checksums'])
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
@ -212,7 +218,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertIn(
"ERROR: Valid backup on current timeline is not found. "
"ERROR: Valid backup on current timeline 1 is not found. "
"Create new FULL backup before an incremental one.",
e.message,
"\n Unexpected Error Message: {0}\n CMD: {1}".format(
@ -229,6 +235,9 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_ptrack_threads(self):
"""ptrack multi thread backup mode"""
if not self.ptrack:
return unittest.skip('Skipped because ptrack support is disabled')
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@ -257,6 +266,9 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_ptrack_threads_stream(self):
"""ptrack multi thread backup mode and stream"""
if not self.ptrack:
return unittest.skip('Skipped because ptrack support is disabled')
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@ -286,6 +298,9 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_page_corruption_heal_via_ptrack_1(self):
"""make node, corrupt some page, check that backup failed"""
if not self.ptrack:
return unittest.skip('Skipped because ptrack support is disabled')
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@ -342,6 +357,9 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_page_corruption_heal_via_ptrack_2(self):
"""make node, corrupt some page, check that backup failed"""
if not self.ptrack:
return unittest.skip('Skipped because ptrack support is disabled')
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@ -451,12 +469,16 @@ class BackupTest(ProbackupTest, unittest.TestCase):
"postgres",
"select pg_relation_filepath('t_heap')").rstrip()
node.stop()
with open(os.path.join(node.data_dir, heap_path), "rb+", 0) as f:
f.seek(9000)
f.write(b"bla")
f.flush()
f.close
node.slow_start()
try:
self.backup_node(
backup_dir, 'node', node,
@ -468,23 +490,33 @@ class BackupTest(ProbackupTest, unittest.TestCase):
"\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
if self.remote:
if self.ptrack:
self.assertTrue(
"ERROR: Failed to read file" in e.message and
"data file checksum mismatch" in e.message,
'WARNING: page verification failed, '
'calculated checksum' in e.message and
'ERROR: query failed: ERROR: '
'invalid page in block 1 of relation' in e.message and
'ERROR: Data files transferring failed' in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
else:
self.assertIn(
'WARNING: Corruption detected in file',
e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
self.assertIn(
'ERROR: Data file corruption',
e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
if self.remote:
self.assertTrue(
"ERROR: Failed to read file" in e.message and
"data file checksum mismatch" in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
else:
self.assertIn(
'WARNING: Corruption detected in file',
e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
self.assertIn(
'ERROR: Data file corruption',
e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname)
@ -611,7 +643,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
path = os.path.join(root, file)
list = list + [path]
# We expect that relfilenode occures only once
# We expect that relfilenode can be encountered only once
if len(list) > 1:
message = ""
for string in list:
@ -987,6 +1019,9 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_drop_rel_during_backup_ptrack(self):
""""""
if not self.ptrack:
return unittest.skip('Skipped because ptrack support is disabled')
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -1402,6 +1437,9 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_basic_missing_file_permissions(self):
""""""
if os.name == 'nt':
return unittest.skip('Skipped because it is POSIX only test')
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -1446,6 +1484,9 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_basic_missing_dir_permissions(self):
""""""
if os.name == 'nt':
return unittest.skip('Skipped because it is POSIX only test')
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -1491,10 +1532,12 @@ class BackupTest(ProbackupTest, unittest.TestCase):
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
# pg_options={'ptrack_enable': 'on'},
initdb_params=['--data-checksums'],
pg_options={
'archive_timeout': '30s'})
)
if self.ptrack:
node.append_conf('postgresql.auto.conf', 'ptrack_enable = on')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
@ -1505,58 +1548,126 @@ class BackupTest(ProbackupTest, unittest.TestCase):
'postgres',
'CREATE DATABASE backupdb')
node.safe_psql(
'backupdb',
"REVOKE ALL ON DATABASE backupdb from PUBLIC; "
"REVOKE ALL ON SCHEMA public from PUBLIC; "
"REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; "
"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; "
"REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; "
"REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; "
"REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; "
"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; "
"REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; "
"REVOKE ALL ON SCHEMA information_schema from PUBLIC; "
"REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; "
"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; "
"REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; "
"CREATE ROLE backup WITH LOGIN REPLICATION; "
"GRANT CONNECT ON DATABASE backupdb to backup; "
"GRANT USAGE ON SCHEMA pg_catalog TO backup; "
"GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
# for partial restore, checkdb and ptrack
"GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; "
# for exclusive backup for PG 9.5 and ptrack
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"
)
# PG 9.5
if self.get_version(node) < 90600:
node.safe_psql(
'backupdb',
"REVOKE ALL ON DATABASE backupdb from PUBLIC; "
"REVOKE ALL ON SCHEMA public from PUBLIC; "
"REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; "
"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; "
"REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; "
"REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; "
"REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; "
"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; "
"REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; "
"REVOKE ALL ON SCHEMA information_schema from PUBLIC; "
"REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; "
"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; "
"REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; "
"CREATE ROLE backup WITH LOGIN REPLICATION; "
"GRANT CONNECT ON DATABASE backupdb to backup; "
"GRANT USAGE ON SCHEMA pg_catalog TO backup; "
"GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
"GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
"GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"
)
# PG 9.6
elif self.get_version(node) > 90600 and self.get_version(node) < 100000:
node.safe_psql(
'backupdb',
"REVOKE ALL ON DATABASE backupdb from PUBLIC; "
"REVOKE ALL ON SCHEMA public from PUBLIC; "
"REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; "
"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; "
"REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; "
"REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; "
"REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; "
"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; "
"REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; "
"REVOKE ALL ON SCHEMA information_schema from PUBLIC; "
"REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; "
"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; "
"REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; "
"CREATE ROLE backup WITH LOGIN REPLICATION; "
"GRANT CONNECT ON DATABASE backupdb to backup; "
"GRANT USAGE ON SCHEMA pg_catalog TO backup; "
"GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
"GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
"GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"
)
# >= 10
else:
node.safe_psql(
'backupdb',
"REVOKE ALL ON DATABASE backupdb from PUBLIC; "
"REVOKE ALL ON SCHEMA public from PUBLIC; "
"REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; "
"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; "
"REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; "
"REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; "
"REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; "
"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; "
"REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; "
"REVOKE ALL ON SCHEMA information_schema from PUBLIC; "
"REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; "
"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; "
"REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; "
"CREATE ROLE backup WITH LOGIN REPLICATION; "
"GRANT CONNECT ON DATABASE backupdb to backup; "
"GRANT USAGE ON SCHEMA pg_catalog TO backup; "
"GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
"GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
"GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"
)
# ptrack functions
# for fname in [
# 'oideq(oid, oid)',
# 'ptrack_version()',
# 'pg_ptrack_clear()',
# 'pg_ptrack_control_lsn()',
# 'pg_ptrack_get_and_clear_db(oid, oid)',
# 'pg_ptrack_get_and_clear(oid, oid)',
# 'pg_ptrack_get_block_2(oid, oid, oid, bigint)']:
# try:
# node.safe_psql(
# "backupdb",
# "GRANT EXECUTE ON FUNCTION pg_catalog.{0} "
# "TO backup".format(fname))
# except:
# pass
if self.ptrack:
for fname in [
'pg_catalog.oideq(oid, oid)',
'pg_catalog.ptrack_version()',
'pg_catalog.pg_ptrack_clear()',
'pg_catalog.pg_ptrack_control_lsn()',
'pg_catalog.pg_ptrack_get_and_clear_db(oid, oid)',
'pg_catalog.pg_ptrack_get_and_clear(oid, oid)',
'pg_catalog.pg_ptrack_get_block_2(oid, oid, oid, bigint)',
'pg_catalog.pg_stop_backup()']:
# try:
node.safe_psql(
"backupdb",
"GRANT EXECUTE ON FUNCTION {0} "
"TO backup".format(fname))
# except:
# pass
# FULL backup
self.backup_node(
@ -1583,12 +1694,352 @@ class BackupTest(ProbackupTest, unittest.TestCase):
datname='backupdb', options=['--stream', '-U', 'backup'])
# PTRACK
# self.backup_node(
# backup_dir, 'node', node, backup_type='ptrack',
# datname='backupdb', options=['-U', 'backup'])
# self.backup_node(
# backup_dir, 'node', node, backup_type='ptrack',
# datname='backupdb', options=['--stream', '-U', 'backup'])
if self.ptrack:
self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
datname='backupdb', options=['-U', 'backup'])
self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
datname='backupdb', options=['--stream', '-U', 'backup'])
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_parent_choosing(self):
"""
PAGE3 <- RUNNING(parent should be FULL)
PAGE2 <- OK
PAGE1 <- CORRUPT
FULL
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
full_id = self.backup_node(backup_dir, 'node', node)
# PAGE1
page1_id = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# PAGE2
page2_id = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# Change PAGE1 to ERROR
self.change_backup_status(backup_dir, 'node', page1_id, 'ERROR')
# PAGE3
page3_id = self.backup_node(
backup_dir, 'node', node,
backup_type='page', options=['--log-level-file=LOG'])
log_file_path = os.path.join(backup_dir, 'log', 'pg_probackup.log')
with open(log_file_path) as f:
log_file_content = f.read()
self.assertIn(
"WARNING: Backup {0} has invalid parent: {1}. "
"Cannot be a parent".format(page2_id, page1_id),
log_file_content)
self.assertIn(
"WARNING: Backup {0} has status: ERROR. "
"Cannot be a parent".format(page1_id),
log_file_content)
self.assertIn(
"Parent backup: {0}".format(full_id),
log_file_content)
self.assertEqual(
self.show_pb(
backup_dir, 'node', backup_id=page3_id)['parent-backup-id'],
full_id)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_parent_choosing_1(self):
"""
PAGE3 <- RUNNING(parent should be FULL)
PAGE2 <- OK
PAGE1 <- (missing)
FULL
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
full_id = self.backup_node(backup_dir, 'node', node)
# PAGE1
page1_id = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# PAGE2
page2_id = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# Delete PAGE1
shutil.rmtree(
os.path.join(backup_dir, 'backups', 'node', page1_id))
# PAGE3
page3_id = self.backup_node(
backup_dir, 'node', node,
backup_type='page', options=['--log-level-file=LOG'])
log_file_path = os.path.join(backup_dir, 'log', 'pg_probackup.log')
with open(log_file_path) as f:
log_file_content = f.read()
self.assertIn(
"WARNING: Backup {0} has missing parent: {1}. "
"Cannot be a parent".format(page2_id, page1_id),
log_file_content)
self.assertIn(
"Parent backup: {0}".format(full_id),
log_file_content)
self.assertEqual(
self.show_pb(
backup_dir, 'node', backup_id=page3_id)['parent-backup-id'],
full_id)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_parent_choosing_2(self):
"""
PAGE3 <- RUNNING(backup should fail)
PAGE2 <- OK
PAGE1 <- OK
FULL <- (missing)
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
full_id = self.backup_node(backup_dir, 'node', node)
# PAGE1
page1_id = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# PAGE2
page2_id = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# Delete FULL
shutil.rmtree(
os.path.join(backup_dir, 'backups', 'node', full_id))
# PAGE3
try:
self.backup_node(
backup_dir, 'node', node,
backup_type='page', options=['--log-level-file=LOG'])
# we should die here because exception is what we expect to happen
self.assertEqual(
1, 0,
"Expecting Error because FULL backup is missing"
"\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertIn(
'ERROR: Valid backup on current timeline 1 is not found. '
'Create new FULL backup before an incremental one.',
e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
self.assertEqual(
self.show_pb(
backup_dir, 'node')[2]['status'],
'ERROR')
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_backup_with_less_privileges_role(self):
"""
check permissions correctness from documentation:
https://github.com/postgrespro/pg_probackup/blob/master/Documentation.md#configuring-the-database-cluster
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'archive_timeout': '30s',
'checkpoint_timeout': '30s'})
if self.ptrack:
node.append_conf('postgresql.auto.conf', 'ptrack_enable = on')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
node.safe_psql(
'postgres',
'CREATE DATABASE backupdb')
# PG 9.5
if self.get_version(node) < 90600:
node.safe_psql(
'backupdb',
"BEGIN; "
"CREATE ROLE backup WITH LOGIN; "
"GRANT USAGE ON SCHEMA pg_catalog TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; "
"COMMIT;"
)
# PG 9.6
elif self.get_version(node) > 90600 and self.get_version(node) < 100000:
node.safe_psql(
'backupdb',
"BEGIN; "
"CREATE ROLE backup WITH LOGIN; "
"GRANT USAGE ON SCHEMA pg_catalog TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; "
"COMMIT;"
)
# >= 10
else:
node.safe_psql(
'backupdb',
"BEGIN; "
"CREATE ROLE backup WITH LOGIN; "
"GRANT USAGE ON SCHEMA pg_catalog TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; "
"COMMIT;"
)
# enable STREAM backup
node.safe_psql(
'backupdb',
'ALTER ROLE backup WITH REPLICATION;')
# FULL backup
self.backup_node(
backup_dir, 'node', node,
datname='backupdb', options=['--stream', '-U', 'backup'])
self.backup_node(
backup_dir, 'node', node,
datname='backupdb', options=['-U', 'backup'])
# PAGE
self.backup_node(
backup_dir, 'node', node, backup_type='page',
datname='backupdb', options=['-U', 'backup'])
self.backup_node(
backup_dir, 'node', node, backup_type='page', datname='backupdb',
options=['--stream', '-U', 'backup'])
# DELTA
self.backup_node(
backup_dir, 'node', node, backup_type='delta',
datname='backupdb', options=['-U', 'backup'])
self.backup_node(
backup_dir, 'node', node, backup_type='delta',
datname='backupdb', options=['--stream', '-U', 'backup'])
# Restore as replica
replica = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'replica'))
replica.cleanup()
self.restore_node(backup_dir, 'node', replica)
self.set_replica(node, replica)
self.add_instance(backup_dir, 'replica', replica)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.slow_start(replica=True)
# FULL backup from replica
self.backup_node(
backup_dir, 'replica', replica,
datname='backupdb', options=['--stream', '-U', 'backup'])
self.backup_node(
backup_dir, 'replica', replica, datname='backupdb',
options=['-U', 'backup', '--log-level-file=verbose'])
# PAGE backup from replica
self.backup_node(
backup_dir, 'replica', replica, backup_type='page',
datname='backupdb', options=['-U', 'backup'])
self.backup_node(
backup_dir, 'replica', replica, backup_type='page',
datname='backupdb', options=['--stream', '-U', 'backup'])
# DELTA backup from replica
self.backup_node(
backup_dir, 'replica', replica, backup_type='delta',
datname='backupdb', options=['-U', 'backup'])
self.backup_node(
backup_dir, 'replica', replica, backup_type='delta',
datname='backupdb', options=['--stream', '-U', 'backup'])
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@ -229,7 +229,7 @@ class CheckdbTest(ProbackupTest, unittest.TestCase):
# create two databases
node.safe_psql("postgres", "create database db1")
try:
node.safe_psql(
node.safe_psql(
"db1",
"create extension amcheck")
except QueryException as e:
@ -330,7 +330,7 @@ class CheckdbTest(ProbackupTest, unittest.TestCase):
# corruption of both indexes in db1 and db2 must be detected
# also the fact that amcheck is not installed in 'postgres'
# musted be logged
# should be logged
with open(log_file_path) as f:
log_file_content = f.read()
self.assertIn(
@ -434,12 +434,12 @@ class CheckdbTest(ProbackupTest, unittest.TestCase):
repr(e.message), self.cmd))
self.assertIn(
"WARNING: CORRUPTION in file {0}, block 1".format(
'WARNING: Corruption detected in file "{0}", block 1'.format(
os.path.normpath(heap_full_path)),
e.message)
self.assertIn(
"WARNING: CORRUPTION in file {0}, block 5".format(
'WARNING: Corruption detected in file "{0}", block 5'.format(
os.path.normpath(heap_full_path)),
e.message)
@ -484,7 +484,7 @@ class CheckdbTest(ProbackupTest, unittest.TestCase):
gdb.remove_all_breakpoints()
gdb._execute('signal SIGINT')
gdb.continue_execution_until_exit()
gdb.continue_execution_until_error()
with open(node.pg_log_file, 'r') as f:
output = f.read()

View File

@ -217,6 +217,10 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_backward_compatibility_ptrack(self):
"""Description in jira issue PGPRO-434"""
if not self.ptrack:
return unittest.skip('Skipped because ptrack support is disabled')
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -224,8 +228,9 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'autovacuum': 'off'}
)
'autovacuum': 'off',
'ptrack_enable': 'on'})
self.init_pb(backup_dir, old_binary=True)
self.show_pb(backup_dir)
@ -262,7 +267,7 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# Delta BACKUP with old binary
# ptrack BACKUP with old binary
pgbench = node.pgbench(
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
@ -272,7 +277,7 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
pgbench.stdout.close()
self.backup_node(
backup_dir, 'node', node, backup_type='delta',
backup_dir, 'node', node, backup_type='ptrack',
old_binary=True)
if self.paranoia:
@ -287,7 +292,7 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# Delta BACKUP with new binary
# Ptrack BACKUP with new binary
pgbench = node.pgbench(
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
@ -297,7 +302,7 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
pgbench.stdout.close()
self.backup_node(
backup_dir, 'node', node, backup_type='delta')
backup_dir, 'node', node, backup_type='ptrack')
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)

View File

@ -23,11 +23,7 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'checkpoint_timeout': '30s',
'ptrack_enable': 'on'}
)
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
@ -59,15 +55,15 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
options=[
'--stream', '--compress-algorithm=zlib'])
# PTRACK BACKUP
# DELTA BACKUP
node.safe_psql(
"postgres",
"insert into t_heap select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(512,768) i")
ptrack_result = node.execute("postgres", "SELECT * FROM t_heap")
ptrack_backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
delta_result = node.execute("postgres", "SELECT * FROM t_heap")
delta_backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='delta',
options=['--stream', '--compress-algorithm=zlib'])
# Drop Node
@ -105,11 +101,11 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
self.assertEqual(page_result, page_result_new)
node.cleanup()
# Check ptrack backup
# Check delta backup
self.assertIn(
"INFO: Restore of backup {0} completed.".format(ptrack_backup_id),
"INFO: Restore of backup {0} completed.".format(delta_backup_id),
self.restore_node(
backup_dir, 'node', node, backup_id=ptrack_backup_id,
backup_dir, 'node', node, backup_id=delta_backup_id,
options=[
"-j", "4", "--immediate",
"--recovery-target-action=promote"]),
@ -117,8 +113,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
repr(self.output), self.cmd))
node.slow_start()
ptrack_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(ptrack_result, ptrack_result_new)
delta_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(delta_result, delta_result_new)
node.cleanup()
# Clean after yourself
@ -135,11 +131,7 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'checkpoint_timeout': '30s',
'ptrack_enable': 'on'}
)
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
@ -167,14 +159,14 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
backup_dir, 'node', node, backup_type='page',
options=["--compress-algorithm=zlib"])
# PTRACK BACKUP
# DELTA BACKUP
node.safe_psql(
"postgres",
"insert into t_heap select i as id, md5(i::text) as text, "
"md5(i::text)::tsvector as tsvector from generate_series(0,3) i")
ptrack_result = node.execute("postgres", "SELECT * FROM t_heap")
ptrack_backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
delta_result = node.execute("postgres", "SELECT * FROM t_heap")
delta_backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='delta',
options=['--compress-algorithm=zlib'])
# Drop Node
@ -212,11 +204,11 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
self.assertEqual(page_result, page_result_new)
node.cleanup()
# Check ptrack backup
# Check delta backup
self.assertIn(
"INFO: Restore of backup {0} completed.".format(ptrack_backup_id),
"INFO: Restore of backup {0} completed.".format(delta_backup_id),
self.restore_node(
backup_dir, 'node', node, backup_id=ptrack_backup_id,
backup_dir, 'node', node, backup_id=delta_backup_id,
options=[
"-j", "4", "--immediate",
"--recovery-target-action=promote"]),
@ -224,8 +216,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
repr(self.output), self.cmd))
node.slow_start()
ptrack_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(ptrack_result, ptrack_result_new)
delta_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(delta_result, delta_result_new)
node.cleanup()
# Clean after yourself
@ -242,11 +234,7 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'checkpoint_timeout': '30s',
'ptrack_enable': 'on'}
)
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
@ -275,15 +263,15 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
backup_dir, 'node', node, backup_type='page',
options=['--stream', '--compress-algorithm=pglz'])
# PTRACK BACKUP
# DELTA BACKUP
node.safe_psql(
"postgres",
"insert into t_heap select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(512,768) i")
ptrack_result = node.execute("postgres", "SELECT * FROM t_heap")
ptrack_backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
delta_result = node.execute("postgres", "SELECT * FROM t_heap")
delta_backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='delta',
options=['--stream', '--compress-algorithm=pglz'])
# Drop Node
@ -321,11 +309,11 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
self.assertEqual(page_result, page_result_new)
node.cleanup()
# Check ptrack backup
# Check delta backup
self.assertIn(
"INFO: Restore of backup {0} completed.".format(ptrack_backup_id),
"INFO: Restore of backup {0} completed.".format(delta_backup_id),
self.restore_node(
backup_dir, 'node', node, backup_id=ptrack_backup_id,
backup_dir, 'node', node, backup_id=delta_backup_id,
options=[
"-j", "4", "--immediate",
"--recovery-target-action=promote"]),
@ -333,8 +321,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
repr(self.output), self.cmd))
node.slow_start()
ptrack_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(ptrack_result, ptrack_result_new)
delta_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(delta_result, delta_result_new)
node.cleanup()
# Clean after yourself
@ -351,11 +339,7 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'checkpoint_timeout': '30s',
'ptrack_enable': 'on'}
)
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
@ -384,15 +368,15 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
backup_dir, 'node', node, backup_type='page',
options=['--compress-algorithm=pglz'])
# PTRACK BACKUP
# DELTA BACKUP
node.safe_psql(
"postgres",
"insert into t_heap select i as id, md5(i::text) as text, "
"md5(i::text)::tsvector as tsvector "
"from generate_series(200,300) i")
ptrack_result = node.execute("postgres", "SELECT * FROM t_heap")
ptrack_backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
delta_result = node.execute("postgres", "SELECT * FROM t_heap")
delta_backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='delta',
options=['--compress-algorithm=pglz'])
# Drop Node
@ -430,11 +414,11 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
self.assertEqual(page_result, page_result_new)
node.cleanup()
# Check ptrack backup
# Check delta backup
self.assertIn(
"INFO: Restore of backup {0} completed.".format(ptrack_backup_id),
"INFO: Restore of backup {0} completed.".format(delta_backup_id),
self.restore_node(
backup_dir, 'node', node, backup_id=ptrack_backup_id,
backup_dir, 'node', node, backup_id=delta_backup_id,
options=[
"-j", "4", "--immediate",
"--recovery-target-action=promote"]),
@ -442,8 +426,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
repr(self.output), self.cmd))
node.slow_start()
ptrack_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(ptrack_result, ptrack_result_new)
delta_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(delta_result, delta_result_new)
node.cleanup()
# Clean after yourself
@ -460,11 +444,7 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'checkpoint_timeout': '30s',
'ptrack_enable': 'on'}
)
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
@ -492,9 +472,9 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_uncompressable_pages(self):
def test_incompressible_pages(self):
"""
make archive node, create table with uncompressable toast pages,
make archive node, create table with incompressible toast pages,
take backup with compression, make sure that page was not compressed,
restore backup and check data correctness
"""


@ -89,7 +89,61 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_delete_archive_mix_compress_and_non_compressed_segments(self):
"""stub"""
"""delete full backups"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'])
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(
backup_dir, 'node', node, compress=False)
node.slow_start()
# full backup
self.backup_node(backup_dir, 'node', node)
node.pgbench_init(scale=10)
# Restart archiving with compression
self.set_archiving(backup_dir, 'node', node, compress=True)
node.restart()
# full backup
self.backup_node(backup_dir, 'node', node)
pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
pgbench.wait()
self.backup_node(
backup_dir, 'node', node,
options=[
'--retention-redundancy=3',
'--delete-expired'])
pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
pgbench.wait()
self.backup_node(
backup_dir, 'node', node,
options=[
'--retention-redundancy=3',
'--delete-expired'])
pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
pgbench.wait()
self.backup_node(
backup_dir, 'node', node,
options=[
'--retention-redundancy=3',
'--delete-expired'])
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_delete_increment_page(self):
@ -134,6 +188,9 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_delete_increment_ptrack(self):
"""delete increment and all after him"""
if not self.ptrack:
return unittest.skip('Skipped because ptrack support is disabled')
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@ -358,11 +415,12 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
backup_id_a = self.backup_node(backup_dir, 'node', node)
backup_id_b = self.backup_node(backup_dir, 'node', node)
# Change FULL B backup status to ERROR
# Change FULLb to ERROR
self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
# FULLb ERROR
# FULLa OK
# Take PAGEa1 backup
page_id_a1 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
@ -370,15 +428,17 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
# PAGEa1 OK
# FULLb ERROR
# FULLa OK
# Change FULL B backup status to OK
# Change FULLb to OK
self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
# Change PAGEa1 backup status to ERROR
# Change PAGEa1 to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR')
# PAGEa1 ERROR
# FULLb OK
# FULLa OK
page_id_b1 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
@ -386,41 +446,49 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
# PAGEa1 ERROR
# FULLb OK
# FULLa OK
# Now we start to play with first generation of PAGE backups
# Change PAGEb1 status to ERROR
# Change PAGEb1 and FULLb status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
# Change PAGEa1 status to OK
self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK')
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLb ERROR
# FULLa OK
page_id_a2 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# PAGEa2 OK
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLb ERROR
# FULLa OK
# Change PAGEa2 status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR')
# Change PAGEb1 status to OK
# Change PAGEa2 and FULLa to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR')
self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR')
# Change PAGEb1 and FULLb to OK
self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
# PAGEa2 ERROR
# PAGEb1 OK
# PAGEa1 OK
# FULLb OK
# FULLa OK
# FULLa ERROR
page_id_b2 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# Change PAGEa2 status to OK
# Change PAGEa2 and FULLa status to OK
self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK')
self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
# PAGEb2 OK
# PAGEa2 OK
@ -478,16 +546,15 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
# Take FULL BACKUPs
backup_id_a = self.backup_node(backup_dir, 'node', node)
backup_id_b = self.backup_node(backup_dir, 'node', node)
# Change FULLb backup status to ERROR
# Change FULLb to ERROR
self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
page_id_a1 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# Change FULLb backup status to OK
# Change FULLb to OK
self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
# Change PAGEa1 backup status to ERROR
@ -505,15 +572,16 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
# FULLb OK
# FULLa OK
# Change PAGEa1 backup status to OK
# Change PAGEa1 to OK
self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK')
# Change PAGEb1 backup status to ERROR
# Change PAGEb1 and FULLb backup status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLb ERROR
# FULLa OK
page_id_a2 = self.backup_node(
@ -522,20 +590,22 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
# PAGEa2 OK
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLb ERROR
# FULLa OK
# Change PAGEb1 backup status to OK
# Change PAGEb1 and FULLb to OK
self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
# Change PAGEa2 backup status to ERROR
# Change PAGEa2 and FULLa to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR')
self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR')
# PAGEa2 ERROR
# PAGEb1 OK
# PAGEa1 OK
# FULLb OK
# FULLa OK
# FULLa ERROR
page_id_b2 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
@ -545,17 +615,21 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
# PAGEb1 OK
# PAGEa1 OK
# FULLb OK
# FULLa OK
# FULLa ERROR
# Change PAGEb2 and PAGEb1 status to ERROR
# Change PAGEb2, PAGEb1 and FULLb to ERROR
self.change_backup_status(backup_dir, 'node', page_id_b2, 'ERROR')
self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
# Change FULLa to OK
self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK')
# PAGEb2 ERROR
# PAGEa2 ERROR
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLb ERROR
# FULLa OK
page_id_a3 = self.backup_node(
@ -566,14 +640,15 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
# PAGEa2 ERROR
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLb ERROR
# FULLa OK
# Change PAGEa3 status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a3, 'ERROR')
# Change PAGEb2 status to OK
# Change PAGEb2 and FULLb to OK
self.change_backup_status(backup_dir, 'node', page_id_b2, 'OK')
self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
page_id_b3 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
@ -587,7 +662,7 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
# FULLb OK
# FULLa OK
# Change PAGEa3, PAGEa2 and PAGEb1 status to OK
# Change PAGEa3, PAGEa2 and PAGEb1 to OK
self.change_backup_status(backup_dir, 'node', page_id_a3, 'OK')
self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK')
self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')


@ -1078,6 +1078,9 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_delta_corruption_heal_via_ptrack_1(self):
"""make node, corrupt some page, check that backup failed"""
if not self.ptrack:
return unittest.skip('Skipped because ptrack support is disabled')
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@ -1135,6 +1138,9 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_page_corruption_heal_via_ptrack_2(self):
"""make node, corrupt some page, check that backup failed"""
if not self.ptrack:
return unittest.skip('Skipped because ptrack support is disabled')
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),


@ -20,10 +20,7 @@ class ExcludeTest(ProbackupTest, unittest.TestCase):
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'max_wal_senders': '2',
'shared_buffers': '1GB', 'fsync': 'off', 'ptrack_enable': 'on'})
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
@ -102,7 +99,7 @@ class ExcludeTest(ProbackupTest, unittest.TestCase):
def test_exclude_unlogged_tables_1(self):
"""
make node without archiving, create unlogged table, take full backup,
alter table to unlogged, take ptrack backup, restore ptrack backup,
alter table to logged, take delta backup, restore delta backup,
check that PGDATA`s are physically the same
"""
fname = self.id().split('.')[3]
@ -113,8 +110,7 @@ class ExcludeTest(ProbackupTest, unittest.TestCase):
initdb_params=['--data-checksums'],
pg_options={
'autovacuum': 'off',
"shared_buffers": "10MB",
'ptrack_enable': 'on'})
"shared_buffers": "10MB"})
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
@ -138,7 +134,7 @@ class ExcludeTest(ProbackupTest, unittest.TestCase):
node.safe_psql('postgres', "alter table test set logged")
self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
backup_dir, 'node', node, backup_type='delta',
options=['--stream']
)


@ -26,9 +26,11 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
[--remote-proto] [--remote-host]
[--remote-port] [--remote-path] [--remote-user]
[--ssh-options]
[--help]
pg_probackup show-config -B backup-path --instance=instance_name
[--format=format]
[--help]
pg_probackup backup -B backup-path -b backup-mode --instance=instance_name
[-D pgdata-path] [-C]
@ -55,6 +57,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
[--remote-proto] [--remote-host]
[--remote-port] [--remote-path] [--remote-user]
[--ssh-options]
[--help]
pg_probackup restore -B backup-path --instance=instance_name
[-D pgdata-path] [-i backup-id] [-j num-threads]
@ -72,6 +75,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
[--remote-proto] [--remote-host]
[--remote-port] [--remote-path] [--remote-user]
[--ssh-options]
[--help]
pg_probackup validate -B backup-path [--instance=instance_name]
[-i backup-id] [--progress] [-j num-threads]
@ -80,22 +84,27 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
[--recovery-target-timeline=timeline]
[--recovery-target-name=target-name]
[--skip-block-validation]
[--help]
pg_probackup checkdb [-B backup-path] [--instance=instance_name]
[-D pgdata-path] [--progress] [-j num-threads]
[--amcheck] [--skip-block-validation]
[--heapallindexed]
[--help]
pg_probackup show -B backup-path
[--instance=instance_name [-i backup-id]]
[--format=format]
[--help]
pg_probackup delete -B backup-path --instance=instance_name
[--wal] [-i backup-id | --expired | --merge-expired]
[--dry-run]
[--help]
pg_probackup merge -B backup-path --instance=instance_name
-i backup-id [--progress] [-j num-threads]
[--help]
pg_probackup add-instance -B backup-path -D pgdata-path
--instance=instance_name
@ -103,9 +112,11 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
[--remote-proto] [--remote-host]
[--remote-port] [--remote-path] [--remote-user]
[--ssh-options]
[--help]
pg_probackup del-instance -B backup-path
--instance=instance_name
[--help]
pg_probackup archive-push -B backup-path --instance=instance_name
--wal-file-path=wal-file-path
@ -117,6 +128,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
[--remote-proto] [--remote-host]
[--remote-port] [--remote-path] [--remote-user]
[--ssh-options]
[--help]
pg_probackup archive-get -B backup-path --instance=instance_name
--wal-file-path=wal-file-path
@ -124,6 +136,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
[--remote-proto] [--remote-host]
[--remote-port] [--remote-path] [--remote-user]
[--ssh-options]
[--help]
Read the website for details. <https://github.com/postgrespro/pg_probackup>
Report bugs to <https://github.com/postgrespro/pg_probackup/issues>.
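A hedged illustration of driving two of the commands from the synopsis above out of a script; the catalog path and instance name are placeholders, and only flags listed in the synopsis are used:

```python
# List the instance's backups as JSON, then validate the whole instance.
import json
import subprocess

backup_path = '/var/lib/pg_probackup'   # assumed catalog location

out = subprocess.check_output([
    'pg_probackup', 'show',
    '-B', backup_path,
    '--instance=node',
    '--format=json'])
print(json.dumps(json.loads(out), indent=2))

subprocess.check_call([
    'pg_probackup', 'validate',
    '-B', backup_path,
    '--instance=node'])
```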


@ -1 +1 @@
pg_probackup 2.1.3
pg_probackup 2.1.5


@ -57,8 +57,7 @@ class FalsePositive(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on'})
initdb_params=['--data-checksums'])
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
@ -114,6 +113,10 @@ class FalsePositive(ProbackupTest, unittest.TestCase):
def test_ptrack_concurrent_get_and_clear_1(self):
"""make node, make full and ptrack stream backups,"
" restore them and check data correctness"""
if not self.ptrack:
return unittest.skip('Skipped because ptrack support is disabled')
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -195,6 +198,10 @@ class FalsePositive(ProbackupTest, unittest.TestCase):
def test_ptrack_concurrent_get_and_clear_2(self):
"""make node, make full and ptrack stream backups,"
" restore them and check data correctness"""
if not self.ptrack:
return unittest.skip('Skipped because ptrack support is disabled')
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(


@ -130,7 +130,7 @@ def slow_start(self, replica=False):
self.start()
while True:
try:
if self.safe_psql('postgres', query) == 't\n':
if self.safe_psql('template1', query) == 't\n':
break
except testgres.QueryException as e:
if 'database system is starting up' in e[0]:
@ -271,7 +271,15 @@ class ProbackupTest(object):
self.remote_user = None
if 'PGPROBACKUP_SSH_REMOTE' in self.test_env:
self.remote = True
if self.test_env['PGPROBACKUP_SSH_REMOTE'] == 'ON':
self.remote = True
self.ptrack = False
if 'PG_PROBACKUP_PTRACK' in self.test_env:
if self.test_env['PG_PROBACKUP_PTRACK'] == 'ON':
self.ptrack = True
os.environ["PGAPPNAME"] = "pg_probackup"
@property
def pg_config_version(self):
@ -296,8 +304,6 @@ class ProbackupTest(object):
# print('PGPROBACKUP_SSH_USER is not set')
# exit(1)
def make_simple_node(
self,
base_dir=None,
@ -342,6 +348,10 @@ class ProbackupTest(object):
'postgresql.auto.conf',
'max_wal_senders = 10')
# set major version
with open(os.path.join(node.data_dir, 'PG_VERSION')) as f:
node.major_version = f.read().rstrip()
return node
def create_tblspace_in_node(self, node, tblspc_name, tblspc_path=None, cfs=False):
@ -1029,10 +1039,10 @@ class ProbackupTest(object):
archive_command = archive_command + '--overwrite '
if os.name == 'posix':
archive_command = archive_command + '--wal-file-path %p --wal-file-name %f'
archive_command = archive_command + '--wal-file-path=%p --wal-file-name=%f'
elif os.name == 'nt':
archive_command = archive_command + '--wal-file-path "%p" --wal-file-name "%f"'
archive_command = archive_command + '--wal-file-path="%p" --wal-file-name="%f"'
node.append_conf(
'postgresql.auto.conf',
@ -1319,8 +1329,6 @@ class ProbackupTest(object):
os.path.join(restored_pgdata['pgdata'], directory),
restored_pgdata['dirs'][directory]['mode'])
for directory in original_pgdata['dirs']:
if directory not in restored_pgdata['dirs']:
fail = True
@ -1328,7 +1336,6 @@ class ProbackupTest(object):
error_message += ' in restored PGDATA: {0}\n'.format(
os.path.join(restored_pgdata['pgdata'], directory))
for file in restored_pgdata['files']:
# File is present in RESTORED PGDATA
# but not present in ORIGINAL
@ -1410,7 +1417,7 @@ class ProbackupTest(object):
else:
error_message += (
'\nFile dissappearance.\n '
'\nFile disappearance.\n '
'File: {0}\n').format(
os.path.join(restored_pgdata['pgdata'], file)
)
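The remote and ptrack switches handled in this helper are plain environment variables; below is a sketch of a driver that turns both on before launching the suite (the unittest invocation is an assumption, not taken from this commit):

```python
# PGPROBACKUP_SSH_REMOTE=ON exercises the --remote-* code paths,
# PG_PROBACKUP_PTRACK=ON keeps the ptrack-dependent tests from being skipped.
import os
import subprocess

env = os.environ.copy()
env['PGPROBACKUP_SSH_REMOTE'] = 'ON'
env['PG_PROBACKUP_PTRACK'] = 'ON'

subprocess.run(
    ['python', '-m', 'unittest', 'discover', '-s', 'tests'],
    env=env, check=False)
```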


@ -15,7 +15,7 @@ class LockingTest(ProbackupTest, unittest.TestCase):
"""
make node, take full backup, stop it in the middle
run validate, expect it to successfully executed,
concurrect RUNNING backup with pid file and active process is legal
concurrent RUNNING backup with pid file and active process is legal
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
@ -406,7 +406,7 @@ class LockingTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_locking_concurrent_vaidate_and_backup(self):
def test_locking_concurrent_validate_and_backup(self):
"""
make node, take full backup, launch validate
and stop it in the middle, take page backup.


@ -142,7 +142,7 @@ class LogTest(ProbackupTest, unittest.TestCase):
log_file_size)
self.assertNotIn(
'WARNING:',
'WARNING: cannot read creation timestamp from rotation file',
output)
self.assertTrue(os.path.isfile(rotation_file_path))
@ -166,7 +166,6 @@ class LogTest(ProbackupTest, unittest.TestCase):
backup_dir, 'node',
options=['--log-rotation-age=1d'])
self.backup_node(
backup_dir, 'node', node,
options=[
@ -212,7 +211,7 @@ class LogTest(ProbackupTest, unittest.TestCase):
return_id=False)
self.assertNotIn(
'WARNING:',
'WARNING: missing rotation file:',
output)
# check that log file wasn`t rotated
@ -291,7 +290,7 @@ class LogTest(ProbackupTest, unittest.TestCase):
return_id=False)
self.assertNotIn(
'WARNING:',
'WARNING: rotation file',
output)
# check that log file wasn`t rotated


@ -826,6 +826,9 @@ class MergeTest(ProbackupTest, unittest.TestCase):
take page backup, merge full and page,
restore last page backup and check data correctness
"""
if not self.ptrack:
return unittest.skip('Skipped because ptrack support is disabled')
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -1342,7 +1345,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
def test_merge_different_compression_algo(self):
"""
Check that backups with different compression algorihtms can be merged
Check that backups with different compression algorithms can be merged
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@ -1537,8 +1540,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
gdb.set_breakpoint('write_backup_filelist')
gdb.run_until_break()
gdb.set_breakpoint('print_file_list')
gdb.continue_execution_until_break()
gdb.set_breakpoint('fio_fwrite')
gdb.continue_execution_until_break(2)
gdb._execute('signal SIGKILL')
@ -1628,8 +1631,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
gdb.set_breakpoint('write_backup_filelist')
gdb.run_until_break()
gdb.set_breakpoint('print_file_list')
gdb.continue_execution_until_break()
gdb.set_breakpoint('fio_fwrite')
gdb.continue_execution_until_break(2)
gdb._execute('signal SIGKILL')
@ -1651,8 +1654,6 @@ class MergeTest(ProbackupTest, unittest.TestCase):
os.remove(file_to_remove)
# Try to continue failed MERGE
#print(backup_id)
#exit(1)
self.merge_backup(backup_dir, "node", backup_id)
self.assertEqual(
@ -1875,15 +1876,28 @@ class MergeTest(ProbackupTest, unittest.TestCase):
pgbench = node.pgbench(options=['-T', '3', '-c', '2', '--no-vacuum'])
pgbench.wait()
backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='page')
pgdata = self.pgdata_content(node.data_dir)
node.cleanup()
node_restored = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node_restored'))
node_restored.cleanup()
self.restore_node(
backup_dir, 'node',
node_restored, backup_id=backup_id)
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# check that merged backup has the same state as the original backup
node_restored.cleanup()
self.merge_backup(backup_dir, 'node', backup_id=backup_id)
self.restore_node(backup_dir, 'node', node, backup_id=backup_id)
pgdata_restored = self.pgdata_content(node.data_dir)
self.restore_node(
backup_dir, 'node',
node_restored, backup_id=backup_id)
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
@ -1941,15 +1955,16 @@ class MergeTest(ProbackupTest, unittest.TestCase):
# FULLb OK
# FULLa OK
# Change PAGEa1 backup status to OK
# Change PAGEa1 to OK
self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK')
# Change PAGEb1 backup status to ERROR
# Change PAGEb1 and FULLb to ERROR
self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLb ERROR
# FULLa OK
page_id_a2 = self.backup_node(
@ -1958,20 +1973,22 @@ class MergeTest(ProbackupTest, unittest.TestCase):
# PAGEa2 OK
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLb ERROR
# FULLa OK
# Change PAGEb1 backup status to OK
# Change PAGEb1 and FULLb to OK
self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
# Change PAGEa2 backup status to ERROR
# Change PAGEa2 and FULLa to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR')
self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR')
# PAGEa2 ERROR
# PAGEb1 OK
# PAGEa1 OK
# FULLb OK
# FULLa OK
# FULLa ERROR
page_id_b2 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
@ -1981,17 +1998,21 @@ class MergeTest(ProbackupTest, unittest.TestCase):
# PAGEb1 OK
# PAGEa1 OK
# FULLb OK
# FULLa OK
# FULLa ERROR
# Change PAGEb2 and PAGEb1 status to ERROR
# Change PAGEb2, PAGEb1 and FULLb to ERROR
self.change_backup_status(backup_dir, 'node', page_id_b2, 'ERROR')
self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
# Change FULLa to OK
self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK')
# PAGEb2 ERROR
# PAGEa2 ERROR
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLb ERROR
# FULLa OK
page_id_a3 = self.backup_node(
@ -2002,14 +2023,16 @@ class MergeTest(ProbackupTest, unittest.TestCase):
# PAGEa2 ERROR
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLb ERROR
# FULLa OK
# Change PAGEa3 status to ERROR
# Change PAGEa3 and FULLa to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a3, 'ERROR')
# Change PAGEb2 status to OK
# Change PAGEb2, PAGEb1 and FULLb to OK
self.change_backup_status(backup_dir, 'node', page_id_b2, 'OK')
self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK')
page_id_b3 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
@ -2018,15 +2041,15 @@ class MergeTest(ProbackupTest, unittest.TestCase):
# PAGEa3 ERROR
# PAGEb2 OK
# PAGEa2 ERROR
# PAGEb1 ERROR
# PAGEb1 OK
# PAGEa1 OK
# FULLb OK
# FULLa OK
# FULLa ERROR
# Change PAGEa3, PAGEa2 and PAGEb1 status to OK
# Change PAGEa3, PAGEa2 and FULLa status to OK
self.change_backup_status(backup_dir, 'node', page_id_a3, 'OK')
self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK')
self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK')
# PAGEb3 OK
# PAGEa3 OK


@ -906,7 +906,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,100000) i;")
# copy lastest wal segment
# copy latest wal segment
wals_dir = os.path.join(backup_dir, 'wal', 'alien_node')
wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(
wals_dir, f)) and not f.endswith('.backup')]


@ -1,8 +1,9 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
from datetime import datetime, timedelta
import subprocess
from time import sleep
module_name = 'pgpro560'
@ -30,19 +31,22 @@ class CheckSystemID(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'node', node)
node.slow_start()
file = os.path.join(node.base_dir,'data', 'global', 'pg_control')
file = os.path.join(node.base_dir, 'data', 'global', 'pg_control')
os.remove(file)
try:
self.backup_node(backup_dir, 'node', node, options=['--stream'])
# we should die here because exception is what we expect to happen
self.assertEqual(1, 0, "Expecting Error because pg_control was deleted.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
self.assertEqual(
1, 0,
"Expecting Error because pg_control was deleted.\n "
"Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
'ERROR: could not open file' in e.message
and 'pg_control' in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
'ERROR: could not open file' in e.message and
'pg_control' in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname)
@ -75,24 +79,52 @@ class CheckSystemID(ProbackupTest, unittest.TestCase):
try:
self.backup_node(backup_dir, 'node1', node2, options=['--stream'])
# we should die here because exception is what we expect to happen
self.assertEqual(1, 0, "Expecting Error because of SYSTEM ID mismatch.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
self.assertEqual(
1, 0,
"Expecting Error because of SYSTEM ID mismatch.\n "
"Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
'ERROR: Backup data directory was initialized for system id' in e.message
and 'but connected instance system id is' in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
if self.get_version(node1) > 90600:
self.assertTrue(
'ERROR: Backup data directory was '
'initialized for system id' in e.message and
'but connected instance system id is' in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
else:
self.assertIn(
'ERROR: System identifier mismatch. '
'Connected PostgreSQL instance has system id',
e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
sleep(1)
try:
self.backup_node(backup_dir, 'node1', node2, data_dir=node1.data_dir, options=['--stream'])
self.backup_node(
backup_dir, 'node1', node2,
data_dir=node1.data_dir, options=['--stream'])
# we should die here because exception is what we expect to happen
self.assertEqual(1, 0, "Expecting Error because of of SYSTEM ID mismatch.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
self.assertEqual(
1, 0,
"Expecting Error because of of SYSTEM ID mismatch.\n "
"Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
'ERROR: Backup data directory was initialized for system id' in e.message
and 'but connected instance system id is' in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
if self.get_version(node1) > 90600:
self.assertTrue(
'ERROR: Backup data directory was initialized '
'for system id' in e.message and
'but connected instance system id is' in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
else:
self.assertIn(
'ERROR: System identifier mismatch. '
'Connected PostgreSQL instance has system id',
e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname)


@ -27,7 +27,7 @@ class ArchiveCheck(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
# make erroneus archive_command
# make erroneous archive_command
node.append_conf("postgresql.auto.conf", "archive_command = 'exit 0'")
node.slow_start()


@ -122,8 +122,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_ptrack_uncommited_xact(self):
"""make ptrack backup while there is uncommited open transaction"""
def test_ptrack_uncommitted_xact(self):
"""make ptrack backup while there is uncommitted open transaction"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(


@ -18,6 +18,9 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
make node, take full backup, restore it and make replica from it,
take full stream backup from replica
"""
if not self.ptrack:
return unittest.skip('Skipped because ptrack support is disabled')
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(


@ -8,6 +8,8 @@ from time import sleep
from datetime import datetime, timedelta
import hashlib
import shutil
import json
from testgres import QueryException
module_name = 'restore'
@ -492,6 +494,9 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_restore_full_ptrack_archive(self):
"""recovery to latest from archive full+ptrack backups"""
if not self.ptrack:
return unittest.skip('Skipped because ptrack support is disabled')
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@ -540,6 +545,9 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_restore_ptrack(self):
"""recovery to latest from archive full+ptrack+ptrack backups"""
if not self.ptrack:
return unittest.skip('Skipped because ptrack support is disabled')
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@ -595,6 +603,9 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_restore_full_ptrack_stream(self):
"""recovery in stream mode to latest from full + ptrack backups"""
if not self.ptrack:
return unittest.skip('Skipped because ptrack support is disabled')
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@ -647,6 +658,9 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
recovery to latest from full + ptrack backups
with loads when ptrack backup do
"""
if not self.ptrack:
return unittest.skip('Skipped because ptrack support is disabled')
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@ -711,6 +725,9 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
recovery to latest from full + page backups
with loads when full backup do
"""
if not self.ptrack:
return unittest.skip('Skipped because ptrack support is disabled')
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@ -806,7 +823,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# we should die here because exception is what we expect to happen
self.assertEqual(
1, 0,
"Expecting Error because restore destionation is not empty.\n "
"Expecting Error because restore destination is not empty.\n "
"Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
@ -2028,33 +2045,6 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
node.slow_start()
# Restore with recovery target lsn
node.cleanup()
self.restore_node(
backup_dir, 'node', node,
options=[
'--recovery-target-lsn={0}'.format(target_lsn),
"--recovery-target-action=promote",
'--recovery-target-timeline=1',
])
with open(recovery_conf, 'r') as f:
recovery_conf_content = f.read()
self.assertIn(
"recovery_target_lsn = '{0}'".format(target_lsn),
recovery_conf_content)
self.assertIn(
"recovery_target_action = 'promote'",
recovery_conf_content)
self.assertIn(
"recovery_target_timeline = '1'",
recovery_conf_content)
node.slow_start()
# Restore with recovery target name
node.cleanup()
self.restore_node(
@ -2082,6 +2072,35 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
node.slow_start()
# Restore with recovery target lsn
if self.get_version(node) >= 100000:
node.cleanup()
self.restore_node(
backup_dir, 'node', node,
options=[
'--recovery-target-lsn={0}'.format(target_lsn),
"--recovery-target-action=promote",
'--recovery-target-timeline=1',
])
with open(recovery_conf, 'r') as f:
recovery_conf_content = f.read()
self.assertIn(
"recovery_target_lsn = '{0}'".format(target_lsn),
recovery_conf_content)
self.assertIn(
"recovery_target_action = 'promote'",
recovery_conf_content)
self.assertIn(
"recovery_target_timeline = '1'",
recovery_conf_content)
node.slow_start()
# Clean after yourself
self.del_test_dir(module_name, fname)
@ -2340,3 +2359,820 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_partial_restore_exclude(self):
""""""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
for i in range(1, 10, 1):
node.safe_psql(
'postgres',
'CREATE database db{0}'.format(i))
db_list_raw = node.safe_psql(
'postgres',
'SELECT to_json(a) '
'FROM (SELECT oid, datname FROM pg_database) a').rstrip()
db_list_splitted = db_list_raw.splitlines()
db_list = {}
for line in db_list_splitted:
line = json.loads(line)
db_list[line['datname']] = line['oid']
# FULL backup
backup_id = self.backup_node(backup_dir, 'node', node)
pgdata = self.pgdata_content(node.data_dir)
# restore FULL backup
node_restored_1 = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node_restored_1'))
node_restored_1.cleanup()
try:
self.restore_node(
backup_dir, 'node',
node_restored_1, options=[
"--db-include=db1",
"--db-exclude=db2"])
self.assertEqual(
1, 0,
"Expecting Error because of 'db-exclude' and 'db-include'.\n "
"Output: {0} \n CMD: {1}".format(
self.output, self.cmd))
except ProbackupException as e:
self.assertIn(
"ERROR: You cannot specify '--db-include' "
"and '--db-exclude' together", e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
self.restore_node(
backup_dir, 'node', node_restored_1)
pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir)
self.compare_pgdata(pgdata, pgdata_restored_1)
db1_path = os.path.join(
node_restored_1.data_dir, 'base', db_list['db1'])
db5_path = os.path.join(
node_restored_1.data_dir, 'base', db_list['db5'])
self.truncate_every_file_in_dir(db1_path)
self.truncate_every_file_in_dir(db5_path)
pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir)
node_restored_2 = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node_restored_2'))
node_restored_2.cleanup()
self.restore_node(
backup_dir, 'node',
node_restored_2, options=[
"--db-exclude=db1",
"--db-exclude=db5"])
pgdata_restored_2 = self.pgdata_content(node_restored_2.data_dir)
self.compare_pgdata(pgdata_restored_1, pgdata_restored_2)
node_restored_2.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored_2.port))
node_restored_2.slow_start()
node_restored_2.safe_psql(
'postgres',
'select 1')
try:
node_restored_2.safe_psql(
'db1',
'select 1')
except QueryException as e:
self.assertIn('FATAL', e.message)
try:
node_restored_2.safe_psql(
'db5',
'select 1')
except QueryException as e:
self.assertIn('FATAL', e.message)
with open(node_restored_2.pg_log_file, 'r') as f:
output = f.read()
self.assertNotIn('PANIC', output)
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_partial_restore_exclude_tablespace(self):
""""""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
cat_version = node.get_control_data()["Catalog version number"]
version_specific_dir = 'PG_' + node.major_version + '_' + cat_version
# PG_10_201707211
# pg_tblspc/33172/PG_9.5_201510051/16386/
self.create_tblspace_in_node(node, 'somedata')
node_tablespace = self.get_tblspace_path(node, 'somedata')
tbl_oid = node.safe_psql(
'postgres',
"SELECT oid "
"FROM pg_tablespace "
"WHERE spcname = 'somedata'").rstrip()
for i in range(1, 10, 1):
node.safe_psql(
'postgres',
'CREATE database db{0} tablespace somedata'.format(i))
db_list_raw = node.safe_psql(
'postgres',
'SELECT to_json(a) '
'FROM (SELECT oid, datname FROM pg_database) a').rstrip()
db_list_splitted = db_list_raw.splitlines()
db_list = {}
for line in db_list_splitted:
line = json.loads(line)
db_list[line['datname']] = line['oid']
# FULL backup
backup_id = self.backup_node(backup_dir, 'node', node)
pgdata = self.pgdata_content(node.data_dir)
# restore FULL backup
node_restored_1 = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node_restored_1'))
node_restored_1.cleanup()
node1_tablespace = self.get_tblspace_path(node_restored_1, 'somedata')
self.restore_node(
backup_dir, 'node',
node_restored_1, options=[
"-T", "{0}={1}".format(
node_tablespace, node1_tablespace)])
pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir)
self.compare_pgdata(pgdata, pgdata_restored_1)
# truncate every db
for db in db_list:
# with exception below
if db in ['db1', 'db5']:
self.truncate_every_file_in_dir(
os.path.join(
node_restored_1.data_dir, 'pg_tblspc',
tbl_oid, version_specific_dir, db_list[db]))
pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir)
node_restored_2 = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node_restored_2'))
node_restored_2.cleanup()
node2_tablespace = self.get_tblspace_path(node_restored_2, 'somedata')
self.restore_node(
backup_dir, 'node',
node_restored_2, options=[
"--db-exclude=db1",
"--db-exclude=db5",
"-T", "{0}={1}".format(
node_tablespace, node2_tablespace)])
pgdata_restored_2 = self.pgdata_content(node_restored_2.data_dir)
self.compare_pgdata(pgdata_restored_1, pgdata_restored_2)
node_restored_2.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored_2.port))
node_restored_2.slow_start()
node_restored_2.safe_psql(
'postgres',
'select 1')
try:
node_restored_2.safe_psql(
'db1',
'select 1')
except QueryException as e:
self.assertIn('FATAL', e.message)
try:
node_restored_2.safe_psql(
'db5',
'select 1')
except QueryException as e:
self.assertIn('FATAL', e.message)
with open(node_restored_2.pg_log_file, 'r') as f:
output = f.read()
self.assertNotIn('PANIC', output)
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_partial_restore_include(self):
"""
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
for i in range(1, 10, 1):
node.safe_psql(
'postgres',
'CREATE database db{0}'.format(i))
db_list_raw = node.safe_psql(
'postgres',
'SELECT to_json(a) '
'FROM (SELECT oid, datname FROM pg_database) a').rstrip()
db_list_splitted = db_list_raw.splitlines()
db_list = {}
for line in db_list_splitted:
line = json.loads(line)
db_list[line['datname']] = line['oid']
# FULL backup
backup_id = self.backup_node(backup_dir, 'node', node)
pgdata = self.pgdata_content(node.data_dir)
# restore FULL backup
node_restored_1 = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node_restored_1'))
node_restored_1.cleanup()
try:
self.restore_node(
backup_dir, 'node',
node_restored_1, options=[
"--db-include=db1",
"--db-exclude=db2"])
self.assertEqual(
1, 0,
"Expecting Error because of 'db-exclude' and 'db-include'.\n "
"Output: {0} \n CMD: {1}".format(
self.output, self.cmd))
except ProbackupException as e:
self.assertIn(
"ERROR: You cannot specify '--db-include' "
"and '--db-exclude' together", e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
self.restore_node(
backup_dir, 'node', node_restored_1)
pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir)
self.compare_pgdata(pgdata, pgdata_restored_1)
# truncate every db
for db in db_list:
# with exception below
if db in ['template0', 'template1', 'postgres', 'db1', 'db5']:
continue
self.truncate_every_file_in_dir(
os.path.join(
node_restored_1.data_dir, 'base', db_list[db]))
pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir)
node_restored_2 = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node_restored_2'))
node_restored_2.cleanup()
self.restore_node(
backup_dir, 'node',
node_restored_2, options=[
"--db-include=db1",
"--db-include=db5",
"--db-include=postgres"])
pgdata_restored_2 = self.pgdata_content(node_restored_2.data_dir)
self.compare_pgdata(pgdata_restored_1, pgdata_restored_2)
node_restored_2.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored_2.port))
node_restored_2.slow_start()
node_restored_2.safe_psql(
'db1',
'select 1')
node_restored_2.safe_psql(
'db5',
'select 1')
node_restored_2.safe_psql(
'template1',
'select 1')
try:
node_restored_2.safe_psql(
'db2',
'select 1')
except QueryException as e:
self.assertIn('FATAL', e.message)
try:
node_restored_2.safe_psql(
'db10',
'select 1')
except QueryException as e:
self.assertIn('FATAL', e.message)
with open(node_restored_2.pg_log_file, 'r') as f:
output = f.read()
self.assertNotIn('PANIC', output)
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_partial_restore_backward_compatibility_1(self):
"""
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'])
self.init_pb(backup_dir, old_binary=True)
self.add_instance(backup_dir, 'node', node, old_binary=True)
node.slow_start()
# create databases
for i in range(1, 10, 1):
node.safe_psql(
'postgres',
'CREATE database db{0}'.format(i))
# FULL backup with old binary, without partial restore support
backup_id = self.backup_node(
backup_dir, 'node', node,
old_binary=True, options=['--stream'])
pgdata = self.pgdata_content(node.data_dir)
node_restored = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node_restored'))
node_restored.cleanup()
try:
self.restore_node(
backup_dir, 'node',
node_restored, options=[
"--db-exclude=db5"])
self.assertEqual(
1, 0,
"Expecting Error because backup do not support partial restore.\n "
"Output: {0} \n CMD: {1}".format(
self.output, self.cmd))
except ProbackupException as e:
self.assertIn(
"ERROR: Backup {0} has missing database_map".format(backup_id),
e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
self.restore_node(backup_dir, 'node', node_restored)
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# incremental backup with partial restore support
for i in range(11, 15, 1):
node.safe_psql(
'postgres',
'CREATE database db{0}'.format(i))
# get db list
db_list_raw = node.safe_psql(
'postgres',
'SELECT to_json(a) '
'FROM (SELECT oid, datname FROM pg_database) a').rstrip()
db_list_splitted = db_list_raw.splitlines()
db_list = {}
for line in db_list_splitted:
line = json.loads(line)
db_list[line['datname']] = line['oid']
backup_id = self.backup_node(
backup_dir, 'node', node,
backup_type='delta', options=['--stream'])
# get etalon
node_restored.cleanup()
self.restore_node(backup_dir, 'node', node_restored)
self.truncate_every_file_in_dir(
os.path.join(
node_restored.data_dir, 'base', db_list['db5']))
self.truncate_every_file_in_dir(
os.path.join(
node_restored.data_dir, 'base', db_list['db14']))
pgdata_restored = self.pgdata_content(node_restored.data_dir)
# get new node
node_restored_1 = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node_restored_1'))
node_restored_1.cleanup()
self.restore_node(
backup_dir, 'node',
node_restored_1, options=[
"--db-exclude=db5",
"--db-exclude=db14"])
pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir)
self.compare_pgdata(pgdata_restored, pgdata_restored_1)
def test_partial_restore_backward_compatibility_merge(self):
"""
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'])
self.init_pb(backup_dir, old_binary=True)
self.add_instance(backup_dir, 'node', node, old_binary=True)
node.slow_start()
# create databases
for i in range(1, 10, 1):
node.safe_psql(
'postgres',
'CREATE database db{0}'.format(i))
# FULL backup with old binary, without partial restore support
backup_id = self.backup_node(
backup_dir, 'node', node,
old_binary=True, options=['--stream'])
pgdata = self.pgdata_content(node.data_dir)
node_restored = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node_restored'))
node_restored.cleanup()
try:
self.restore_node(
backup_dir, 'node',
node_restored, options=[
"--db-exclude=db5"])
self.assertEqual(
1, 0,
"Expecting Error because backup do not support partial restore.\n "
"Output: {0} \n CMD: {1}".format(
self.output, self.cmd))
except ProbackupException as e:
self.assertIn(
"ERROR: Backup {0} has missing database_map".format(backup_id),
e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
self.restore_node(backup_dir, 'node', node_restored)
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# incremental backup with partial restore support
for i in range(11, 15, 1):
node.safe_psql(
'postgres',
'CREATE database db{0}'.format(i))
# get db list
db_list_raw = node.safe_psql(
'postgres',
'SELECT to_json(a) '
'FROM (SELECT oid, datname FROM pg_database) a').rstrip()
db_list_splitted = db_list_raw.splitlines()
db_list = {}
for line in db_list_splitted:
line = json.loads(line)
db_list[line['datname']] = line['oid']
backup_id = self.backup_node(
backup_dir, 'node', node,
backup_type='delta', options=['--stream'])
# get etalon
node_restored.cleanup()
self.restore_node(backup_dir, 'node', node_restored)
self.truncate_every_file_in_dir(
os.path.join(
node_restored.data_dir, 'base', db_list['db5']))
self.truncate_every_file_in_dir(
os.path.join(
node_restored.data_dir, 'base', db_list['db14']))
pgdata_restored = self.pgdata_content(node_restored.data_dir)
# get new node
node_restored_1 = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node_restored_1'))
node_restored_1.cleanup()
# merge
self.merge_backup(backup_dir, 'node', backup_id=backup_id)
self.restore_node(
backup_dir, 'node',
node_restored_1, options=[
"--db-exclude=db5",
"--db-exclude=db14"])
pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir)
self.compare_pgdata(pgdata_restored, pgdata_restored_1)
def test_missing_database_map(self):
"""
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
# create databases
for i in range(1, 10, 1):
node.safe_psql(
'postgres',
'CREATE database db{0}'.format(i))
node.safe_psql(
"postgres",
"CREATE DATABASE backupdb")
if self.get_version(node) > self.version_to_num('10.0'):
# bootstrap for 10/11
node.safe_psql(
"backupdb",
"REVOKE ALL on SCHEMA public from public; "
"REVOKE ALL on SCHEMA pg_catalog from public; "
"REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM public; "
"REVOKE ALL ON ALL TABLES IN SCHEMA public FROM public; "
"CREATE ROLE backup WITH LOGIN REPLICATION; "
"GRANT CONNECT ON DATABASE postgres to backup; "
"GRANT USAGE ON SCHEMA pg_catalog TO backup; "
"GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
# we use it for partial restore and checkdb
# "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_ptrack_clear() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_ptrack_get_and_clear(oid, oid) TO backup; "
# "GRANT EXECUTE ON FUNCTION pg_catalog.pg_ptrack_get_block_2(oid, oid, oid, oid) TO backup;"
)
else:
# bootstrap for 9.5/9.6
node.safe_psql(
"backupdb",
"REVOKE ALL on SCHEMA public from public; "
"REVOKE ALL on SCHEMA pg_catalog from public; "
"REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM public; "
"REVOKE ALL ON ALL TABLES IN SCHEMA public FROM public; "
"CREATE ROLE backup WITH LOGIN REPLICATION; "
"GRANT CONNECT ON DATABASE postgres to backup; "
"GRANT USAGE ON SCHEMA pg_catalog TO backup; "
# we use it for ptrack
"GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
# we use it for partial restore and checkdb
# "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_ptrack_clear() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_ptrack_get_and_clear(oid, oid) TO backup; "
# "GRANT EXECUTE ON FUNCTION pg_catalog.pg_ptrack_get_block_2(oid, oid, oid, oid) TO backup;"
)
# FULL backup without database_map
backup_id = self.backup_node(
backup_dir, 'node', node, datname='backupdb',
options=['--stream', "-U", "backup"])
pgdata = self.pgdata_content(node.data_dir)
node_restored = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node_restored'))
node_restored.cleanup()
# backup has missing database_map and that is legal
try:
self.restore_node(
backup_dir, 'node', node_restored,
options=["--db-exclude=db5", "--db-exclude=db9"])
self.assertEqual(
1, 0,
"Expecting Error because user do not have pg_database access.\n "
"Output: {0} \n CMD: {1}".format(
self.output, self.cmd))
except ProbackupException as e:
self.assertIn(
"ERROR: Backup {0} has missing database_map, "
"partial restore is impossible.".format(
backup_id), e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
try:
self.restore_node(
backup_dir, 'node', node_restored,
options=["--db-include=db1"])
self.assertEqual(
1, 0,
"Expecting Error because user do not have pg_database access.\n "
"Output: {0} \n CMD: {1}".format(
self.output, self.cmd))
except ProbackupException as e:
self.assertIn(
"ERROR: Backup {0} has missing database_map, "
"partial restore is impossible.".format(
backup_id), e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
# check that simple restore is still possible
self.restore_node(backup_dir, 'node', node_restored)
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
def test_empty_and_mangled_database_map(self):
"""
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
# create databases
for i in range(1, 10, 1):
node.safe_psql(
'postgres',
'CREATE database db{0}'.format(i))
# FULL backup with database_map
backup_id = self.backup_node(
backup_dir, 'node', node, options=['--stream'])
pgdata = self.pgdata_content(node.data_dir)
# truncate database_map
path = os.path.join(
backup_dir, 'backups', 'node',
backup_id, 'database', 'database_map')
with open(path, "w") as f:
f.close()
node_restored = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node_restored'))
node_restored.cleanup()
try:
self.restore_node(
backup_dir, 'node', node_restored,
options=["--db-include=db1", '--no-validate'])
self.assertEqual(
1, 0,
"Expecting Error because database_map is empty.\n "
"Output: {0} \n CMD: {1}".format(
self.output, self.cmd))
except ProbackupException as e:
self.assertIn(
"ERROR: Backup {0} has empty or mangled database_map, "
"partial restore is impossible".format(backup_id), e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
try:
self.restore_node(
backup_dir, 'node', node_restored,
options=["--db-exclude=db1", '--no-validate'])
self.assertEqual(
1, 0,
"Expecting Error because database_map is empty.\n "
"Output: {0} \n CMD: {1}".format(
self.output, self.cmd))
except ProbackupException as e:
self.assertIn(
"ERROR: Backup {0} has empty or mangled database_map, "
"partial restore is impossible".format(backup_id), e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
# mangle database_map
with open(path, "w") as f:
f.write("42")
f.close()
try:
self.restore_node(
backup_dir, 'node', node_restored,
options=["--db-include=db1", '--no-validate'])
self.assertEqual(
1, 0,
"Expecting Error because database_map is empty.\n "
"Output: {0} \n CMD: {1}".format(
self.output, self.cmd))
except ProbackupException as e:
self.assertIn(
'ERROR: field "dbOid" is not found in the line 42 of '
'the file backup_content.control', e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
try:
self.restore_node(
backup_dir, 'node', node_restored,
options=["--db-exclude=db1", '--no-validate'])
self.assertEqual(
1, 0,
"Expecting Error because database_map is empty.\n "
"Output: {0} \n CMD: {1}".format(
self.output, self.cmd))
except ProbackupException as e:
self.assertIn(
'ERROR: field "dbOid" is not found in the line 42 of '
'the file backup_content.control', e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
# check that simple restore is still possible
self.restore_node(
backup_dir, 'node', node_restored, options=['--no-validate'])
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)

@ -1,7 +1,7 @@
import os
import unittest
from datetime import datetime, timedelta
from .helpers.ptrack_helpers import ProbackupTest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
from time import sleep
@ -247,11 +247,12 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
backup_id_a = self.backup_node(backup_dir, 'node', node)
backup_id_b = self.backup_node(backup_dir, 'node', node)
# Change FULL B backup status to ERROR
# Change FULLb backup status to ERROR
self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
# FULLb ERROR
# FULLa OK
# Take PAGEa1 backup
page_id_a1 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
@ -259,57 +260,69 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
# PAGEa1 OK
# FULLb ERROR
# FULLa OK
# Change FULL B backup status to OK
# Change FULLb backup status to OK
self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
# Change PAGEa1 backup status to ERROR
# Change PAGEa1 and FULLa to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR')
self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR')
# PAGEa1 ERROR
# FULLb OK
# FULLa OK
# FULLa ERROR
page_id_b1 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# PAGEb1 OK
# PAGEa1 ERROR
# FULLb OK
# FULLa OK
# Now we start to play with first generation of PAGE backups
# Change PAGEb1 status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
# FULLa ERROR
# Change PAGEa1 status to OK
# Now we start to play with first generation of PAGE backups
# Change PAGEb1 and FULLb to ERROR
self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
# Change PAGEa1 and FULLa to OK
self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK')
self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK')
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLb ERROR
# FULLa OK
page_id_a2 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# PAGEa2 OK
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLb ERROR
# FULLa OK
# Change PAGEa2 status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR')
# Change PAGEb1 status to OK
# Change PAGEa2 and FULLa to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR')
self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR')
# Change PAGEb1 and FULLb to OK
self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
# PAGEa2 ERROR
# PAGEb1 OK
# PAGEa1 OK
# FULLb OK
# FULLa OK
# FULLa ERROR
page_id_b2 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# Change PAGEa2 status to OK
# Change PAGEa2 and FULLa to OK
self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK')
self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK')
# PAGEb2 OK
# PAGEa2 OK
@ -321,14 +334,12 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
# Purge backups
backups = os.path.join(backup_dir, 'backups', 'node')
for backup in os.listdir(backups):
if backup in [page_id_a2, page_id_b2, 'pg_probackup.conf']:
continue
with open(
os.path.join(
backups, backup, "backup.control"), "a") as conf:
conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format(
datetime.now() - timedelta(days=3)))
if backup not in [page_id_a2, page_id_b2, 'pg_probackup.conf']:
with open(
os.path.join(
backups, backup, "backup.control"), "a") as conf:
conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format(
datetime.now() - timedelta(days=3)))
self.delete_expired(
backup_dir, 'node',
@ -372,32 +383,38 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
# PAGEa1 OK
# FULLb ERROR
# FULLa OK
# Change FULL B backup status to OK
# Change FULLb backup status to OK
self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
# Change PAGEa1 backup status to ERROR
# Change PAGEa1 and FULLa backup status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR')
self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR')
# PAGEa1 ERROR
# FULLb OK
# FULLa OK
# FULLa ERROR
page_id_b1 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# PAGEb1 OK
# PAGEa1 ERROR
# FULLb OK
# FULLa OK
# FULLa ERROR
# Now we start to play with first generation of PAGE backups
# Change PAGEb1 status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
# Change PAGEa1 status to OK
self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK')
self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK')
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLb ERROR
# FULLa OK
page_id_a2 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
@ -405,24 +422,28 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
# PAGEa2 OK
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLb ERROR
# FULLa OK
# Change PAGEa2 status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR')
# Change PAGEb1 status to OK
# Change PAGEa2 and FULLa status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR')
self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR')
# Change PAGEb1 and FULLb status to OK
self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
# PAGEa2 ERROR
# PAGEb1 OK
# PAGEa1 OK
# FULLb OK
# FULLa OK
# FULLa ERROR
page_id_b2 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# Change PAGEa2 status to OK
# Change PAGEa2 and FULLa status to OK
self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK')
self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK')
# PAGEb2 OK
# PAGEa2 OK
@ -461,11 +482,12 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
backup_id_a = self.backup_node(backup_dir, 'node', node)
backup_id_b = self.backup_node(backup_dir, 'node', node)
# Change FULL B backup status to ERROR
# Change FULLb backup status to ERROR
self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
# FULLb ERROR
# FULLa OK
# Take PAGEa1 backup
page_id_a1 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
@ -473,7 +495,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
# PAGEa1 OK
# FULLb ERROR
# FULLa OK
# Change FULL B backup status to OK
# Change FULLb to OK
self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
# Change PAGEa1 backup status to ERROR
@ -482,6 +505,7 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
# PAGEa1 ERROR
# FULLb OK
# FULLa OK
page_id_b1 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
@ -489,41 +513,49 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
# PAGEa1 ERROR
# FULLb OK
# FULLa OK
# Now we start to play with first generation of PAGE backups
# Change PAGEb1 status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
# Change PAGEa1 status to OK
# Now we start to play with first generation of PAGE backups
# Change PAGEb1 and FULLb to ERROR
self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
# Change PAGEa1 to OK
self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK')
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLb ERROR
# FULLa OK
page_id_a2 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# PAGEa2 OK
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLb ERROR
# FULLa OK
# Change PAGEa2 status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR')
# Change PAGEb1 status to OK
# Change PAGEa2 and FULLa to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR')
self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR')
# Change PAGEb1 and FULLb to OK
self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
# PAGEa2 ERROR
# PAGEb1 OK
# PAGEa1 OK
# FULLb OK
# FULLa OK
# FULLa ERROR
page_id_b2 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# Change PAGEa2 status to OK
# Change PAGEa2 and FULLa to OK
self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK')
self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK')
# PAGEb2 OK
# PAGEa2 OK
@ -535,14 +567,12 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
# Purge backups
backups = os.path.join(backup_dir, 'backups', 'node')
for backup in os.listdir(backups):
if backup in [page_id_a2, page_id_b2, 'pg_probackup.conf']:
continue
with open(
os.path.join(
backups, backup, "backup.control"), "a") as conf:
conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format(
datetime.now() - timedelta(days=3)))
if backup not in [page_id_a2, page_id_b2, 'pg_probackup.conf']:
with open(
os.path.join(
backups, backup, "backup.control"), "a") as conf:
conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format(
datetime.now() - timedelta(days=3)))
output = self.delete_expired(
backup_dir, 'node',
@ -768,12 +798,12 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
# Take FULL BACKUPs
backup_id_a = self.backup_node(backup_dir, 'node', node)
pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
pgbench.wait()
# pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
# pgbench.wait()
backup_id_b = self.backup_node(backup_dir, 'node', node)
pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
pgbench.wait()
# pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
# pgbench.wait()
# Change FULLb backup status to ERROR
self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
@ -781,13 +811,13 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
page_id_a1 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
pgbench.wait()
# pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
# pgbench.wait()
# Change FULLb backup status to OK
# Change FULLb to OK
self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
# Change PAGEa1 backup status to ERROR
# Change PAGEa1 to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR')
# PAGEa1 ERROR
@ -802,86 +832,95 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
# FULLb OK
# FULLa OK
pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
pgbench.wait()
# pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
# pgbench.wait()
# Change PAGEa1 backup status to OK
# Change PAGEa1 to OK
self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK')
# Change PAGEb1 backup status to ERROR
# Change PAGEb1 and FULLb to ERROR
self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLb ERROR
# FULLa OK
page_id_a2 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
pgbench.wait()
# pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
# pgbench.wait()
# PAGEa2 OK
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLb ERROR
# FULLa OK
# Change PAGEb1 backup status to OK
# Change PAGEb1 and FULLb to OK
self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
# Change PAGEa2 backup status to ERROR
# Change PAGEa2 and FULLa to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR')
self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR')
# PAGEa2 ERROR
# PAGEb1 OK
# PAGEa1 OK
# FULLb OK
# FULLa OK
# FULLa ERROR
page_id_b2 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
pgbench.wait()
# pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
# pgbench.wait()
# PAGEb2 OK
# PAGEa2 ERROR
# PAGEb1 OK
# PAGEa1 OK
# FULLb OK
# FULLa OK
# FULLa ERROR
# Change PAGEb2 and PAGEb1 status to ERROR
# Change PAGEb2 and PAGEb1 to ERROR
self.change_backup_status(backup_dir, 'node', page_id_b2, 'ERROR')
self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
# and swap the FULL statuses: FULLa to OK, FULLb to ERROR
self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK')
self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
# PAGEb2 ERROR
# PAGEa2 ERROR
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLb ERROR
# FULLa OK
page_id_a3 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
pgbench.wait()
# pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
# pgbench.wait()
# PAGEa3 OK
# PAGEb2 ERROR
# PAGEa2 ERROR
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLb ERROR
# FULLa OK
# Change PAGEa3 status to ERROR
# Change PAGEa3 to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a3, 'ERROR')
# Change PAGEb2 status to OK
# Change PAGEb2, PAGEb1 and FULLb to OK
self.change_backup_status(backup_dir, 'node', page_id_b2, 'OK')
self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
page_id_b3 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
@ -890,7 +929,7 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
# PAGEa3 ERROR
# PAGEb2 OK
# PAGEa2 ERROR
# PAGEb1 ERROR
# PAGEb1 OK
# PAGEa1 OK
# FULLb OK
# FULLa OK
@ -911,16 +950,15 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
# Check that page_id_a3 and page_id_a2 are both direct descendants of page_id_a1
self.assertEqual(
self.show_pb(backup_dir, 'node', backup_id=page_id_a3)['parent-backup-id'],
self.show_pb(
backup_dir, 'node', backup_id=page_id_a3)['parent-backup-id'],
page_id_a1)
self.assertEqual(
self.show_pb(backup_dir, 'node', backup_id=page_id_a2)['parent-backup-id'],
self.show_pb(
backup_dir, 'node', backup_id=page_id_a2)['parent-backup-id'],
page_id_a1)
print("Backups {0} and {1} are children of {2}".format(
page_id_a3, page_id_a2, page_id_a1))
# Purge backups
backups = os.path.join(backup_dir, 'backups', 'node')
for backup in os.listdir(backups):
@ -1203,7 +1241,7 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
@unittest.skip("skip")
def test_window_error_backups(self):
"""
PAGE ERROR
@ -1226,16 +1264,110 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
node.pgbench_init(scale=3)
# Take FULL BACKUPs
backup_id_a1 = self.backup_node(backup_dir, 'node', node)
page_id_a2 = self.backup_node(
gdb = self.backup_node(
backup_dir, 'node', node, backup_type='page', gdb=True)
page_id_a3 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# Change FULLb backup status to ERROR
self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_window_error_backups_1(self):
"""
DELTA
PAGE ERROR
FULL
-------window
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'])
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# Take FULL BACKUP
full_id = self.backup_node(backup_dir, 'node', node)
# Take PAGE BACKUP
gdb = self.backup_node(
backup_dir, 'node', node, backup_type='page', gdb=True)
gdb.set_breakpoint('pg_stop_backup')
gdb.run_until_break()
gdb.remove_all_breakpoints()
gdb._execute('signal SIGINT')
gdb.continue_execution_until_error()
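# the backup process was interrupted inside pg_stop_backup, so this PAGE backup is expected to be left in ERROR state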
page_id = self.show_pb(backup_dir, 'node')[1]['id']
# Take DELTA backup
delta_id = self.backup_node(
backup_dir, 'node', node, backup_type='delta',
options=['--retention-window=2', '--delete-expired'])
# Take FULL BACKUP
full2_id = self.backup_node(backup_dir, 'node', node)
self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4)
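# nothing was purged: the FULL, the ERROR PAGE, the DELTA and the second FULL all fall inside the 2-day window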
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_window_error_backups_2(self):
"""
DELTA
PAGE ERROR
FULL
-------window
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'])
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# Take FULL BACKUP
full_id = self.backup_node(backup_dir, 'node', node)
# Take PAGE BACKUP
gdb = self.backup_node(
backup_dir, 'node', node, backup_type='page', gdb=True)
gdb.set_breakpoint('pg_stop_backup')
gdb.run_until_break()
gdb._execute('signal SIGTERM')
gdb.continue_execution_until_error()
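# SIGTERM during pg_stop_backup likewise leaves the PAGE backup in ERROR state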
page_id = self.show_pb(backup_dir, 'node')[1]['id']
# Take DELTA backup
delta_id = self.backup_node(
backup_dir, 'node', node, backup_type='delta',
options=['--retention-window=2', '--delete-expired'])
self.assertEqual(len(self.show_pb(backup_dir, 'node')), 3)
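# the FULL, the ERROR PAGE and the DELTA are all inside the retention window, so none of them should be purged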
# Clean after yourself
# self.del_test_dir(module_name, fname)
def test_retention_redundancy_overlapping_chains(self):
""""""
fname = self.id().split('.')[3]
@ -1243,6 +1375,10 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'])
if self.get_version(node) < 90600:
self.del_test_dir(module_name, fname)
return unittest.skip('Skipped because ptrack support is disabled')
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
@ -1277,3 +1413,91 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_retention_redundancy_overlapping_chains_1(self):
""""""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'])
if self.get_version(node) < 90600:
self.del_test_dir(module_name, fname)
return unittest.skip('Skipped because ptrack support is disabled')
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
self.set_config(
backup_dir, 'node', options=['--retention-redundancy=1'])
# Make backups to be purged
self.backup_node(backup_dir, 'node', node)
self.backup_node(backup_dir, 'node', node, backup_type="page")
# Make backups to be kept
gdb = self.backup_node(backup_dir, 'node', node, gdb=True)
gdb.set_breakpoint('backup_files')
gdb.run_until_break()
sleep(1)
self.backup_node(backup_dir, 'node', node, backup_type="page")
gdb.remove_all_breakpoints()
gdb.continue_execution_until_exit()
self.backup_node(backup_dir, 'node', node, backup_type="page")
# Purge backups
log = self.delete_expired(
backup_dir, 'node', options=['--expired', '--wal'])
self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2)
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_wal_purge_victim(self):
"""
https://github.com/postgrespro/pg_probackup/issues/103
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'])
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# Make ERROR incremental backup
try:
self.backup_node(backup_dir, 'node', node, backup_type='page')
# we should die here because exception is what we expect to happen
self.assertEqual(
1, 0,
"Expecting Error because page backup should not be possible "
"without valid full backup.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertIn(
"ERROR: Valid backup on current timeline 1 is not found. "
"Create new FULL backup before an incremental one.",
e.message,
"\n Unexpected Error Message: {0}\n CMD: {1}".format(
repr(e.message), self.cmd))
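# the failed PAGE attempt above still leaves an ERROR backup record in the catalog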
page_id = self.show_pb(backup_dir, 'node')[0]['id']
sleep(1)
# Make FULL backup
self.backup_node(backup_dir, 'node', node, options=['--delete-wal'])
# Clean after yourself
self.del_test_dir(module_name, fname)

@ -304,7 +304,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
def test_validate_corrupted_intermediate_backups(self):
"""
make archive node, take FULL, PAGE1, PAGE2 backups,
corrupt file in FULL and PAGE1 backupd, run validate on PAGE1,
corrupt file in FULL and PAGE1 backups, run validate on PAGE1,
expect FULL and PAGE1 to gain status CORRUPT and
PAGE2 gain status ORPHAN
"""
@ -3443,6 +3443,391 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.expectedFailure
# @unittest.skip("skip")
def test_validate_target_lsn(self):
"""
Check validation to specific LSN
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# FULL backup
self.backup_node(backup_dir, 'node', node)
node.safe_psql(
"postgres",
"create table t_heap as select 1 as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,10000) i")
node_restored = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node_restored'))
node_restored.cleanup()
self.restore_node(backup_dir, 'node', node_restored)
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.slow_start()
self.switch_wal_segment(node)
backup_id = self.backup_node(
backup_dir, 'node', node_restored,
data_dir=node_restored.data_dir)
target_lsn = self.show_pb(backup_dir, 'node')[1]['stop-lsn']
self.delete_pb(backup_dir, 'node', backup_id)
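# with that backup deleted, only its archived WAL on timeline 2 remains for validation up to target_lsn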
self.validate_pb(
backup_dir, 'node',
options=[
'--recovery-target-timeline=2',
'--recovery-target-lsn={0}'.format(target_lsn)])
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.expectedFailure
# @unittest.skip("skip")
def test_recovery_target_time_backup_victim(self):
"""
Check that for validation to recovery target
probackup chooses valid backup
https://github.com/postgrespro/pg_probackup/issues/104
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# FULL backup
self.backup_node(backup_dir, 'node', node)
node.safe_psql(
"postgres",
"create table t_heap as select 1 as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,10000) i")
target_time = node.safe_psql(
"postgres",
"select now()").rstrip()
node.safe_psql(
"postgres",
"create table t_heap1 as select 1 as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,100) i")
gdb = self.backup_node(backup_dir, 'node', node, gdb=True)
gdb.set_breakpoint('pg_stop_backup')
gdb.run_until_break()
gdb.remove_all_breakpoints()
gdb._execute('signal SIGINT')
gdb.continue_execution_until_error()
backup_id = self.show_pb(backup_dir, 'node')[1]['id']
self.assertEqual(
'ERROR',
self.show_pb(backup_dir, 'node', backup_id)['status'],
'Backup STATUS should be "ERROR"')
self.validate_pb(
backup_dir, 'node',
options=['--recovery-target-time={0}'.format(target_time)])
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.expectedFailure
# @unittest.skip("skip")
def test_recovery_target_lsn_backup_victim(self):
"""
Check that for validation to recovery target
probackup chooses valid backup
https://github.com/postgrespro/pg_probackup/issues/104
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# FULL backup
self.backup_node(backup_dir, 'node', node)
node.safe_psql(
"postgres",
"create table t_heap as select 1 as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,10000) i")
node.safe_psql(
"postgres",
"create table t_heap1 as select 1 as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,100) i")
gdb = self.backup_node(backup_dir, 'node', node, gdb=True)
gdb.set_breakpoint('pg_stop_backup')
gdb.run_until_break()
gdb.remove_all_breakpoints()
gdb._execute('signal SIGINT')
gdb.continue_execution_until_error()
backup_id = self.show_pb(backup_dir, 'node')[1]['id']
self.assertEqual(
'ERROR',
self.show_pb(backup_dir, 'node', backup_id)['status'],
'Backup STATUS should be "ERROR"')
target_lsn = self.show_pb(backup_dir, 'node', backup_id)['start-lsn']
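# target_lsn comes from the ERROR backup, but validation must still pick the valid FULL backup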
self.validate_pb(
backup_dir, 'node',
options=['--recovery-target-lsn={0}'.format(target_lsn)])
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_partial_validate_empty_and_mangled_database_map(self):
"""
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
# create databases
for i in range(1, 10, 1):
node.safe_psql(
'postgres',
'CREATE database db{0}'.format(i))
# FULL backup with database_map
backup_id = self.backup_node(
backup_dir, 'node', node, options=['--stream'])
pgdata = self.pgdata_content(node.data_dir)
# truncate database_map
path = os.path.join(
backup_dir, 'backups', 'node',
backup_id, 'database', 'database_map')
with open(path, "w") as f:
f.close()
try:
self.validate_pb(
backup_dir, 'node',
options=["--db-include=db1"])
self.assertEqual(
1, 0,
"Expecting Error because database_map is empty.\n "
"Output: {0} \n CMD: {1}".format(
self.output, self.cmd))
except ProbackupException as e:
self.assertIn(
"WARNING: Backup {0} data files are corrupted".format(
backup_id), e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
# mangle database_map
with open(path, "w") as f:
f.write("42")
f.close()
try:
self.validate_pb(
backup_dir, 'node',
options=["--db-include=db1"])
self.assertEqual(
1, 0,
"Expecting Error because database_map is empty.\n "
"Output: {0} \n CMD: {1}".format(
self.output, self.cmd))
except ProbackupException as e:
self.assertIn(
"WARNING: Backup {0} data files are corrupted".format(
backup_id), e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_partial_validate_exclude(self):
""""""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
for i in range(1, 10, 1):
node.safe_psql(
'postgres',
'CREATE database db{0}'.format(i))
# FULL backup
backup_id = self.backup_node(backup_dir, 'node', node)
try:
self.validate_pb(
backup_dir, 'node',
options=[
"--db-include=db1",
"--db-exclude=db2"])
self.assertEqual(
1, 0,
"Expecting Error because of 'db-exclude' and 'db-include'.\n "
"Output: {0} \n CMD: {1}".format(
self.output, self.cmd))
except ProbackupException as e:
self.assertIn(
"ERROR: You cannot specify '--db-include' "
"and '--db-exclude' together", e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
try:
self.validate_pb(
backup_dir, 'node',
options=[
"--db-exclude=db1",
"--db-exclude=db5",
"--log-level-console=verbose"])
self.assertEqual(
1, 0,
"Expecting Error because of missing backup ID.\n "
"Output: {0} \n CMD: {1}".format(
self.output, self.cmd))
except ProbackupException as e:
self.assertIn(
"ERROR: You must specify parameter (-i, --backup-id) for partial validation",
e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
output = self.validate_pb(
backup_dir, 'node', backup_id,
options=[
"--db-exclude=db1",
"--db-exclude=db5",
"--log-level-console=verbose"])
self.assertIn(
"VERBOSE: Skip file validation due to partial restore", output)
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_partial_validate_include(self):
"""
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
for i in range(1, 10, 1):
node.safe_psql(
'postgres',
'CREATE database db{0}'.format(i))
# FULL backup
backup_id = self.backup_node(backup_dir, 'node', node)
try:
self.validate_pb(
backup_dir, 'node',
options=[
"--db-include=db1",
"--db-exclude=db2"])
self.assertEqual(
1, 0,
"Expecting Error because of 'db-exclude' and 'db-include'.\n "
"Output: {0} \n CMD: {1}".format(
self.output, self.cmd))
except ProbackupException as e:
self.assertIn(
"ERROR: You cannot specify '--db-include' "
"and '--db-exclude' together", e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
output = self.validate_pb(
backup_dir, 'node', backup_id,
options=[
"--db-include=db1",
"--db-include=db5",
"--db-include=postgres",
"--log-level-console=verbose"])
self.assertIn(
"VERBOSE: Skip file validation due to partial restore", output)
output = self.validate_pb(
backup_dir, 'node', backup_id,
options=["--log-level-console=verbose"])
self.assertNotIn(
"VERBOSE: Skip file validation due to partial restore", output)
# Clean after yourself
self.del_test_dir(module_name, fname)
# validate empty backup list
# page from future during validate
# page from future during backup

@ -27,7 +27,7 @@ yum install -y postgresql95-devel make gcc readline-devel openssl-devel pam-deve
make top_srcdir=postgresql-$PGVERSION
make install top_srcdir=postgresql-$PGVERSION
# initalize cluster and database
# initialize cluster and database
yum install -y postgresql95-server
su postgres -c "/usr/pgsql-9.5/bin/initdb -D $PGDATA -k"
cat <<EOF > $PGDATA/pg_hba.conf