
Merge branch 'master' into pgpro-2065

Grigory Smolkin 2019-03-30 16:57:02 +03:00
commit 8964f2aff3
71 changed files with 10881 additions and 4444 deletions


@ -47,7 +47,7 @@ endif
PG_CPPFLAGS = -I$(libpq_srcdir) ${PTHREAD_CFLAGS} -Isrc -I$(top_srcdir)/$(subdir)/src
override CPPFLAGS := -DFRONTEND $(CPPFLAGS) $(PG_CPPFLAGS)
PG_LIBS = $(libpq_pgport) ${PTHREAD_CFLAGS}
PG_LIBS_INTERNAL = $(libpq_pgport) ${PTHREAD_CFLAGS}
all: checksrcdir $(INCLUDES);


@ -10,11 +10,13 @@ As compared to other backup solutions, `pg_probackup` offers the following benef
* Implementing a single backup strategy for multi-server PostgreSQL clusters
* Automatic data consistency checks and on-demand backup validation without actual data recovery
* Managing backups in accordance with retention policy
* Running backup, restore, and validation processes on multiple parallel threads
* Merging incremental into full backups without actual data recovery
* Running backup, restore, merge and validation processes on multiple parallel threads
* Storing backup data in a compressed state to save disk space
* Taking backups from a standby server to avoid extra load on the master server
* Extended logging settings
* Custom commands to simplify WAL log archiving
* Directories external to PGDATA, such as directories with configuration files and scripts, can be included in the backup
To manage backup data, `pg_probackup` creates a backup catalog. This directory stores all backup files with additional meta information, as well as WAL archives required for [point-in-time recovery](https://postgrespro.com/docs/postgresql/current/continuous-archiving.html). You can store backups for different instances in separate subdirectories of a single backup catalog.
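For example, a minimal catalog setup might look like the following sketch, using the standard `pg_probackup` commands; the backup directory, data directory, and instance name below are placeholders:
```shell
# initialize the backup catalog in an empty directory of your choice
pg_probackup init -B /mnt/backups

# register a PostgreSQL instance in the catalog under an arbitrary name
pg_probackup add-instance -B /mnt/backups -D /var/lib/postgresql/11/main --instance main
```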
@ -39,8 +41,7 @@ Regardless of the chosen backup type, all backups taken with `pg_probackup` supp
`pg_probackup` currently has the following limitations:
* Creating backups from a remote server is currently not supported.
* The server from which the backup was taken and the restored server must be compatible by the [block_size](https://postgrespro.com/docs/postgresql/current/runtime-config-preset#GUC-BLOCK-SIZE) and [wal_block_size](https://postgrespro.com/docs/postgresql/current/runtime-config-preset#GUC-WAL-BLOCK-SIZE) parameters and have the same major release number.
* Microsoft Windows operating system is not supported.
* Configuration files outside of the PostgreSQL data directory are not included in the backup and should be backed up separately.
* Microsoft Windows operating system support is in beta stage.
## Installation and Setup
### Linux Installation
@ -71,13 +72,28 @@ yum install pg_probackup-{11,10,9.6,9.5}
yumdownloader --source pg_probackup-{11,10,9.6,9.5}
```
Once you have `pg_probackup` installed, complete [the setup](https://postgrespro.com/docs/postgrespro/current/app-pgprobackup.html#pg-probackup-install-and-setup).
## Building from source
### Linux
To compile `pg_probackup`, you must have a PostgreSQL installation and raw source tree. To install `pg_probackup`, execute this in the module's directory:
```shell
make USE_PGXS=1 PG_CONFIG=<path_to_pg_config> top_srcdir=<path_to_PostgreSQL_source_tree>
```
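For example, an out-of-tree build against an installed PostgreSQL 11 might look like this; both paths are placeholders for your own `pg_config` and PostgreSQL source tree:
```shell
make USE_PGXS=1 PG_CONFIG=/usr/lib/postgresql/11/bin/pg_config top_srcdir=/home/user/postgresql-11.2
```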
### Windows
Once you have `pg_probackup` installed, complete [the setup](https://postgrespro.com/docs/postgrespro/current/app-pgprobackup.html#pg-probackup-install-and-setup).
Currently pg_probackup can be built only with MSVC 2013.
Build PostgreSQL with MSVC 2013, using either [pgwininstall](https://github.com/postgrespro/pgwininstall) or the [PostgreSQL instructions](https://www.postgresql.org/docs/10/install-windows-full.html).
If zlib support is needed, src/tools/msvc/config.pl must contain the path to the directory with compiled zlib. [Example](https://gist.githubusercontent.com/gsmol/80989f976ce9584824ae3b1bfb00bd87/raw/240032950d4ac4801a79625dd00c8f5d4ed1180c/gistfile1.txt)
```shell
CALL "C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall" amd64
SET PATH=%PATH%;C:\Perl64\bin
SET PATH=%PATH%;C:\msys64\usr\bin
gen_probackup_project.pl C:\path_to_postgresql_source_tree
```
## Documentation


@ -129,6 +129,7 @@ static void *StreamLog(void *arg);
static void get_remote_pgdata_filelist(parray *files);
static void ReceiveFileList(parray* files, PGconn *conn, PGresult *res, int rownum);
static void remote_copy_file(PGconn *conn, pgFile* file);
static void check_external_for_tablespaces(parray *external_list);
/* Ptrack functions */
static void pg_ptrack_clear(void);
@ -419,7 +420,7 @@ remote_backup_files(void *arg)
instance_config.pguser);
/* check for interrupt */
if (interrupted)
if (interrupted || thread_interrupted)
elog(ERROR, "interrupted during backup");
query_str = psprintf("FILE_BACKUP FILEPATH '%s'",file->path);
@ -482,6 +483,7 @@ do_backup_instance(void)
{
int i;
char database_path[MAXPGPATH];
char external_prefix[MAXPGPATH]; /* Temp value. Used as template */
char dst_backup_path[MAXPGPATH];
char label[1024];
XLogRecPtr prev_backup_start_lsn = InvalidXLogRecPtr;
@ -494,10 +496,16 @@ do_backup_instance(void)
pgBackup *prev_backup = NULL;
parray *prev_backup_filelist = NULL;
parray *backup_list = NULL;
parray *external_dirs = NULL;
pgFile *pg_control = NULL;
elog(LOG, "Database backup start");
if(current.external_dir_str)
{
external_dirs = make_external_directory_list(current.external_dir_str);
check_external_for_tablespaces(external_dirs);
}
/* Initialize size summary */
current.data_bytes = 0;
@ -551,7 +559,7 @@ do_backup_instance(void)
pgBackupGetPath(prev_backup, prev_backup_filelist_path,
lengthof(prev_backup_filelist_path), DATABASE_FILE_LIST);
/* Files of previous backup needed by DELTA backup */
prev_backup_filelist = dir_read_file_list(NULL, prev_backup_filelist_path);
prev_backup_filelist = dir_read_file_list(NULL, NULL, prev_backup_filelist_path);
/* If lsn is not NULL, only pages with higher lsn will be copied. */
prev_backup_start_lsn = prev_backup->start_lsn;
@ -588,8 +596,13 @@ do_backup_instance(void)
strlen(" with pg_probackup"));
pg_start_backup(label, smooth_checkpoint, &current);
/* Update running backup meta with START LSN */
write_backup(&current);
pgBackupGetPath(&current, database_path, lengthof(database_path),
DATABASE_DIR);
pgBackupGetPath(&current, external_prefix, lengthof(external_prefix),
EXTERNAL_DIR);
/* start stream replication */
if (stream_wal)
@ -632,6 +645,7 @@ do_backup_instance(void)
/* By default there are some error */
stream_thread_arg.ret = 1;
thread_interrupted = false;
pthread_create(&stream_thread, NULL, StreamLog, &stream_thread_arg);
}
@ -642,8 +656,26 @@ do_backup_instance(void)
if (is_remote_backup)
get_remote_pgdata_filelist(backup_files_list);
else
dir_list_file(backup_files_list, instance_config.pgdata,
true, true, false);
dir_list_file(backup_files_list, instance_config.pgdata, true, true, false, 0);
/*
* Append to backup list all files and directories
* from external directory option
*/
if (external_dirs)
for (i = 0; i < parray_num(external_dirs); i++)
/* External dirs numbering starts with 1;
* a value of 0 means the file is not in an external directory */
dir_list_file(backup_files_list, parray_get(external_dirs, i),
false, true, false, i+1);
/* Sanity check for backup_files_list, thank you, Windows:
* https://github.com/postgrespro/pg_probackup/issues/48
*/
if (parray_num(backup_files_list) == 0)
elog(ERROR, "PGDATA is empty. Either it was concurrently deleted or "
"pg_probackup do not possess sufficient permissions to list PGDATA content");
/*
* Sort pathname ascending. It is necessary to create intermediate
@ -681,8 +713,7 @@ do_backup_instance(void)
* where this backup has started.
*/
extractPageMap(arclog_path, current.tli, instance_config.xlog_seg_size,
prev_backup->start_lsn, current.start_lsn,
backup_files_list);
prev_backup->start_lsn, current.start_lsn);
}
else if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
{
@ -704,18 +735,28 @@ do_backup_instance(void)
{
char dirpath[MAXPGPATH];
char *dir_name;
char database_path[MAXPGPATH];
if (!is_remote_backup)
dir_name = GetRelativePath(file->path, instance_config.pgdata);
if (file->external_dir_num)
dir_name = GetRelativePath(file->path,
parray_get(external_dirs,
file->external_dir_num - 1));
else
dir_name = GetRelativePath(file->path, instance_config.pgdata);
else
dir_name = file->path;
elog(VERBOSE, "Create directory \"%s\"", dir_name);
pgBackupGetPath(&current, database_path, lengthof(database_path),
DATABASE_DIR);
join_path_components(dirpath, database_path, dir_name);
if (file->external_dir_num)
{
char temp[MAXPGPATH];
snprintf(temp, MAXPGPATH, "%s%d", external_prefix,
file->external_dir_num);
join_path_components(dirpath, temp, dir_name);
}
else
join_path_components(dirpath, database_path, dir_name);
dir_create_dir(dirpath, DIR_PERMISSION);
}
@ -727,7 +768,7 @@ do_backup_instance(void)
parray_qsort(backup_files_list, pgFileCompareSize);
/* Sort the array for binary search */
if (prev_backup_filelist)
parray_qsort(prev_backup_filelist, pgFileComparePath);
parray_qsort(prev_backup_filelist, pgFileComparePathWithExternal);
/* init thread args with own file lists */
threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads);
@ -739,6 +780,8 @@ do_backup_instance(void)
arg->from_root = instance_config.pgdata;
arg->to_root = database_path;
arg->external_prefix = external_prefix;
arg->external_dirs = external_dirs;
arg->files_list = backup_files_list;
arg->prev_filelist = prev_backup_filelist;
arg->prev_start_lsn = prev_backup_start_lsn;
@ -749,6 +792,7 @@ do_backup_instance(void)
}
/* Run threads */
thread_interrupted = false;
elog(INFO, "Start transfering data files");
for (i = 0; i < num_threads; i++)
{
@ -774,6 +818,19 @@ do_backup_instance(void)
else
elog(ERROR, "Data files transferring failed");
/* Remove disappeared during backup files from backup_list */
for (i = 0; i < parray_num(backup_files_list); i++)
{
pgFile *tmp_file = (pgFile *) parray_get(backup_files_list, i);
if (tmp_file->write_size == FILE_NOT_FOUND)
{
pg_atomic_clear_flag(&tmp_file->lock);
pgFileFree(tmp_file);
parray_remove(backup_files_list, i);
}
}
/* clean previous backup file list */
if (prev_backup_filelist)
{
@ -819,7 +876,7 @@ do_backup_instance(void)
/* Scan backup PG_XLOG_DIR */
xlog_files_list = parray_new();
join_path_components(pg_xlog_path, database_path, PG_XLOG_DIR);
dir_list_file(xlog_files_list, pg_xlog_path, false, true, false);
dir_list_file(xlog_files_list, pg_xlog_path, false, true, false, 0);
for (i = 0; i < parray_num(xlog_files_list); i++)
{
@ -843,7 +900,12 @@ do_backup_instance(void)
}
/* Print the list of files to backup catalog */
write_backup_filelist(&current, backup_files_list, instance_config.pgdata);
write_backup_filelist(&current, backup_files_list, instance_config.pgdata,
NULL, external_dirs);
/* clean external directories list */
if (external_dirs)
free_dir_list(external_dirs);
/* Compute summary of size of regular files in the backup */
for (i = 0; i < parray_num(backup_files_list); i++)
@ -887,7 +949,7 @@ do_block_validation(void)
backup_files_list = parray_new();
/* list files with the logical path. omit $PGDATA */
dir_list_file(backup_files_list, instance_config.pgdata, true, true, false);
dir_list_file(backup_files_list, instance_config.pgdata, true, true, false, 0);
/*
* Sort pathname ascending. It is necessary to create intermediate
@ -954,7 +1016,7 @@ do_block_validation(void)
for (i = 0; i < num_threads; i++)
{
pthread_join(threads[i], NULL);
if (threads_args[i].ret == 1)
if (threads_args[i].ret > 0)
backup_isok = false;
}
@ -1085,8 +1147,8 @@ do_checkdb(bool need_block_validation, bool need_amcheck)
if (need_block_validation)
do_block_validation();
if (need_amcheck)
do_amcheck();
//if (need_amcheck)
// do_amcheck();
return 0;
}
@ -1199,9 +1261,6 @@ do_backup(time_t start_time)
instance_config.master_user);
}
/* Get exclusive lock of backup catalog */
catalog_lock();
/*
* Ensure that backup directory was initialized for the same PostgreSQL
* instance we opened connection to. And that target backup database PGDATA
@ -1211,16 +1270,25 @@ do_backup(time_t start_time)
if (!is_remote_backup)
check_system_identifiers();
/* Start backup. Update backup status. */
current.status = BACKUP_STATUS_RUNNING;
current.start_time = start_time;
StrNCpy(current.program_version, PROGRAM_VERSION,
sizeof(current.program_version));
/* Save list of external directories */
if (instance_config.external_dir_str &&
pg_strcasecmp(instance_config.external_dir_str, "none") != 0)
{
current.external_dir_str = instance_config.external_dir_str;
}
/* Create backup directory and BACKUP_CONTROL_FILE */
if (pgBackupCreateDir(&current))
elog(ERROR, "cannot create backup directory");
elog(ERROR, "Cannot create backup directory");
if (!lock_backup(&current))
elog(ERROR, "Cannot lock backup %s directory",
base36enc(current.start_time));
write_backup(&current);
elog(LOG, "Backup destination is initialized");
@ -1524,7 +1592,7 @@ pg_ptrack_enable(void)
{
PGresult *res_db;
res_db = pgut_execute(backup_conn, "show ptrack_enable", 0, NULL);
res_db = pgut_execute(backup_conn, "SHOW ptrack_enable", 0, NULL);
if (strcmp(PQgetvalue(res_db, 0, 0), "on") != 0)
{
@ -1541,7 +1609,7 @@ pg_checksum_enable(void)
{
PGresult *res_db;
res_db = pgut_execute(backup_conn, "show data_checksums", 0, NULL);
res_db = pgut_execute(backup_conn, "SHOW data_checksums", 0, NULL);
if (strcmp(PQgetvalue(res_db, 0, 0), "on") != 0)
{
@ -2268,7 +2336,7 @@ pg_stop_backup(pgBackup *backup)
*/
if (backup_files_list)
{
file = pgFileNew(backup_label, true);
file = pgFileNew(backup_label, true, 0);
calc_file_checksum(file);
free(file->path);
file->path = strdup(PG_BACKUP_LABEL_FILE);
@ -2312,7 +2380,7 @@ pg_stop_backup(pgBackup *backup)
if (backup_files_list)
{
file = pgFileNew(tablespace_map, true);
file = pgFileNew(tablespace_map, true, 0);
if (S_ISREG(file->mode))
calc_file_checksum(file);
free(file->path);
@ -2488,10 +2556,11 @@ check_files(void *arg)
struct stat buf;
pgFile *file = (pgFile *) parray_get(arguments->files_list, i);
elog(VERBOSE, "Checking file: \"%s\" ", file->path);
if (!pg_atomic_test_set_flag(&file->lock))
continue;
elog(VERBOSE, "Checking file: \"%s\" ", file->path);
/* check for interrupt */
if (interrupted)
elog(ERROR, "interrupted during checkdb");
@ -2537,20 +2606,16 @@ check_files(void *arg)
file->path + strlen(arguments->from_root) + 1);
if (!check_data_file(arguments, file))
arguments->ret = 1;
arguments->ret = 2;
}
}
else
elog(WARNING, "unexpected file type %d", buf.st_mode);
}
/* Close connection */
if (arguments->backup_conn)
pgut_disconnect(arguments->backup_conn);
/* Data files transferring is successful */
/* TODO where should we set arguments->ret to 1? */
arguments->ret = 0;
/* Data files check is successful */
if (arguments->ret == 1)
arguments->ret = 0;
return NULL;
}
@ -2643,12 +2708,12 @@ backup_files(void *arg)
struct stat buf;
pgFile *file = (pgFile *) parray_get(arguments->files_list, i);
elog(VERBOSE, "Copying file: \"%s\" ", file->path);
if (!pg_atomic_test_set_flag(&file->lock))
continue;
elog(VERBOSE, "Copying file: \"%s\" ", file->path);
/* check for interrupt */
if (interrupted)
if (interrupted || thread_interrupted)
elog(ERROR, "interrupted during backup");
if (progress)
@ -2665,7 +2730,7 @@ backup_files(void *arg)
* If the file is not found, this is not an error.
* It could have been deleted by concurrent postgres transaction.
*/
file->write_size = BYTES_INVALID;
file->write_size = FILE_NOT_FOUND;
elog(LOG, "File \"%s\" is not found", file->path);
continue;
}
@ -2684,6 +2749,11 @@ backup_files(void *arg)
if (S_ISREG(buf.st_mode))
{
pgFile **prev_file = NULL;
char *external_path = NULL;
if (file->external_dir_num)
external_path = parray_get(arguments->external_dirs,
file->external_dir_num - 1);
/* Check that file exist in previous backup */
if (current.backup_mode != BACKUP_MODE_FULL)
@ -2691,11 +2761,13 @@ backup_files(void *arg)
char *relative;
pgFile key;
relative = GetRelativePath(file->path, arguments->from_root);
relative = GetRelativePath(file->path, file->external_dir_num ?
external_path : arguments->from_root);
key.path = relative;
key.external_dir_num = file->external_dir_num;
prev_file = (pgFile **) parray_bsearch(arguments->prev_filelist,
&key, pgFileComparePath);
&key, pgFileComparePathWithExternal);
if (prev_file)
/* File exists in previous backup */
file->exists_in_prev = true;
@ -2716,17 +2788,23 @@ backup_files(void *arg)
instance_config.compress_alg,
instance_config.compress_level))
{
file->write_size = BYTES_INVALID;
/* disappeared file not to be confused with 'not changed' */
if (file->write_size != FILE_NOT_FOUND)
file->write_size = BYTES_INVALID;
elog(VERBOSE, "File \"%s\" was not copied to backup", file->path);
continue;
}
}
else if (strcmp(file->name, "pg_control") == 0)
else if (!file->external_dir_num &&
strcmp(file->name, "pg_control") == 0)
copy_pgcontrol_file(arguments->from_root, arguments->to_root,
file);
else
{
const char *src;
const char *dst;
bool skip = false;
char external_dst[MAXPGPATH];
/* If non-data file has not changed since last backup... */
if (prev_file && file->exists_in_prev &&
@ -2737,10 +2815,25 @@ backup_files(void *arg)
if (EQ_TRADITIONAL_CRC32(file->crc, (*prev_file)->crc))
skip = true; /* ...skip copying file. */
}
if (skip ||
!copy_file(arguments->from_root, arguments->to_root, file))
/* Set file paths */
if (file->external_dir_num)
{
file->write_size = BYTES_INVALID;
makeExternalDirPathByNum(external_dst,
arguments->external_prefix,
file->external_dir_num);
src = external_path;
dst = external_dst;
}
else
{
src = arguments->from_root;
dst = arguments->to_root;
}
if (skip || !copy_file(src, dst, file))
{
/* disappeared file not to be confused with 'not changed' */
if (file->write_size != FILE_NOT_FOUND)
file->write_size = BYTES_INVALID;
elog(VERBOSE, "File \"%s\" was not copied to backup",
file->path);
continue;
@ -3090,7 +3183,7 @@ stop_streaming(XLogRecPtr xlogpos, uint32 timeline, bool segment_finished)
static XLogRecPtr prevpos = InvalidXLogRecPtr;
/* check for interrupt */
if (interrupted)
if (interrupted || thread_interrupted)
elog(ERROR, "Interrupted during backup");
/* we assume that we get called once at the end of each segment */
@ -3167,6 +3260,20 @@ StreamLog(void *arg)
stream_stop_timeout = 0;
stream_stop_begin = 0;
#if PG_VERSION_NUM >= 100000
/* if slot name was not provided for temp slot, use default slot name */
if (!replication_slot && temp_slot)
replication_slot = "pg_probackup_slot";
#endif
#if PG_VERSION_NUM >= 110000
/* Create temp repslot */
if (temp_slot)
CreateReplicationSlot(stream_arg->conn, replication_slot,
NULL, temp_slot, true, true, false);
#endif
/*
* Start the replication
*/
@ -3187,6 +3294,9 @@ StreamLog(void *arg)
ctl.walmethod = CreateWalDirectoryMethod(stream_arg->basedir, 0, true);
ctl.replication_slot = replication_slot;
ctl.stop_socket = PGINVALID_SOCKET;
#if PG_VERSION_NUM >= 100000 && PG_VERSION_NUM < 110000
ctl.temp_slot = temp_slot;
#endif
#else
ctl.basedir = (char *) stream_arg->basedir;
#endif
@ -3317,6 +3427,45 @@ pg_ptrack_get_block(backup_files_arg *arguments,
return result;
}
static void
check_external_for_tablespaces(parray *external_list)
{
PGconn *conn;
PGresult *res;
int i = 0;
int j = 0;
char *tablespace_path = NULL;
char *query = "SELECT pg_catalog.pg_tablespace_location(oid)\n"
"FROM pg_tablespace\n"
"WHERE pg_catalog.pg_tablespace_location(oid) <> '';";
conn = backup_conn;
res = pgut_execute(conn, query, 0, NULL);
/* Check successful execution of the query */
if (!res)
elog(ERROR, "Failed to get list of tablespaces");
for (i = 0; i < res->ntups; i++)
{
tablespace_path = PQgetvalue(res, i, 0);
Assert (strlen(tablespace_path) > 0);
for (j = 0; j < parray_num(external_list); j++)
{
char *external_path = parray_get(external_list, j);
if (path_is_prefix_of_path(external_path, tablespace_path))
elog(ERROR, "External directory path (-E option) \"%s\" "
"contains tablespace \"%s\"",
external_path, tablespace_path);
if (path_is_prefix_of_path(tablespace_path, external_path))
elog(WARNING, "External directory path (-E option) \"%s\" "
"is in tablespace directory \"%s\"",
tablespace_path, external_path);
}
}
PQclear(res);
}
/* Clear ptrack files in all databases of the instance we connected to */
static parray*
get_index_list(PGresult *res_db, int db_number,
@ -3477,4 +3626,4 @@ amcheck_one_index(backup_files_arg *arguments,
pfree(params[0]);
PQclear(res);
return true;
}
}


@ -21,23 +21,82 @@ static const char *backupModes[] = {"", "PAGE", "PTRACK", "DELTA", "FULL"};
static pgBackup *readBackupControlFile(const char *path);
static bool exit_hook_registered = false;
static char lock_file[MAXPGPATH];
static parray *lock_files = NULL;
static void
unlink_lock_atexit(void)
{
int res;
res = unlink(lock_file);
if (res != 0 && res != ENOENT)
elog(WARNING, "%s: %s", lock_file, strerror(errno));
int i;
if (lock_files == NULL)
return;
for (i = 0; i < parray_num(lock_files); i++)
{
char *lock_file = (char *) parray_get(lock_files, i);
int res;
res = unlink(lock_file);
if (res != 0 && errno != ENOENT)
elog(WARNING, "%s: %s", lock_file, strerror(errno));
}
parray_walk(lock_files, pfree);
parray_free(lock_files);
lock_files = NULL;
}
/*
* Create a lockfile.
* Read backup meta information from BACKUP_CONTROL_FILE.
* If no backup matches, return NULL.
*/
pgBackup *
read_backup(time_t timestamp)
{
pgBackup tmp;
char conf_path[MAXPGPATH];
tmp.start_time = timestamp;
pgBackupGetPath(&tmp, conf_path, lengthof(conf_path), BACKUP_CONTROL_FILE);
return readBackupControlFile(conf_path);
}
/*
* Save the backup status into BACKUP_CONTROL_FILE.
*
* We need to reread the backup using its ID and save it changing only its
* status.
*/
void
catalog_lock(void)
write_backup_status(pgBackup *backup, BackupStatus status)
{
pgBackup *tmp;
tmp = read_backup(backup->start_time);
if (!tmp)
{
/*
* Silently exit the function, since read_backup already logged the
* warning message.
*/
return;
}
backup->status = status;
tmp->status = backup->status;
write_backup(tmp);
pgBackupFree(tmp);
}
/*
* Create exclusive lockfile in the backup's directory.
*/
bool
lock_backup(pgBackup *backup)
{
char lock_file[MAXPGPATH];
int fd;
char buffer[MAXPGPATH * 2 + 256];
int ntries;
@ -46,7 +105,7 @@ catalog_lock(void)
pid_t my_pid,
my_p_pid;
join_path_components(lock_file, backup_instance_path, BACKUP_CATALOG_PID);
pgBackupGetPath(backup, lock_file, lengthof(lock_file), BACKUP_CATALOG_PID);
/*
* If the PID in the lockfile is our own PID or our parent's or
@ -99,7 +158,7 @@ catalog_lock(void)
* Couldn't create the pid file. Probably it already exists.
*/
if ((errno != EEXIST && errno != EACCES) || ntries > 100)
elog(ERROR, "could not create lock file \"%s\": %s",
elog(ERROR, "Could not create lock file \"%s\": %s",
lock_file, strerror(errno));
/*
@ -111,22 +170,22 @@ catalog_lock(void)
{
if (errno == ENOENT)
continue; /* race condition; try again */
elog(ERROR, "could not open lock file \"%s\": %s",
elog(ERROR, "Could not open lock file \"%s\": %s",
lock_file, strerror(errno));
}
if ((len = read(fd, buffer, sizeof(buffer) - 1)) < 0)
elog(ERROR, "could not read lock file \"%s\": %s",
elog(ERROR, "Could not read lock file \"%s\": %s",
lock_file, strerror(errno));
close(fd);
if (len == 0)
elog(ERROR, "lock file \"%s\" is empty", lock_file);
elog(ERROR, "Lock file \"%s\" is empty", lock_file);
buffer[len] = '\0';
encoded_pid = atoi(buffer);
if (encoded_pid <= 0)
elog(ERROR, "bogus data in lock file \"%s\": \"%s\"",
elog(ERROR, "Bogus data in lock file \"%s\": \"%s\"",
lock_file, buffer);
/*
@ -140,9 +199,21 @@ catalog_lock(void)
*/
if (encoded_pid != my_pid && encoded_pid != my_p_pid)
{
if (kill(encoded_pid, 0) == 0 ||
(errno != ESRCH && errno != EPERM))
elog(ERROR, "lock file \"%s\" already exists", lock_file);
if (kill(encoded_pid, 0) == 0)
{
elog(WARNING, "Process %d is using backup %s and still is running",
encoded_pid, base36enc(backup->start_time));
return false;
}
else
{
if (errno == ESRCH)
elog(WARNING, "Process %d which used backup %s no longer exists",
encoded_pid, base36enc(backup->start_time));
else
elog(ERROR, "Failed to send signal 0 to a process %d: %s",
encoded_pid, strerror(errno));
}
}
/*
@ -151,7 +222,7 @@ catalog_lock(void)
* would-be creators.
*/
if (unlink(lock_file) < 0)
elog(ERROR, "could not remove old lock file \"%s\": %s",
elog(ERROR, "Could not remove old lock file \"%s\": %s",
lock_file, strerror(errno));
}
@ -169,7 +240,7 @@ catalog_lock(void)
unlink(lock_file);
/* if write didn't set errno, assume problem is no disk space */
errno = save_errno ? save_errno : ENOSPC;
elog(ERROR, "could not write lock file \"%s\": %s",
elog(ERROR, "Could not write lock file \"%s\": %s",
lock_file, strerror(errno));
}
if (fsync(fd) != 0)
@ -179,7 +250,7 @@ catalog_lock(void)
close(fd);
unlink(lock_file);
errno = save_errno;
elog(ERROR, "could not write lock file \"%s\": %s",
elog(ERROR, "Could not write lock file \"%s\": %s",
lock_file, strerror(errno));
}
if (close(fd) != 0)
@ -188,7 +259,7 @@ catalog_lock(void)
unlink(lock_file);
errno = save_errno;
elog(ERROR, "could not write lock file \"%s\": %s",
elog(ERROR, "Culd not write lock file \"%s\": %s",
lock_file, strerror(errno));
}
@ -200,41 +271,13 @@ catalog_lock(void)
atexit(unlink_lock_atexit);
exit_hook_registered = true;
}
}
/*
* Read backup meta information from BACKUP_CONTROL_FILE.
* If no backup matches, return NULL.
*/
pgBackup *
read_backup(time_t timestamp)
{
pgBackup tmp;
char conf_path[MAXPGPATH];
/* Use parray so that the lock files are unlinked in a loop */
if (lock_files == NULL)
lock_files = parray_new();
parray_append(lock_files, pgut_strdup(lock_file));
tmp.start_time = timestamp;
pgBackupGetPath(&tmp, conf_path, lengthof(conf_path), BACKUP_CONTROL_FILE);
return readBackupControlFile(conf_path);
}
/*
* Save the backup status into BACKUP_CONTROL_FILE.
*
* We need to reread the backup using its ID and save it changing only its
* status.
*/
void
write_backup_status(pgBackup *backup)
{
pgBackup *tmp;
tmp = read_backup(backup->start_time);
tmp->status = backup->status;
write_backup(tmp);
pgBackupFree(tmp);
return true;
}
/*
@ -266,11 +309,10 @@ IsDir(const char *dirpath, const char *entry)
parray *
catalog_get_backup_list(time_t requested_backup_id)
{
DIR *data_dir = NULL;
struct dirent *data_ent = NULL;
parray *backups = NULL;
pgBackup *backup = NULL;
int i;
DIR *data_dir = NULL;
struct dirent *data_ent = NULL;
parray *backups = NULL;
int i;
/* open backup instance backups directory */
data_dir = opendir(backup_instance_path);
@ -285,8 +327,9 @@ catalog_get_backup_list(time_t requested_backup_id)
backups = parray_new();
for (; (data_ent = readdir(data_dir)) != NULL; errno = 0)
{
char backup_conf_path[MAXPGPATH];
char data_path[MAXPGPATH];
char backup_conf_path[MAXPGPATH];
char data_path[MAXPGPATH];
pgBackup *backup = NULL;
/* skip not-directory entries and hidden entries */
if (!IsDir(backup_instance_path, data_ent->d_name)
@ -320,7 +363,6 @@ catalog_get_backup_list(time_t requested_backup_id)
continue;
}
parray_append(backups, backup);
backup = NULL;
if (errno && errno != ENOENT)
{
@ -344,25 +386,18 @@ catalog_get_backup_list(time_t requested_backup_id)
/* Link incremental backups with their ancestors.*/
for (i = 0; i < parray_num(backups); i++)
{
pgBackup *curr = parray_get(backups, i);
int j;
pgBackup *curr = parray_get(backups, i);
pgBackup **ancestor;
pgBackup key;
if (curr->backup_mode == BACKUP_MODE_FULL)
continue;
for (j = i+1; j < parray_num(backups); j++)
{
pgBackup *ancestor = parray_get(backups, j);
if (ancestor->start_time == curr->parent_backup)
{
curr->parent_backup_link = ancestor;
/* elog(INFO, "curr %s, ancestor %s j=%d", base36enc_dup(curr->start_time),
base36enc_dup(ancestor->start_time), j); */
break;
}
}
key.start_time = curr->parent_backup;
ancestor = (pgBackup **) parray_bsearch(backups, &key,
pgBackupCompareIdDesc);
if (ancestor)
curr->parent_backup_link = *ancestor;
}
return backups;
@ -370,8 +405,6 @@ catalog_get_backup_list(time_t requested_backup_id)
err_proc:
if (data_dir)
closedir(data_dir);
if (backup)
pgBackupFree(backup);
if (backups)
parray_walk(backups, pgBackupFree);
parray_free(backups);
@ -381,6 +414,31 @@ err_proc:
return NULL;
}
/*
* Lock a list of backups. The function walks the list from the higher index down to the lower one.
*/
void
catalog_lock_backup_list(parray *backup_list, int from_idx, int to_idx)
{
int start_idx,
end_idx;
int i;
if (parray_num(backup_list) == 0)
return;
start_idx = Max(from_idx, to_idx);
end_idx = Min(from_idx, to_idx);
for (i = start_idx; i >= end_idx; i--)
{
pgBackup *backup = (pgBackup *) parray_get(backup_list, i);
if (!lock_backup(backup))
elog(ERROR, "Cannot lock backup %s directory",
base36enc(backup->start_time));
}
}
/*
* Find the last completed backup on given timeline
*/
@ -408,7 +466,25 @@ pgBackupCreateDir(pgBackup *backup)
{
int i;
char path[MAXPGPATH];
char *subdirs[] = { DATABASE_DIR, NULL };
parray *subdirs = parray_new();
parray_append(subdirs, pg_strdup(DATABASE_DIR));
/* Add external dirs containers */
if (backup->external_dir_str)
{
parray *external_list;
external_list = make_external_directory_list(backup->external_dir_str);
for (int i = 0; i < parray_num(external_list); i++)
{
char temp[MAXPGPATH];
/* Numbering of external dirs starts with 1 */
makeExternalDirPathByNum(temp, EXTERNAL_DIR, i+1);
parray_append(subdirs, pg_strdup(temp));
}
free_dir_list(external_list);
}
pgBackupGetPath(backup, path, lengthof(path), NULL);
@ -418,12 +494,13 @@ pgBackupCreateDir(pgBackup *backup)
dir_create_dir(path, DIR_PERMISSION);
/* create directories for actual backup files */
for (i = 0; subdirs[i]; i++)
for (i = 0; i < parray_num(subdirs); i++)
{
pgBackupGetPath(backup, path, lengthof(path), subdirs[i]);
pgBackupGetPath(backup, path, lengthof(path), parray_get(subdirs, i));
dir_create_dir(path, DIR_PERMISSION);
}
free_dir_list(subdirs);
return 0;
}
@ -465,6 +542,11 @@ pgBackupWriteControl(FILE *out, pgBackup *backup)
time2iso(timestamp, lengthof(timestamp), backup->start_time);
fprintf(out, "start-time = '%s'\n", timestamp);
if (backup->merge_time > 0)
{
time2iso(timestamp, lengthof(timestamp), backup->merge_time);
fprintf(out, "merge-time = '%s'\n", timestamp);
}
if (backup->end_time > 0)
{
time2iso(timestamp, lengthof(timestamp), backup->end_time);
@ -496,6 +578,10 @@ pgBackupWriteControl(FILE *out, pgBackup *backup)
/* print connection info except password */
if (backup->primary_conninfo)
fprintf(out, "primary_conninfo = '%s'\n", backup->primary_conninfo);
/* print external directories list */
if (backup->external_dir_str)
fprintf(out, "external-dirs = '%s'\n", backup->external_dir_str);
}
/*
@ -504,46 +590,79 @@ pgBackupWriteControl(FILE *out, pgBackup *backup)
void
write_backup(pgBackup *backup)
{
FILE *fp = NULL;
char conf_path[MAXPGPATH];
FILE *fp = NULL;
char path[MAXPGPATH];
char path_temp[MAXPGPATH];
int errno_temp;
pgBackupGetPath(backup, conf_path, lengthof(conf_path), BACKUP_CONTROL_FILE);
fp = fopen(conf_path, "wt");
pgBackupGetPath(backup, path, lengthof(path), BACKUP_CONTROL_FILE);
snprintf(path_temp, sizeof(path_temp), "%s.tmp", path);
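/* Write to a temporary file first and rename it over BACKUP_CONTROL_FILE below,
 * so an interrupted write cannot leave behind a truncated control file. */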
fp = fopen(path_temp, "wt");
if (fp == NULL)
elog(ERROR, "Cannot open configuration file \"%s\": %s", conf_path,
strerror(errno));
elog(ERROR, "Cannot open configuration file \"%s\": %s",
path_temp, strerror(errno));
pgBackupWriteControl(fp, backup);
if (fflush(fp) != 0 ||
fsync(fileno(fp)) != 0 ||
fclose(fp))
{
errno_temp = errno;
unlink(path_temp);
elog(ERROR, "Cannot write configuration file \"%s\": %s",
conf_path, strerror(errno));
path_temp, strerror(errno_temp));
}
if (rename(path_temp, path) < 0)
{
errno_temp = errno;
unlink(path_temp);
elog(ERROR, "Cannot rename configuration file \"%s\" to \"%s\": %s",
path_temp, path, strerror(errno_temp));
}
}
/*
* Output the list of files to backup catalog DATABASE_FILE_LIST
*/
void
write_backup_filelist(pgBackup *backup, parray *files, const char *root)
write_backup_filelist(pgBackup *backup, parray *files, const char *root,
const char *external_prefix, parray *external_list)
{
FILE *fp;
char path[MAXPGPATH];
char path_temp[MAXPGPATH];
int errno_temp;
pgBackupGetPath(backup, path, lengthof(path), DATABASE_FILE_LIST);
snprintf(path_temp, sizeof(path_temp), "%s.tmp", path);
fp = fopen(path, "wt");
fp = fopen(path_temp, "wt");
if (fp == NULL)
elog(ERROR, "Cannot open file list \"%s\": %s", path,
strerror(errno));
elog(ERROR, "Cannot open file list \"%s\": %s", path_temp,
strerror(errno));
print_file_list(fp, files, root);
print_file_list(fp, files, root, external_prefix, external_list);
if (fflush(fp) != 0 ||
fsync(fileno(fp)) != 0 ||
fclose(fp))
elog(ERROR, "Cannot write file list \"%s\": %s", path, strerror(errno));
{
errno_temp = errno;
unlink(path_temp);
elog(ERROR, "Cannot write file list \"%s\": %s",
path_temp, strerror(errno));
}
if (rename(path_temp, path) < 0)
{
errno_temp = errno;
unlink(path_temp);
elog(ERROR, "Cannot rename configuration file \"%s\" to \"%s\": %s",
path_temp, path, strerror(errno_temp));
}
}
/*
@ -572,6 +691,7 @@ readBackupControlFile(const char *path)
{'s', 0, "start-lsn", &start_lsn, SOURCE_FILE_STRICT},
{'s', 0, "stop-lsn", &stop_lsn, SOURCE_FILE_STRICT},
{'t', 0, "start-time", &backup->start_time, SOURCE_FILE_STRICT},
{'t', 0, "merge-time", &backup->merge_time, SOURCE_FILE_STRICT},
{'t', 0, "end-time", &backup->end_time, SOURCE_FILE_STRICT},
{'U', 0, "recovery-xid", &backup->recovery_xid, SOURCE_FILE_STRICT},
{'t', 0, "recovery-time", &backup->recovery_time, SOURCE_FILE_STRICT},
@ -589,6 +709,7 @@ readBackupControlFile(const char *path)
{'u', 0, "compress-level", &backup->compress_level, SOURCE_FILE_STRICT},
{'b', 0, "from-replica", &backup->from_replica, SOURCE_FILE_STRICT},
{'s', 0, "primary-conninfo", &backup->primary_conninfo, SOURCE_FILE_STRICT},
{'s', 0, "external-dirs", &backup->external_dir_str, SOURCE_FILE_STRICT},
{0}
};
@ -600,7 +721,7 @@ readBackupControlFile(const char *path)
return NULL;
}
parsed_options = config_read_opt(path, options, WARNING, true);
parsed_options = config_read_opt(path, options, WARNING, true, true);
if (parsed_options == 0)
{
@ -797,6 +918,7 @@ pgBackupInit(pgBackup *backup)
backup->start_lsn = 0;
backup->stop_lsn = 0;
backup->start_time = (time_t) 0;
backup->merge_time = (time_t) 0;
backup->end_time = (time_t) 0;
backup->recovery_xid = 0;
backup->recovery_time = (time_t) 0;
@ -818,6 +940,7 @@ pgBackupInit(pgBackup *backup)
backup->primary_conninfo = NULL;
backup->program_version[0] = '\0';
backup->server_version[0] = '\0';
backup->external_dir_str = NULL;
}
/* free pgBackup object */
@ -827,6 +950,7 @@ pgBackupFree(void *backup)
pgBackup *b = (pgBackup *) backup;
pfree(b->primary_conninfo);
pfree(b->external_dir_str);
pfree(backup);
}
@ -903,8 +1027,15 @@ find_parent_full_backup(pgBackup *current_backup)
}
if (base_full_backup->backup_mode != BACKUP_MODE_FULL)
elog(ERROR, "Failed to find FULL backup parent for %s",
base36enc(current_backup->start_time));
{
if (base_full_backup->parent_backup)
elog(WARNING, "Backup %s is missing",
base36enc(base_full_backup->parent_backup));
else
elog(WARNING, "Failed to find parent FULL backup for %s",
base36enc(current_backup->start_time));
return NULL;
}
return base_full_backup;
}
@ -981,6 +1112,9 @@ is_parent(time_t parent_backup_time, pgBackup *child_backup, bool inclusive)
if (!child_backup)
elog(ERROR, "Target backup cannot be NULL");
if (inclusive && child_backup->start_time == parent_backup_time)
return true;
while (child_backup->parent_backup_link &&
child_backup->parent_backup != parent_backup_time)
{
@ -990,8 +1124,8 @@ is_parent(time_t parent_backup_time, pgBackup *child_backup, bool inclusive)
if (child_backup->parent_backup == parent_backup_time)
return true;
if (inclusive && child_backup->start_time == parent_backup_time)
return true;
//if (inclusive && child_backup->start_time == parent_backup_time)
// return true;
return false;
}


@ -9,6 +9,8 @@
#include "pg_probackup.h"
#include <unistd.h>
#include "utils/configuration.h"
#include "utils/json.h"
@ -61,6 +63,11 @@ ConfigOption instance_options[] =
OPTION_INSTANCE_GROUP, 0, option_get_value
},
#endif
{
's', 'E', "external-dirs",
&instance_config.external_dir_str, SOURCE_CMD, 0,
OPTION_INSTANCE_GROUP, 0, option_get_value
},
/* Connection options */
{
's', 'd', "pgdatabase",
@ -208,16 +215,22 @@ do_show_config(void)
* values into the file.
*/
void
do_set_config(void)
do_set_config(bool missing_ok)
{
char path[MAXPGPATH];
char path_temp[MAXPGPATH];
FILE *fp;
int i;
join_path_components(path, backup_instance_path, BACKUP_CATALOG_CONF_FILE);
fp = fopen(path, "wt");
snprintf(path_temp, sizeof(path_temp), "%s.tmp", path);
if (!missing_ok && !fileExists(path))
elog(ERROR, "Configuration file \"%s\" doesn't exist", path);
fp = fopen(path_temp, "wt");
if (fp == NULL)
elog(ERROR, "cannot create %s: %s",
elog(ERROR, "Cannot create configuration file \"%s\": %s",
BACKUP_CATALOG_CONF_FILE, strerror(errno));
current_group = NULL;
@ -248,6 +261,14 @@ do_set_config(void)
}
fclose(fp);
if (rename(path_temp, path) < 0)
{
int errno_temp = errno;
unlink(path_temp);
elog(ERROR, "Cannot rename configuration file \"%s\" to \"%s\": %s",
path_temp, path, strerror(errno_temp));
}
}
void


@ -22,6 +22,8 @@
#include <zlib.h>
#endif
#include "utils/thread.h"
/* Union to ease operations on relation pages */
typedef union DataPage
{
@ -318,7 +320,7 @@ prepare_page(backup_files_arg *arguments,
BlockNumber absolute_blknum = file->segno * RELSEG_SIZE + blknum;
/* check for interrupt */
if (interrupted)
if (interrupted || thread_interrupted)
elog(ERROR, "Interrupted during backup");
/*
@ -625,6 +627,7 @@ backup_data_file(backup_files_arg* arguments,
if (errno == ENOENT)
{
elog(LOG, "File \"%s\" is not found", file->path);
file->write_size = FILE_NOT_FOUND;
return false;
}
@ -763,7 +766,7 @@ restore_data_file(const char *to_path, pgFile *file, bool allow_truncate,
in = fopen(file->path, PG_BINARY_R);
if (in == NULL)
{
elog(ERROR, "cannot open backup file \"%s\": %s", file->path,
elog(ERROR, "Cannot open backup file \"%s\": %s", file->path,
strerror(errno));
}
}
@ -780,7 +783,7 @@ restore_data_file(const char *to_path, pgFile *file, bool allow_truncate,
{
int errno_tmp = errno;
fclose(in);
elog(ERROR, "cannot open restore target file \"%s\": %s",
elog(ERROR, "Cannot open restore target file \"%s\": %s",
to_path, strerror(errno_tmp));
}
@ -820,16 +823,22 @@ restore_data_file(const char *to_path, pgFile *file, bool allow_truncate,
break; /* EOF found */
else if (read_len != 0 && feof(in))
elog(ERROR,
"odd size page found at block %u of \"%s\"",
"Odd size page found at block %u of \"%s\"",
blknum, file->path);
else
elog(ERROR, "cannot read header of block %u of \"%s\": %s",
elog(ERROR, "Cannot read header of block %u of \"%s\": %s",
blknum, file->path, strerror(errno_tmp));
}
if (header.block == 0 && header.compressed_size == 0)
{
elog(VERBOSE, "Skip empty block of \"%s\"", file->path);
continue;
}
if (header.block < blknum)
elog(ERROR, "backup is broken at file->path %s block %u",
file->path, blknum);
elog(ERROR, "Backup is broken at block %u of \"%s\"",
blknum, file->path);
blknum = header.block;
@ -850,7 +859,7 @@ restore_data_file(const char *to_path, pgFile *file, bool allow_truncate,
read_len = fread(compressed_page.data, 1,
MAXALIGN(header.compressed_size), in);
if (read_len != MAXALIGN(header.compressed_size))
elog(ERROR, "cannot read block %u of \"%s\" read %zu of %d",
elog(ERROR, "Cannot read block %u of \"%s\" read %zu of %d",
blknum, file->path, read_len, header.compressed_size);
/*
@ -874,7 +883,7 @@ restore_data_file(const char *to_path, pgFile *file, bool allow_truncate,
blknum, file->path, errormsg);
if (uncompressed_size != BLCKSZ)
elog(ERROR, "page of file \"%s\" uncompressed to %d bytes. != BLCKSZ",
elog(ERROR, "Page of file \"%s\" uncompressed to %d bytes. != BLCKSZ",
file->path, uncompressed_size);
}
@ -885,7 +894,7 @@ restore_data_file(const char *to_path, pgFile *file, bool allow_truncate,
* Seek and write the restored page.
*/
if (fseek(out, write_pos, SEEK_SET) < 0)
elog(ERROR, "cannot seek block %u of \"%s\": %s",
elog(ERROR, "Cannot seek block %u of \"%s\": %s",
blknum, to_path, strerror(errno));
if (write_header)
@ -893,7 +902,7 @@ restore_data_file(const char *to_path, pgFile *file, bool allow_truncate,
/* We uncompressed the page, so its size is BLCKSZ */
header.compressed_size = BLCKSZ;
if (fwrite(&header, 1, sizeof(header), out) != sizeof(header))
elog(ERROR, "cannot write header of block %u of \"%s\": %s",
elog(ERROR, "Cannot write header of block %u of \"%s\": %s",
blknum, file->path, strerror(errno));
}
@ -904,14 +913,14 @@ restore_data_file(const char *to_path, pgFile *file, bool allow_truncate,
if (uncompressed_size == BLCKSZ)
{
if (fwrite(page.data, 1, BLCKSZ, out) != BLCKSZ)
elog(ERROR, "cannot write block %u of \"%s\": %s",
elog(ERROR, "Cannot write block %u of \"%s\": %s",
blknum, file->path, strerror(errno));
}
else
{
/* */
if (fwrite(compressed_page.data, 1, BLCKSZ, out) != BLCKSZ)
elog(ERROR, "cannot write block %u of \"%s\": %s",
elog(ERROR, "Cannot write block %u of \"%s\": %s",
blknum, file->path, strerror(errno));
}
}
@ -949,7 +958,7 @@ restore_data_file(const char *to_path, pgFile *file, bool allow_truncate,
* Truncate file to this length.
*/
if (ftruncate(fileno(out), write_pos) != 0)
elog(ERROR, "cannot truncate \"%s\": %s",
elog(ERROR, "Cannot truncate \"%s\": %s",
file->path, strerror(errno));
elog(VERBOSE, "Delta truncate file %s to block %u",
file->path, truncate_from);
@ -963,14 +972,14 @@ restore_data_file(const char *to_path, pgFile *file, bool allow_truncate,
if (in)
fclose(in);
fclose(out);
elog(ERROR, "cannot change mode of \"%s\": %s", to_path,
elog(ERROR, "Cannot change mode of \"%s\": %s", to_path,
strerror(errno_tmp));
}
if (fflush(out) != 0 ||
fsync(fileno(out)) != 0 ||
fclose(out))
elog(ERROR, "cannot write \"%s\": %s", to_path, strerror(errno));
elog(ERROR, "Cannot write \"%s\": %s", to_path, strerror(errno));
if (in)
fclose(in);
}
@ -1007,7 +1016,11 @@ copy_file(const char *from_root, const char *to_root, pgFile *file)
/* maybe deleted, it's not error */
if (errno == ENOENT)
{
elog(LOG, "File \"%s\" is not found", file->path);
file->write_size = FILE_NOT_FOUND;
return false;
}
elog(ERROR, "cannot open source file \"%s\": %s", file->path,
strerror(errno));
@ -1158,7 +1171,7 @@ push_wal_file(const char *from_path, const char *to_path, bool is_compress,
bool overwrite)
{
FILE *in = NULL;
FILE *out=NULL;
int out;
char buf[XLOG_BLCKSZ];
const char *to_path_p;
char to_path_temp[MAXPGPATH];
@ -1198,7 +1211,13 @@ push_wal_file(const char *from_path, const char *to_path, bool is_compress,
{
snprintf(to_path_temp, sizeof(to_path_temp), "%s.partial", gz_to_path);
gz_out = gzopen(to_path_temp, PG_BINARY_W);
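/* O_CREAT | O_EXCL makes the open fail if the .partial file already exists,
 * so a concurrent archiver cannot silently overwrite it. */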
out = open(to_path_temp, O_RDWR | O_CREAT | O_EXCL | PG_BINARY,
S_IRUSR | S_IWUSR);
if (out < 0)
elog(ERROR, "Cannot open destination temporary WAL file \"%s\": %s",
to_path_temp, strerror(errno));
gz_out = gzdopen(out, PG_BINARY_W);
if (gzsetparams(gz_out, instance_config.compress_level, Z_DEFAULT_STRATEGY) != Z_OK)
elog(ERROR, "Cannot set compression level %d to file \"%s\": %s",
instance_config.compress_level, to_path_temp,
@ -1209,9 +1228,10 @@ push_wal_file(const char *from_path, const char *to_path, bool is_compress,
{
snprintf(to_path_temp, sizeof(to_path_temp), "%s.partial", to_path);
out = fopen(to_path_temp, PG_BINARY_W);
if (out == NULL)
elog(ERROR, "Cannot open destination WAL file \"%s\": %s",
out = open(to_path_temp, O_RDWR | O_CREAT | O_EXCL | PG_BINARY,
S_IRUSR | S_IWUSR);
if (out < 0)
elog(ERROR, "Cannot open destination temporary WAL file \"%s\": %s",
to_path_temp, strerror(errno));
}
@ -1247,7 +1267,7 @@ push_wal_file(const char *from_path, const char *to_path, bool is_compress,
else
#endif
{
if (fwrite(buf, 1, read_len, out) != read_len)
if (write(out, buf, read_len) != read_len)
{
errno_temp = errno;
unlink(to_path_temp);
@ -1275,9 +1295,7 @@ push_wal_file(const char *from_path, const char *to_path, bool is_compress,
else
#endif
{
if (fflush(out) != 0 ||
fsync(fileno(out)) != 0 ||
fclose(out))
if (fsync(out) != 0 || close(out) != 0)
{
errno_temp = errno;
unlink(to_path_temp);
@ -1318,7 +1336,7 @@ void
get_wal_file(const char *from_path, const char *to_path)
{
FILE *in = NULL;
FILE *out;
int out;
char buf[XLOG_BLCKSZ];
const char *from_path_p = from_path;
char to_path_temp[MAXPGPATH];
@ -1368,10 +1386,11 @@ get_wal_file(const char *from_path, const char *to_path)
/* open backup file for write */
snprintf(to_path_temp, sizeof(to_path_temp), "%s.partial", to_path);
out = fopen(to_path_temp, PG_BINARY_W);
if (out == NULL)
elog(ERROR, "Cannot open destination WAL file \"%s\": %s",
to_path_temp, strerror(errno));
out = open(to_path_temp, O_RDWR | O_CREAT | O_EXCL | PG_BINARY,
S_IRUSR | S_IWUSR);
if (out < 0)
elog(ERROR, "Cannot open destination temporary WAL file \"%s\": %s",
to_path_temp, strerror(errno));
/* copy content */
for (;;)
@ -1405,7 +1424,7 @@ get_wal_file(const char *from_path, const char *to_path)
if (read_len > 0)
{
if (fwrite(buf, 1, read_len, out) != read_len)
if (write(out, buf, read_len) != read_len)
{
errno_temp = errno;
unlink(to_path_temp);
@ -1429,9 +1448,7 @@ get_wal_file(const char *from_path, const char *to_path)
}
}
if (fflush(out) != 0 ||
fsync(fileno(out)) != 0 ||
fclose(out))
if (fsync(out) != 0 || close(out) != 0)
{
errno_temp = errno;
unlink(to_path_temp);
@ -1621,7 +1638,7 @@ check_file_pages(pgFile *file, XLogRecPtr stop_lsn, uint32 checksum_version,
pg_crc32 crc;
bool use_crc32c = backup_version <= 20021 || backup_version >= 20025;
elog(VERBOSE, "validate relation blocks for file %s", file->name);
elog(VERBOSE, "Validate relation blocks for file %s", file->path);
in = fopen(file->path, PG_BINARY_R);
if (in == NULL)
@ -1632,7 +1649,7 @@ check_file_pages(pgFile *file, XLogRecPtr stop_lsn, uint32 checksum_version,
return false;
}
elog(ERROR, "cannot open file \"%s\": %s",
elog(ERROR, "Cannot open file \"%s\": %s",
file->path, strerror(errno));
}
@ -1656,20 +1673,26 @@ check_file_pages(pgFile *file, XLogRecPtr stop_lsn, uint32 checksum_version,
break; /* EOF found */
else if (read_len != 0 && feof(in))
elog(WARNING,
"odd size page found at block %u of \"%s\"",
"Odd size page found at block %u of \"%s\"",
blknum, file->path);
else
elog(WARNING, "cannot read header of block %u of \"%s\": %s",
elog(WARNING, "Cannot read header of block %u of \"%s\": %s",
blknum, file->path, strerror(errno_tmp));
return false;
}
COMP_FILE_CRC32(use_crc32c, crc, &header, read_len);
if (header.block == 0 && header.compressed_size == 0)
{
elog(VERBOSE, "Skip empty block of \"%s\"", file->path);
continue;
}
if (header.block < blknum)
{
elog(WARNING, "backup is broken at file->path %s block %u",
file->path, blknum);
elog(WARNING, "Backup is broken at block %u of \"%s\"",
blknum, file->path);
return false;
}
@ -1677,8 +1700,8 @@ check_file_pages(pgFile *file, XLogRecPtr stop_lsn, uint32 checksum_version,
if (header.compressed_size == PageIsTruncated)
{
elog(LOG, "File %s, block %u is truncated",
file->path, blknum);
elog(LOG, "Block %u of \"%s\" is truncated",
blknum, file->path);
continue;
}
@ -1688,7 +1711,7 @@ check_file_pages(pgFile *file, XLogRecPtr stop_lsn, uint32 checksum_version,
MAXALIGN(header.compressed_size), in);
if (read_len != MAXALIGN(header.compressed_size))
{
elog(WARNING, "cannot read block %u of \"%s\" read %zu of %d",
elog(WARNING, "Cannot read block %u of \"%s\" read %zu of %d",
blknum, file->path, read_len, header.compressed_size);
return false;
}
@ -1718,7 +1741,7 @@ check_file_pages(pgFile *file, XLogRecPtr stop_lsn, uint32 checksum_version,
is_valid = false;
continue;
}
elog(WARNING, "page of file \"%s\" uncompressed to %d bytes. != BLCKSZ",
elog(WARNING, "Page of file \"%s\" uncompressed to %d bytes. != BLCKSZ",
file->path, uncompressed_size);
return false;
}
@ -1740,7 +1763,7 @@ check_file_pages(pgFile *file, XLogRecPtr stop_lsn, uint32 checksum_version,
if (crc != file->crc)
{
elog(WARNING, "Invalid CRC of backup file \"%s\" : %X. Expected %X",
elog(WARNING, "Invalid CRC of backup file \"%s\": %X. Expected %X",
file->path, file->crc, crc);
is_valid = false;
}


@ -24,72 +24,57 @@ do_delete(time_t backup_id)
parray *backup_list,
*delete_list;
pgBackup *target_backup = NULL;
time_t parent_id = 0;
XLogRecPtr oldest_lsn = InvalidXLogRecPtr;
TimeLineID oldest_tli = 0;
/* Get exclusive lock of backup catalog */
catalog_lock();
/* Get complete list of backups */
backup_list = catalog_get_backup_list(INVALID_BACKUP_ID);
if (backup_id != 0)
delete_list = parray_new();
/* Find the backup to be deleted and build the array of incremental backups to be deleted */
for (i = 0; i < parray_num(backup_list); i++)
{
delete_list = parray_new();
pgBackup *backup = (pgBackup *) parray_get(backup_list, i);
/* Find the backup to be deleted and build the array of incremental backups to be deleted */
for (i = (int) parray_num(backup_list) - 1; i >= 0; i--)
if (backup->start_time == backup_id)
{
pgBackup *backup = (pgBackup *) parray_get(backup_list, (size_t) i);
if (backup->start_time == backup_id)
{
parray_append(delete_list, backup);
/*
* Do not remove next backups, if target backup was finished
* incorrectly.
*/
if (backup->status == BACKUP_STATUS_ERROR)
break;
/* Save backup id to retrieve incremental backups */
parent_id = backup->start_time;
target_backup = backup;
}
else if (target_backup)
{
if (backup->backup_mode != BACKUP_MODE_FULL &&
backup->parent_backup == parent_id)
{
/* Append to delete list increment backup */
parray_append(delete_list, backup);
/* Save backup id to retrieve incremental backups */
parent_id = backup->start_time;
}
else
break;
}
target_backup = backup;
break;
}
if (parray_num(delete_list) == 0)
elog(ERROR, "no backup found, cannot delete");
/* Delete backups from the end of list */
for (i = (int) parray_num(delete_list) - 1; i >= 0; i--)
{
pgBackup *backup = (pgBackup *) parray_get(delete_list, (size_t) i);
if (interrupted)
elog(ERROR, "interrupted during delete backup");
delete_backup_files(backup);
}
parray_free(delete_list);
}
/* sanity */
if (!target_backup)
elog(ERROR, "Failed to find backup %s, cannot delete", base36enc(backup_id));
/* form delete list */
for (i = 0; i < parray_num(backup_list); i++)
{
pgBackup *backup = (pgBackup *) parray_get(backup_list, i);
/* check if backup is descendant of delete target */
if (is_parent(target_backup->start_time, backup, false))
parray_append(delete_list, backup);
}
parray_append(delete_list, target_backup);
/* Lock marked for delete backups */
catalog_lock_backup_list(delete_list, parray_num(delete_list) - 1, 0);
/* Delete backups from the end of list */
for (i = (int) parray_num(delete_list) - 1; i >= 0; i--)
{
pgBackup *backup = (pgBackup *) parray_get(delete_list, (size_t) i);
if (interrupted)
elog(ERROR, "interrupted during delete backup");
delete_backup_files(backup);
}
parray_free(delete_list);
/* Clean WAL segments */
if (delete_wal)
{
@ -127,7 +112,6 @@ do_retention_purge(void)
size_t i;
XLogRecPtr oldest_lsn = InvalidXLogRecPtr;
TimeLineID oldest_tli = 0;
bool keep_next_backup = true; /* Do not delete first full backup */
bool backup_deleted = false; /* At least one backup was deleted */
if (delete_expired)
@ -146,9 +130,6 @@ do_retention_purge(void)
}
}
/* Get exclusive lock of backup catalog */
catalog_lock();
/* Get a complete list of backups. */
backup_list = catalog_get_backup_list(INVALID_BACKUP_ID);
if (parray_num(backup_list) == 0)
@ -162,6 +143,7 @@ do_retention_purge(void)
(instance_config.retention_redundancy > 0 ||
instance_config.retention_window > 0))
{
bool keep_next_backup = false; /* Do not delete first full backup */
time_t days_threshold;
uint32 backup_num = 0;
@ -206,9 +188,21 @@ do_retention_purge(void)
continue;
}
/*
* If the backup is still in use, do not interrupt; go to the next
* backup.
*/
if (!lock_backup(backup))
{
elog(WARNING, "Cannot lock backup %s directory, skip purging",
base36enc(backup->start_time));
continue;
}
/* Delete backup and update status to DELETED */
delete_backup_files(backup);
backup_deleted = true;
keep_next_backup = false; /* reset it */
}
}
@ -240,7 +234,7 @@ do_retention_purge(void)
if (backup_deleted)
elog(INFO, "Purging finished");
else
elog(INFO, "Nothing to delete by retention policy");
elog(INFO, "There are no backups to delete by retention policy");
return 0;
}
@ -277,13 +271,12 @@ delete_backup_files(pgBackup *backup)
* Update STATUS to BACKUP_STATUS_DELETING in preparation for the case which
* the error occurs before deleting all backup files.
*/
backup->status = BACKUP_STATUS_DELETING;
write_backup_status(backup);
write_backup_status(backup, BACKUP_STATUS_DELETING);
/* list files to be deleted */
files = parray_new();
pgBackupGetPath(backup, path, lengthof(path), NULL);
dir_list_file(files, path, false, true, true);
dir_list_file(files, path, false, true, true, 0);
/* delete leaf node first */
parray_qsort(files, pgFileComparePathDesc);
@ -296,15 +289,10 @@ delete_backup_files(pgBackup *backup)
elog(INFO, "Progress: (%zd/%zd). Process file \"%s\"",
i + 1, num_files, file->path);
if (remove(file->path))
{
if (errno == ENOENT)
elog(VERBOSE, "File \"%s\" is absent", file->path);
else
elog(ERROR, "Cannot remove \"%s\": %s", file->path,
strerror(errno));
return;
}
if (interrupted)
elog(ERROR, "interrupted during delete backup");
pgFileDelete(file);
}
parray_walk(files, pgFileFree);
@ -438,6 +426,8 @@ do_delete_instance(void)
/* Delete all backups. */
backup_list = catalog_get_backup_list(INVALID_BACKUP_ID);
catalog_lock_backup_list(backup_list, 0, parray_num(backup_list) - 1);
for (i = 0; i < parray_num(backup_list); i++)
{
pgBackup *backup = (pgBackup *) parray_get(backup_list, i);

src/dir.c

@ -118,17 +118,22 @@ typedef struct TablespaceCreatedList
static int BlackListCompare(const void *str1, const void *str2);
static bool dir_check_file(const char *root, pgFile *file);
static char dir_check_file(const char *root, pgFile *file);
static void dir_list_file_internal(parray *files, const char *root,
pgFile *parent, bool exclude,
bool omit_symlink, parray *black_list);
bool omit_symlink, parray *black_list,
int external_dir_num);
static void list_data_directories(parray *files, const char *path, bool is_root,
bool exclude);
static void opt_path_map(ConfigOption *opt, const char *arg,
TablespaceList *list, const char *type);
/* Tablespace mapping */
static TablespaceList tablespace_dirs = {NULL, NULL};
static TablespaceCreatedList tablespace_created_dirs = {NULL, NULL};
/* Extra directories mapping */
static TablespaceList external_remap_list = {NULL, NULL};
/*
* Create directory, also create parent directories if necessary.
@ -157,7 +162,7 @@ dir_create_dir(const char *dir, mode_t mode)
}
pgFile *
pgFileNew(const char *path, bool omit_symlink)
pgFileNew(const char *path, bool omit_symlink, int external_dir_num)
{
struct stat st;
pgFile *file;
@ -175,6 +180,7 @@ pgFileNew(const char *path, bool omit_symlink)
file = pgFileInit(path);
file->size = st.st_size;
file->mode = st.st_mode;
file->external_dir_num = external_dir_num;
return file;
}
@ -225,6 +231,7 @@ pgFileInit(const char *path)
/* Number of blocks readed during backup */
file->n_blocks = BLOCKNUM_INVALID;
file->compress_alg = NOT_DEFINED_COMPRESS;
file->external_dir_num = 0;
return file;
}
@ -345,6 +352,30 @@ pgFileComparePath(const void *f1, const void *f2)
return strcmp(f1p->path, f2p->path);
}
/*
* Compare two pgFile with their path and external_dir_num
* in ascending order of ASCII code.
*/
int
pgFileComparePathWithExternal(const void *f1, const void *f2)
{
pgFile *f1p = *(pgFile **)f1;
pgFile *f2p = *(pgFile **)f2;
int res;
res = strcmp(f1p->path, f2p->path);
if (!res)
{
if (f1p->external_dir_num > f2p->external_dir_num)
return 1;
else if (f1p->external_dir_num < f2p->external_dir_num)
return -1;
else
return 0;
}
return res;
}
/* Compare two pgFile with their path in descending order of ASCII code. */
int
pgFileComparePathDesc(const void *f1, const void *f2)
@ -352,6 +383,16 @@ pgFileComparePathDesc(const void *f1, const void *f2)
return -pgFileComparePath(f1, f2);
}
/*
* Compare two pgFile with their path and external_dir_num
* in descending order of ASCII code.
*/
int
pgFileComparePathWithExternalDesc(const void *f1, const void *f2)
{
return -pgFileComparePathWithExternal(f1, f2);
}
/* Compare two pgFile with their linked directory path. */
int
pgFileCompareLinked(const void *f1, const void *f2)
@ -392,7 +433,7 @@ BlackListCompare(const void *str1, const void *str2)
*/
void
dir_list_file(parray *files, const char *root, bool exclude, bool omit_symlink,
bool add_root)
bool add_root, int external_dir_num)
{
pgFile *file;
parray *black_list = NULL;
@ -415,6 +456,7 @@ dir_list_file(parray *files, const char *root, bool exclude, bool omit_symlink,
while (fgets(buf, lengthof(buf), black_list_file) != NULL)
{
black_item[0] = '\0';
join_path_components(black_item, instance_config.pgdata, buf);
if (black_item[strlen(black_item) - 1] == '\n')
@ -423,31 +465,46 @@ dir_list_file(parray *files, const char *root, bool exclude, bool omit_symlink,
if (black_item[0] == '#' || black_item[0] == '\0')
continue;
parray_append(black_list, black_item);
parray_append(black_list, pgut_strdup(black_item));
}
fclose(black_list_file);
parray_qsort(black_list, BlackListCompare);
}
file = pgFileNew(root, false);
file = pgFileNew(root, external_dir_num ? omit_symlink : false, external_dir_num);
if (file == NULL)
return;
if (!S_ISDIR(file->mode))
{
elog(WARNING, "Skip \"%s\": unexpected file format", file->path);
if (external_dir_num)
elog(ERROR, " --external-dirs option \"%s\": directory or symbolic link expected",
file->path);
else
elog(WARNING, "Skip \"%s\": unexpected file format", file->path);
return;
}
if (add_root)
parray_append(files, file);
dir_list_file_internal(files, root, file, exclude, omit_symlink, black_list);
dir_list_file_internal(files, root, file, exclude, omit_symlink, black_list,
external_dir_num);
if (!add_root)
pgFileFree(file);
if (black_list)
{
parray_walk(black_list, pfree);
parray_free(black_list);
}
}
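
The external_dir_num argument threaded through these listing routines follows a simple convention visible in this change: 0 means the entry belongs to PGDATA, and each external directory is numbered from 1 in list order. A schematic of how a caller might drive the listing under that convention (list_dir() is only a stand-in, not the real dir_list_file() call chain):

```c
/* Schematic of the 1-based external_dir_num convention:
 * 0 is reserved for PGDATA, each external directory gets i + 1.
 * list_dir() is a stand-in for the real dir_list_file() call chain. */
#include <stdio.h>

static void
list_dir(const char *root, int external_dir_num)
{
    /* the real code appends pgFile entries to a parray here */
    printf("listing %s with external_dir_num=%d\n", root, external_dir_num);
}

int
main(void)
{
    const char *pgdata = "/var/lib/pgsql/11/data";
    const char *external_dirs[] = {"/etc/app_conf", "/opt/app_scripts"};

    list_dir(pgdata, 0);        /* 0 means "not an external directory" */
    for (int i = 0; i < 2; i++)
        list_dir(external_dirs[i], i + 1);
    return 0;
}
```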
#define CHECK_FALSE 0
#define CHECK_TRUE 1
#define CHECK_EXCLUDE_FALSE 2
/*
* Check file or directory.
*
@ -456,16 +513,21 @@ dir_list_file(parray *files, const char *root, bool exclude, bool omit_symlink,
* Skip files:
* - skip temp tables files
* - skip unlogged tables files
* Skip recursive tablespace content
* Set flags for:
* - database directories
* - datafiles
*/
static bool
static char
dir_check_file(const char *root, pgFile *file)
{
const char *rel_path;
int i;
int sscanf_res;
bool in_tablespace = false;
rel_path = GetRelativePath(file->path, root);
in_tablespace = path_is_prefix_of_path(PG_TBLSPC_DIR, rel_path);
/* Check if we need to exclude file by name */
if (S_ISREG(file->mode))
@ -478,7 +540,7 @@ dir_check_file(const char *root, pgFile *file)
{
/* Skip */
elog(VERBOSE, "Excluding file: %s", file->name);
return false;
return CHECK_FALSE;
}
}
@ -487,14 +549,14 @@ dir_check_file(const char *root, pgFile *file)
{
/* Skip */
elog(VERBOSE, "Excluding file: %s", file->name);
return false;
return CHECK_FALSE;
}
}
/*
* If the directory name is in the exclude list, do not list the
* contents.
*/
else if (S_ISDIR(file->mode))
else if (S_ISDIR(file->mode) && !in_tablespace)
{
/*
* If the item in the exclude list starts with '/', compare to
@ -510,20 +572,18 @@ dir_check_file(const char *root, pgFile *file)
{
elog(VERBOSE, "Excluding directory content: %s",
file->name);
return false;
return CHECK_EXCLUDE_FALSE;
}
}
else if (strcmp(file->name, pgdata_exclude_dir[i]) == 0)
{
elog(VERBOSE, "Excluding directory content: %s",
file->name);
return false;
return CHECK_EXCLUDE_FALSE;
}
}
}
rel_path = GetRelativePath(file->path, root);
/*
* Do not copy tablespaces twice. It may happen if the tablespace is located
* inside the PGDATA.
@ -539,14 +599,33 @@ dir_check_file(const char *root, pgFile *file)
* pg_tblspc/tblsOid/TABLESPACE_VERSION_DIRECTORY
*/
if (!path_is_prefix_of_path(PG_TBLSPC_DIR, rel_path))
return false;
return CHECK_FALSE;
sscanf_res = sscanf(rel_path, PG_TBLSPC_DIR "/%u/%s",
&tblspcOid, tmp_rel_path);
if (sscanf_res == 0)
return false;
return CHECK_FALSE;
}
if (path_is_prefix_of_path("global", rel_path))
if (in_tablespace)
{
char tmp_rel_path[MAXPGPATH];
sscanf_res = sscanf(rel_path, PG_TBLSPC_DIR "/%u/%[^/]/%u/",
&(file->tblspcOid), tmp_rel_path,
&(file->dbOid));
/*
* We should skip files and directories other than
* TABLESPACE_VERSION_DIRECTORY if this is a recursive tablespace.
*/
if (sscanf_res == 2 && strcmp(tmp_rel_path, TABLESPACE_VERSION_DIRECTORY) != 0)
return CHECK_FALSE;
if (sscanf_res == 3 && S_ISDIR(file->mode) &&
strcmp(tmp_rel_path, TABLESPACE_VERSION_DIRECTORY) == 0)
file->is_database = true;
}
else if (path_is_prefix_of_path("global", rel_path))
{
file->tblspcOid = GLOBALTABLESPACE_OID;
@ -562,22 +641,10 @@ dir_check_file(const char *root, pgFile *file)
if (S_ISDIR(file->mode) && strcmp(file->name, "base") != 0)
file->is_database = true;
}
else if (path_is_prefix_of_path(PG_TBLSPC_DIR, rel_path))
{
char tmp_rel_path[MAXPGPATH];
sscanf_res = sscanf(rel_path, PG_TBLSPC_DIR "/%u/%[^/]/%u/",
&(file->tblspcOid), tmp_rel_path,
&(file->dbOid));
if (sscanf_res == 3 && S_ISDIR(file->mode) &&
strcmp(tmp_rel_path, TABLESPACE_VERSION_DIRECTORY) == 0)
file->is_database = true;
}
/* Do not backup ptrack_init files */
if (S_ISREG(file->mode) && strcmp(file->name, "ptrack_init") == 0)
return false;
return CHECK_FALSE;
/*
* Check files located inside database directories including directory
@ -587,10 +654,10 @@ dir_check_file(const char *root, pgFile *file)
file->name && file->name[0])
{
if (strcmp(file->name, "pg_internal.init") == 0)
return false;
return CHECK_FALSE;
/* Do not backup temp files */
else if (file->name[0] == 't' && isdigit(file->name[1]))
return false;
return CHECK_FALSE;
else if (isdigit(file->name[0]))
{
char *fork_name;
@ -605,14 +672,22 @@ dir_check_file(const char *root, pgFile *file)
/* Do not backup ptrack files */
if (strcmp(file->forkName, "ptrack") == 0)
return false;
return CHECK_FALSE;
}
else
{
/*
* snapfs files:
* RELFILENODE.BLOCKNO.snapmap.SNAPID
* RELFILENODE.BLOCKNO.snap.SNAPID
*/
if (strstr(file->name, "snap") != NULL)
return true;
len = strlen(file->name);
/* reloid.cfm */
if (len > 3 && strcmp(file->name + len - 3, "cfm") == 0)
return true;
return CHECK_TRUE;
sscanf_res = sscanf(file->name, "%u.%d.%s", &(file->relOid),
&(file->segno), suffix);
@ -624,7 +699,7 @@ dir_check_file(const char *root, pgFile *file)
}
}
return true;
return CHECK_TRUE;
}
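
The tablespace branch added above leans on a single sscanf with the pattern PG_TBLSPC_DIR "/%u/%[^/]/%u/" to pull the tablespace OID, the catalog version directory and the database OID out of a relative path. A self-contained sketch of that parse; the directory name constants are hard-coded here for illustration, the real ones come from the PostgreSQL headers:

```c
/* Sketch of the relative-path parse used by dir_check_file() above.
 * PG_TBLSPC_DIR and TABLESPACE_VERSION_DIRECTORY are hard-coded here
 * for illustration; the real values come from PostgreSQL headers. */
#include <stdio.h>
#include <string.h>

#define PG_TBLSPC_DIR "pg_tblspc"
#define TABLESPACE_VERSION_DIRECTORY "PG_11_201809051"

int
main(void)
{
    const char *rel_path = "pg_tblspc/16395/PG_11_201809051/16400/2619";
    unsigned int tblspcOid = 0;
    unsigned int dbOid = 0;
    char        tmp_rel_path[1024];
    int         sscanf_res;

    sscanf_res = sscanf(rel_path, PG_TBLSPC_DIR "/%u/%[^/]/%u/",
                        &tblspcOid, tmp_rel_path, &dbOid);

    /* sscanf_res == 2 with a foreign version directory means "skip";
     * sscanf_res == 3 on a directory marks it as a database directory. */
    printf("matched=%d tblspcOid=%u version=%s dbOid=%u\n",
           sscanf_res, tblspcOid, tmp_rel_path, dbOid);

    if (sscanf_res >= 2 &&
        strcmp(tmp_rel_path, TABLESPACE_VERSION_DIRECTORY) != 0)
        printf("not our TABLESPACE_VERSION_DIRECTORY, would be skipped\n");
    return 0;
}
```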
/*
@ -633,7 +708,8 @@ dir_check_file(const char *root, pgFile *file)
*/
static void
dir_list_file_internal(parray *files, const char *root, pgFile *parent,
bool exclude, bool omit_symlink, parray *black_list)
bool exclude, bool omit_symlink, parray *black_list,
int external_dir_num)
{
DIR *dir;
struct dirent *dent;
@ -659,10 +735,11 @@ dir_list_file_internal(parray *files, const char *root, pgFile *parent,
{
pgFile *file;
char child[MAXPGPATH];
char check_res;
join_path_components(child, parent->path, dent->d_name);
file = pgFileNew(child, omit_symlink);
file = pgFileNew(child, omit_symlink, external_dir_num);
if (file == NULL)
continue;
@ -694,21 +771,24 @@ dir_list_file_internal(parray *files, const char *root, pgFile *parent,
continue;
}
/* We add the directory anyway */
if (S_ISDIR(file->mode))
parray_append(files, file);
if (exclude && !dir_check_file(root, file))
if (exclude)
{
if (S_ISREG(file->mode))
check_res = dir_check_file(root, file);
if (check_res == CHECK_FALSE)
{
/* Skip */
pgFileFree(file);
/* Skip */
continue;
continue;
}
else if (check_res == CHECK_EXCLUDE_FALSE)
{
/* We add the directory itself, whose content was excluded */
parray_append(files, file);
continue;
}
}
/* At least add the file */
if (S_ISREG(file->mode))
parray_append(files, file);
parray_append(files, file);
/*
* If the entry is a directory call dir_list_file_internal()
@ -716,7 +796,7 @@ dir_list_file_internal(parray *files, const char *root, pgFile *parent,
*/
if (S_ISDIR(file->mode))
dir_list_file_internal(files, root, file, exclude, omit_symlink,
black_list);
black_list, external_dir_num);
}
if (errno && errno != ENOENT)
@ -796,7 +876,7 @@ list_data_directories(parray *files, const char *path, bool is_root,
{
pgFile *dir;
dir = pgFileNew(path, false);
dir = pgFileNew(path, false, 0);
parray_append(files, dir);
}
@ -863,19 +943,21 @@ get_tablespace_created(const char *link)
}
/*
* Split argument into old_dir and new_dir and append to tablespace mapping
* Split argument into old_dir and new_dir and append to mapping
* list.
*
* Copy of function tablespace_list_append() from pg_basebackup.c.
*/
void
opt_tablespace_map(ConfigOption *opt, const char *arg)
static void
opt_path_map(ConfigOption *opt, const char *arg, TablespaceList *list,
const char *type)
{
TablespaceListCell *cell = pgut_new(TablespaceListCell);
char *dst;
char *dst_ptr;
const char *arg_ptr;
memset(cell, 0, sizeof(TablespaceListCell));
dst_ptr = dst = cell->old_dir;
for (arg_ptr = arg; *arg_ptr; arg_ptr++)
{
@ -887,7 +969,7 @@ opt_tablespace_map(ConfigOption *opt, const char *arg)
else if (*arg_ptr == '=' && (arg_ptr == arg || *(arg_ptr - 1) != '\\'))
{
if (*cell->new_dir)
elog(ERROR, "multiple \"=\" signs in tablespace mapping\n");
elog(ERROR, "multiple \"=\" signs in %s mapping\n", type);
else
dst = dst_ptr = cell->new_dir;
}
@ -896,8 +978,8 @@ opt_tablespace_map(ConfigOption *opt, const char *arg)
}
if (!*cell->old_dir || !*cell->new_dir)
elog(ERROR, "invalid tablespace mapping format \"%s\", "
"must be \"OLDDIR=NEWDIR\"", arg);
elog(ERROR, "invalid %s mapping format \"%s\", "
"must be \"OLDDIR=NEWDIR\"", type, arg);
/*
* This check isn't absolutely necessary. But all tablespaces are created
@ -906,18 +988,32 @@ opt_tablespace_map(ConfigOption *opt, const char *arg)
* consistent with the new_dir check.
*/
if (!is_absolute_path(cell->old_dir))
elog(ERROR, "old directory is not an absolute path in tablespace mapping: %s\n",
cell->old_dir);
elog(ERROR, "old directory is not an absolute path in %s mapping: %s\n",
type, cell->old_dir);
if (!is_absolute_path(cell->new_dir))
elog(ERROR, "new directory is not an absolute path in tablespace mapping: %s\n",
cell->new_dir);
elog(ERROR, "new directory is not an absolute path in %s mapping: %s\n",
type, cell->new_dir);
if (tablespace_dirs.tail)
tablespace_dirs.tail->next = cell;
if (list->tail)
list->tail->next = cell;
else
tablespace_dirs.head = cell;
tablespace_dirs.tail = cell;
list->head = cell;
list->tail = cell;
}
/* Parse tablespace mapping */
void
opt_tablespace_map(ConfigOption *opt, const char *arg)
{
opt_path_map(opt, arg, &tablespace_dirs, "tablespace");
}
/* Parse external directories mapping */
void
opt_externaldir_map(ConfigOption *opt, const char *arg)
{
opt_path_map(opt, arg, &external_remap_list, "external directory");
}
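
Both wrappers delegate to opt_path_map(), whose core job is splitting the argument on the first unescaped '=' into old_dir and new_dir. A stripped-down sketch of just that split, without the ConfigOption plumbing and with simplified buffers and error handling:

```c
/* Stripped-down sketch of the OLDDIR=NEWDIR split performed by
 * opt_path_map(); a backslash escapes '=' just as in the code above.
 * The fixed buffers and plain return codes are simplifications,
 * not the real pg_probackup plumbing. */
#include <stdio.h>
#include <string.h>

static int
split_mapping(const char *arg, char *old_dir, char *new_dir)
{
    char       *dst = old_dir;
    const char *p;

    old_dir[0] = new_dir[0] = '\0';
    for (p = arg; *p; p++)
    {
        if (*p == '\\' && *(p + 1) == '=')
            ;                   /* drop the backslash; '=' is copied next */
        else if (*p == '=' && (p == arg || *(p - 1) != '\\'))
        {
            if (*new_dir)
                return -1;      /* multiple "=" signs */
            dst = new_dir;
        }
        else
        {
            size_t      len = strlen(dst);

            dst[len] = *p;
            dst[len + 1] = '\0';
        }
    }
    return (*old_dir && *new_dir) ? 0 : -1;
}

int
main(void)
{
    char        old_dir[1024],
                new_dir[1024];

    if (split_mapping("/mnt/ext_conf=/restore/ext_conf", old_dir, new_dir) == 0)
        printf("old=%s new=%s\n", old_dir, new_dir);
    return 0;
}
```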
/*
@ -1021,11 +1117,11 @@ create_data_directories(const char *data_dir, const char *backup_dir,
}
if (link_sep)
elog(LOG, "create directory \"%s\" and symbolic link \"%.*s\"",
elog(VERBOSE, "create directory \"%s\" and symbolic link \"%.*s\"",
linked_path,
(int) (link_sep - relative_ptr), relative_ptr);
else
elog(LOG, "create directory \"%s\" and symbolic link \"%s\"",
elog(VERBOSE, "create directory \"%s\" and symbolic link \"%s\"",
linked_path, relative_ptr);
/* Firstly, create linked directory */
@ -1056,7 +1152,7 @@ create_data_directories(const char *data_dir, const char *backup_dir,
}
create_directory:
elog(LOG, "create directory \"%s\"", relative_ptr);
elog(VERBOSE, "create directory \"%s\"", relative_ptr);
/* This is not symlink, create directory */
join_path_components(to_path, data_dir, relative_ptr);
@ -1190,11 +1286,66 @@ check_tablespace_mapping(pgBackup *backup)
parray_free(links);
}
void
check_external_dir_mapping(pgBackup *backup)
{
TablespaceListCell *cell;
parray *external_dirs_to_restore;
bool found;
int i;
if (!backup->external_dir_str)
{
if (external_remap_list.head)
elog(ERROR, "--external-mapping option's old directory doesn't "
"have an entry in list of external directories of current "
"backup: \"%s\"", external_remap_list.head->old_dir);
return;
}
external_dirs_to_restore = make_external_directory_list(backup->external_dir_str);
for (cell = external_remap_list.head; cell; cell = cell->next)
{
char *old_dir = cell->old_dir;
found = false;
for (i = 0; i < parray_num(external_dirs_to_restore); i++)
{
char *external_dir = parray_get(external_dirs_to_restore, i);
if (strcmp(old_dir, external_dir) == 0)
{
found = true;
break;
}
}
if (!found)
elog(ERROR, "--external-mapping option's old directory doesn't "
"have an entry in list of external directories of current "
"backup: \"%s\"", cell->old_dir);
}
}
char *
get_external_remap(char *current_dir)
{
TablespaceListCell *cell;
for (cell = external_remap_list.head; cell; cell = cell->next)
{
char *old_dir = cell->old_dir;
if (strcmp(old_dir, current_dir) == 0)
return cell->new_dir;
}
return current_dir;
}
/*
* Print backup content list.
*/
void
print_file_list(FILE *out, const parray *files, const char *root)
print_file_list(FILE *out, const parray *files, const char *root,
const char *external_prefix, parray *external_list)
{
size_t i;
@ -1207,23 +1358,25 @@ print_file_list(FILE *out, const parray *files, const char *root)
/* omit root directory portion */
if (root && strstr(path, root) == path)
path = GetRelativePath(path, root);
else if (file->external_dir_num && !external_prefix)
{
Assert(external_list);
path = GetRelativePath(path, parray_get(external_list,
file->external_dir_num - 1));
}
fprintf(out, "{\"path\":\"%s\", \"size\":\"" INT64_FORMAT "\", "
"\"mode\":\"%u\", \"is_datafile\":\"%u\", "
"\"is_cfs\":\"%u\", \"crc\":\"%u\", "
"\"compress_alg\":\"%s\"",
"\"compress_alg\":\"%s\", \"external_dir_num\":\"%d\"",
path, file->write_size, file->mode,
file->is_datafile ? 1 : 0, file->is_cfs ? 1 : 0, file->crc,
deparse_compress_alg(file->compress_alg));
deparse_compress_alg(file->compress_alg), file->external_dir_num);
if (file->is_datafile)
fprintf(out, ",\"segno\":\"%d\"", file->segno);
#ifndef WIN32
if (S_ISLNK(file->mode))
#else
if (pgwin32_is_junction(file->path))
#endif
if (file->linked)
fprintf(out, ",\"linked\":\"%s\"", file->linked);
if (file->n_blocks != BLOCKNUM_INVALID)
@ -1381,7 +1534,8 @@ bad_format:
* If root is not NULL, path will be absolute path.
*/
parray *
dir_read_file_list(const char *root, const char *file_txt)
dir_read_file_list(const char *root, const char *external_prefix,
const char *file_txt)
{
FILE *fp;
parray *files;
@ -1403,6 +1557,7 @@ dir_read_file_list(const char *root, const char *file_txt)
mode, /* bit length of mode_t depends on platforms */
is_datafile,
is_cfs,
external_dir_num,
crc,
segno,
n_blocks;
@ -1415,8 +1570,16 @@ dir_read_file_list(const char *root, const char *file_txt)
get_control_value(buf, "is_cfs", NULL, &is_cfs, false);
get_control_value(buf, "crc", NULL, &crc, true);
get_control_value(buf, "compress_alg", compress_alg_string, NULL, false);
get_control_value(buf, "external_dir_num", NULL, &external_dir_num, false);
if (root)
if (external_dir_num && external_prefix)
{
char temp[MAXPGPATH];
makeExternalDirPathByNum(temp, external_prefix, external_dir_num);
join_path_components(filepath, temp, path);
}
else if (root)
join_path_components(filepath, root, path);
else
strcpy(filepath, path);
@ -1429,6 +1592,7 @@ dir_read_file_list(const char *root, const char *file_txt)
file->is_cfs = is_cfs ? true : false;
file->crc = (pg_crc32) crc;
file->compress_alg = parse_compress_alg(compress_alg_string);
file->external_dir_num = external_dir_num;
/*
* Optional fields
@ -1514,3 +1678,56 @@ pgFileSize(const char *path)
return buf.st_size;
}
/*
* Construct parray containing external directories paths
* from string like /path1:/path2
*/
parray *
make_external_directory_list(const char *colon_separated_dirs)
{
char *p;
parray *list = parray_new();
char *tmp = pg_strdup(colon_separated_dirs);
p = strtok(tmp, ":");
while (p != NULL)
{
if (is_absolute_path(p))
parray_append(list, pg_strdup(p));
else
elog(ERROR, "External directory \"%s\" is not an absolute path", p);
p = strtok(NULL, ":");
}
pfree(tmp);
parray_qsort(list, BlackListCompare);
return list;
}
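
This is the routine that turns an --external-dirs value such as /tmp/dir1:/tmp/dir2 into a sorted list of absolute paths. A rough standalone equivalent, with plain arrays and strdup standing in for parray and pg_strdup, and a POSIX-only leading-slash check standing in for is_absolute_path():

```c
/* Rough standalone equivalent of make_external_directory_list():
 * split a colon-separated list, require absolute paths, sort it.
 * Plain arrays and strdup stand in for parray/pg_strdup; the '/'
 * check is a POSIX-only stand-in for is_absolute_path(). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_DIRS 64

static int
cmp_str(const void *a, const void *b)
{
    return strcmp(*(const char * const *) a, *(const char * const *) b);
}

static int
split_external_dirs(const char *colon_separated_dirs, char **out)
{
    char       *tmp = strdup(colon_separated_dirs);
    char       *p = strtok(tmp, ":");
    int         n = 0;

    while (p != NULL && n < MAX_DIRS)
    {
        if (p[0] != '/')
        {
            fprintf(stderr, "External directory \"%s\" is not an absolute path\n", p);
            free(tmp);
            return -1;
        }
        out[n++] = strdup(p);
        p = strtok(NULL, ":");
    }
    free(tmp);
    qsort(out, n, sizeof(char *), cmp_str);
    return n;
}

int
main(void)
{
    char       *dirs[MAX_DIRS];
    int         n = split_external_dirs("/tmp/dir2:/tmp/dir1", dirs);

    for (int i = 0; i < n; i++)
        printf("external dir %d: %s\n", i + 1, dirs[i]);    /* dir1, then dir2 */
    return 0;
}
```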
/* Free memory of parray containing strings */
void
free_dir_list(parray *list)
{
parray_walk(list, pfree);
parray_free(list);
}
/* Append to string "path_prefix" int "dir_num" */
void
makeExternalDirPathByNum(char *ret_path, const char *path_prefix,
const int dir_num)
{
sprintf(ret_path, "%s%d", path_prefix, dir_num);
}
/* Check if "dir" presents in "dirs_list" */
bool
backup_contains_external(const char *dir, parray *dirs_list)
{
void *search_result;
if (!dirs_list) /* There is no external dirs in backup */
return false;
search_result = parray_bsearch(dirs_list, dir, BlackListCompare);
return search_result != NULL;
}
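
Together these helpers define where external files land inside a backup: directory number N is stored under a container whose name is the prefix plus N. A tiny sketch of that path construction; the prefix string below is only an assumed example that mirrors the EXTERNAL_DIR define from pg_probackup.h:

```c
/* Sketch of how makeExternalDirPathByNum() derives per-directory
 * containers inside a backup. The prefix used here is an assumed
 * example mirroring the EXTERNAL_DIR define
 * ("external_directories/externaldir") shown elsewhere in this change. */
#include <stdio.h>

static void
make_external_dir_path(char *ret_path, size_t sz,
                       const char *path_prefix, int dir_num)
{
    snprintf(ret_path, sz, "%s%d", path_prefix, dir_num);
}

int
main(void)
{
    char        path[1024];

    /* second entry of the sorted external directory list */
    make_external_dir_path(path, sizeof(path),
                           "<backup_dir>/external_directories/externaldir", 2);
    printf("%s\n", path);       /* ...external_directories/externaldir2 */
    return 0;
}
```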

src/help.c

@ -97,9 +97,9 @@ help_pg_probackup(void)
printf(_(" [--format=format]\n"));
printf(_("\n %s backup -B backup-path -b backup-mode --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [-C] [--stream [-S slot-name]] [--backup-pg-log]\n"));
printf(_(" [-j num-threads] [--archive-timeout=archive-timeout]\n"));
printf(_(" [--progress]\n"));
printf(_(" [-C] [--stream [-S slot-name]] [--temp-slot]\n"));
printf(_(" [--backup-pg-log] [-j num-threads]\n"));
printf(_(" [--archive-timeout=archive-timeout] [--progress]\n"));
printf(_(" [--log-level-console=log-level-console]\n"));
printf(_(" [--log-level-file=log-level-file]\n"));
printf(_(" [--log-filename=log-filename]\n"));
@ -119,19 +119,22 @@ help_pg_probackup(void)
printf(_(" [--master-port=port] [--master-user=user_name]\n"));
printf(_(" [--replica-timeout=timeout]\n"));
printf(_(" [--skip-block-validation]\n"));
printf(_(" [--external-dirs=external-directory-path]\n"));
printf(_("\n %s restore -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [-D pgdata-path] [-i backup-id] [--progress]\n"));
printf(_(" [-D pgdata-path] [-i backup-id] [-j num-threads]\n"));
printf(_(" [--time=time|--xid=xid|--lsn=lsn [--inclusive=boolean]]\n"));
printf(_(" [--timeline=timeline] [-T OLDDIR=NEWDIR]\n"));
printf(_(" [--timeline=timeline] [-T OLDDIR=NEWDIR] [--progress]\n"));
printf(_(" [--external-mapping=OLDDIR=NEWDIR]\n"));
printf(_(" [--immediate] [--recovery-target-name=target-name]\n"));
printf(_(" [--recovery-target-action=pause|promote|shutdown]\n"));
printf(_(" [--restore-as-replica]\n"));
printf(_(" [--no-validate]\n"));
printf(_(" [--skip-block-validation]\n"));
printf(_(" [--skip-external-dirs]\n"));
printf(_("\n %s validate -B backup-path [--instance=instance_name]\n"), PROGRAM_NAME);
printf(_(" [-i backup-id] [--progress]\n"));
printf(_(" [-i backup-id] [--progress] [-j num-threads]\n"));
printf(_(" [--time=time|--xid=xid|--lsn=lsn [--inclusive=boolean]]\n"));
printf(_(" [--recovery-target-name=target-name]\n"));
printf(_(" [--timeline=timeline]\n"));
@ -143,8 +146,9 @@ help_pg_probackup(void)
printf(_("\n %s delete -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [--wal] [-i backup-id | --expired]\n"));
printf(_("\n %s merge -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" -i backup-id\n"));
printf(_(" -i backup-id [--progress] [-j num-threads]\n"));
printf(_("\n %s add-instance -B backup-path -D pgdata-path\n"), PROGRAM_NAME);
printf(_(" --instance=instance_name\n"));
@ -186,9 +190,9 @@ static void
help_backup(void)
{
printf(_("%s backup -B backup-path -b backup-mode --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [-C] [--stream [-S slot-name]] [--backup-pg-log]\n"));
printf(_(" [-j num-threads] [--archive-timeout=archive-timeout]\n"));
printf(_(" [--progress]\n"));
printf(_(" [-C] [--stream [-S slot-name] [--temp-slot]\n"));
printf(_(" [--backup-pg-log] [-j num-threads]\n"));
printf(_(" [--archive-timeout=archive-timeout] [--progress]\n"));
printf(_(" [--log-level-console=log-level-console]\n"));
printf(_(" [--log-level-file=log-level-file]\n"));
printf(_(" [--log-filename=log-filename]\n"));
@ -207,7 +211,8 @@ help_backup(void)
printf(_(" [--master-db=db_name] [--master-host=host_name]\n"));
printf(_(" [--master-port=port] [--master-user=user_name]\n"));
printf(_(" [--replica-timeout=timeout]\n"));
printf(_(" [--skip-block-validation]\n\n"));
printf(_(" [--skip-block-validation]\n"));
printf(_(" [-E external-dirs=external-directory-path]\n\n"));
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
printf(_(" -b, --backup-mode=backup-mode backup mode=FULL|PAGE|DELTA|PTRACK\n"));
@ -215,11 +220,15 @@ help_backup(void)
printf(_(" -C, --smooth-checkpoint do smooth checkpoint before backup\n"));
printf(_(" --stream stream the transaction log and include it in the backup\n"));
printf(_(" -S, --slot=SLOTNAME replication slot to use\n"));
printf(_(" --backup-pg-log backup of pg_log directory\n"));
printf(_(" --temp-slot use temporary replication slot\n"));
printf(_(" --backup-pg-log backup of '%s' directory\n"), PG_LOG_DIR);
printf(_(" -j, --threads=NUM number of parallel threads\n"));
printf(_(" --archive-timeout=timeout wait timeout for WAL segment archiving (default: 5min)\n"));
printf(_(" --progress show progress\n"));
printf(_(" --skip-block-validation set to validate only file-level checksum\n"));
printf(_(" -E --external-dirs=external-directory-path\n"));
printf(_(" backup some directories not from pgdata \n"));
printf(_(" (example: --external-dirs=/tmp/dir1:/tmp/dir2)\n"));
printf(_("\n Logging options:\n"));
printf(_(" --log-level-console=log-level-console\n"));
@ -252,9 +261,9 @@ help_backup(void)
printf(_(" number of days of recoverability; 0 disables; (default: 0)\n"));
printf(_("\n Compression options:\n"));
printf(_(" --compress compress data files\n"));
printf(_(" --compress alias for --compress-algorithm='zlib' and --compress-level=1\n"));
printf(_(" --compress-algorithm=compress-algorithm\n"));
printf(_(" available options: 'zlib', 'pglz', 'none' (default: zlib)\n"));
printf(_(" available options: 'zlib', 'pglz', 'none' (default: none)\n"));
printf(_(" --compress-level=compress-level\n"));
printf(_(" level of compression [0-9] (default: 1)\n"));
@ -267,30 +276,33 @@ help_backup(void)
printf(_(" -W, --password force password prompt\n"));
printf(_("\n Replica options:\n"));
printf(_(" --master-user=user_name user name to connect to master\n"));
printf(_(" --master-db=db_name database to connect to master\n"));
printf(_(" --master-host=host_name database server host of master\n"));
printf(_(" --master-port=port database server port of master\n"));
printf(_(" --replica-timeout=timeout wait timeout for WAL segment streaming through replication (default: 5min)\n"));
printf(_(" --master-user=user_name user name to connect to master (deprecated)\n"));
printf(_(" --master-db=db_name database to connect to master (deprecated)\n"));
printf(_(" --master-host=host_name database server host of master (deprecated)\n"));
printf(_(" --master-port=port database server port of master (deprecated)\n"));
printf(_(" --replica-timeout=timeout wait timeout for WAL segment streaming through replication (deprecated)\n"));
}
static void
help_restore(void)
{
printf(_("%s restore -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [-D pgdata-path] [-i backup-id] [--progress]\n"));
printf(_(" [-D pgdata-path] [-i backup-id] [-j num-threads] [--progress]\n"));
printf(_(" [--time=time|--xid=xid|--lsn=lsn [--inclusive=boolean]]\n"));
printf(_(" [--timeline=timeline] [-T OLDDIR=NEWDIR]\n"));
printf(_(" [--external-mapping=OLDDIR=NEWDIR]\n"));
printf(_(" [--immediate] [--recovery-target-name=target-name]\n"));
printf(_(" [--recovery-target-action=pause|promote|shutdown]\n"));
printf(_(" [--restore-as-replica] [--no-validate]\n\n"));
printf(_(" [--skip-block-validation]\n\n"));
printf(_(" [--restore-as-replica] [--no-validate]\n"));
printf(_(" [--skip-block-validation]\n"));
printf(_(" [--skip-external-dirs]\n\n"));
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
printf(_(" --instance=instance_name name of the instance\n"));
printf(_(" -D, --pgdata=pgdata-path location of the database storage area\n"));
printf(_(" -i, --backup-id=backup-id backup to restore\n"));
printf(_(" -j, --threads=NUM number of parallel threads\n"));
printf(_(" --progress show progress\n"));
printf(_(" --time=time time stamp up to which recovery will proceed\n"));
@ -300,6 +312,8 @@ help_restore(void)
printf(_(" --timeline=timeline recovering into a particular timeline\n"));
printf(_(" -T, --tablespace-mapping=OLDDIR=NEWDIR\n"));
printf(_(" relocate the tablespace from directory OLDDIR to NEWDIR\n"));
printf(_(" --external-mapping=OLDDIR=NEWDIR\n"));
printf(_(" relocate the external directory from OLDDIR to NEWDIR\n"));
printf(_(" --immediate end recovery as soon as a consistent state is reached\n"));
printf(_(" --recovery-target-name=target-name\n"));
@ -312,6 +326,7 @@ help_restore(void)
printf(_(" to ease setting up a standby server\n"));
printf(_(" --no-validate disable backup validation during restore\n"));
printf(_(" --skip-block-validation set to validate only file-level checksum\n"));
printf(_(" --skip-external-dirs do not restore all external directories\n"));
printf(_("\n Logging options:\n"));
printf(_(" --log-level-console=log-level-console\n"));
@ -349,6 +364,7 @@ help_validate(void)
printf(_(" -i, --backup-id=backup-id backup to validate\n"));
printf(_(" --progress show progress\n"));
printf(_(" -j, --threads=NUM number of parallel threads\n"));
printf(_(" --time=time time stamp up to which recovery will proceed\n"));
printf(_(" --xid=xid transaction ID up to which recovery will proceed\n"));
printf(_(" --lsn=lsn LSN of the write-ahead log location up to which recovery will proceed\n"));
@ -488,11 +504,15 @@ help_set_config(void)
printf(_(" [-d dbname] [-h host] [-p port] [-U username]\n"));
printf(_(" [--master-db=db_name] [--master-host=host_name]\n"));
printf(_(" [--master-port=port] [--master-user=user_name]\n"));
printf(_(" [--replica-timeout=timeout]\n\n"));
printf(_(" [--archive-timeout=timeout]\n\n"));
printf(_(" [--replica-timeout=timeout]\n"));
printf(_(" [--archive-timeout=timeout]\n"));
printf(_(" [-E external-dirs=external-directory-path]\n\n"));
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
printf(_(" --instance=instance_name name of the instance\n"));
printf(_(" -E --external-dirs=external-directory-path\n"));
printf(_(" backup some directories not from pgdata \n"));
printf(_(" (example: --external-dirs=/tmp/dir1:/tmp/dir2)\n"));
printf(_("\n Logging options:\n"));
printf(_(" --log-level-console=log-level-console\n"));
@ -522,8 +542,9 @@ help_set_config(void)
printf(_(" number of days of recoverability; 0 disables; (default: 0)\n"));
printf(_("\n Compression options:\n"));
printf(_(" --compress alias for --compress-algorithm='zlib' and --compress-level=1\n"));
printf(_(" --compress-algorithm=compress-algorithm\n"));
printf(_(" available options: 'zlib','pglz','none'\n"));
printf(_(" available options: 'zlib','pglz','none' (default: 'none')\n"));
printf(_(" --compress-level=compress-level\n"));
printf(_(" level of compression [0-9] (default: 1)\n"));
@ -533,14 +554,15 @@ help_set_config(void)
printf(_(" -h, --host=HOSTNAME database server host or socket directory(default: 'local socket')\n"));
printf(_(" -p, --port=PORT database server port (default: 5432)\n"));
printf(_("\n Replica options:\n"));
printf(_(" --master-user=user_name user name to connect to master\n"));
printf(_(" --master-db=db_name database to connect to master\n"));
printf(_(" --master-host=host_name database server host of master\n"));
printf(_(" --master-port=port database server port of master\n"));
printf(_(" --replica-timeout=timeout wait timeout for WAL segment streaming through replication (default: 5min)\n"));
printf(_("\n Archive options:\n"));
printf(_(" --archive-timeout=timeout wait timeout for WAL segment archiving (default: 5min)\n"));
printf(_("\n Replica options:\n"));
printf(_(" --master-user=user_name user name to connect to master (deprecated)\n"));
printf(_(" --master-db=db_name database to connect to master (deprecated)\n"));
printf(_(" --master-host=host_name database server host of master (deprecated)\n"));
printf(_(" --master-port=port database server port of master (deprecated)\n"));
printf(_(" --replica-timeout=timeout wait timeout for WAL segment streaming through replication (deprecated)\n"));
}
static void
@ -558,11 +580,15 @@ static void
help_add_instance(void)
{
printf(_("%s add-instance -B backup-path -D pgdata-path\n"), PROGRAM_NAME);
printf(_(" --instance=instance_name\n\n"));
printf(_(" --instance=instance_name\n"));
printf(_(" -E external-dirs=external-directory-path\n\n"));
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
printf(_(" -D, --pgdata=pgdata-path location of the database storage area\n"));
printf(_(" --instance=instance_name name of the new instance\n"));
printf(_(" -E --external-dirs=external-directory-path\n"));
printf(_(" backup some directories not from pgdata \n"));
printf(_(" (example: --external-dirs=/tmp/dir1:/tmp/dir2)\n"));
}
static void
@ -591,9 +617,9 @@ help_archive_push(void)
printf(_(" relative path name of the WAL file on the server\n"));
printf(_(" --wal-file-name=wal-file-name\n"));
printf(_(" name of the WAL file to retrieve from the server\n"));
printf(_(" --compress compress WAL file during archiving\n"));
printf(_(" --compress alias for --compress-algorithm='zlib' and --compress-level=1\n"));
printf(_(" --compress-algorithm=compress-algorithm\n"));
printf(_(" available options: 'zlib','none'\n"));
printf(_(" available options: 'zlib', 'none' (default: 'none')\n"));
printf(_(" --compress-level=compress-level\n"));
printf(_(" level of compression [0-9] (default: 1)\n"));
printf(_(" --overwrite overwrite archived WAL file\n"));

src/init.c

@ -104,7 +104,7 @@ do_add_instance(void)
config_set_opt(instance_options, &instance_config.xlog_seg_size,
SOURCE_FILE);
/* pgdata was set through command line */
do_set_config();
do_set_config(true);
elog(INFO, "Instance '%s' successfully inited", instance_name);
return 0;

src/merge.c

@ -18,12 +18,14 @@ typedef struct
{
parray *to_files;
parray *files;
parray *from_external;
pgBackup *to_backup;
pgBackup *from_backup;
const char *to_root;
const char *from_root;
const char *to_external_prefix;
const char *from_external_prefix;
/*
* Return value from the thread.
@ -34,6 +36,11 @@ typedef struct
static void merge_backups(pgBackup *backup, pgBackup *next_backup);
static void *merge_files(void *arg);
static void
reorder_external_dirs(pgBackup *to_backup, parray *to_external,
parray *from_external);
static int
get_external_index(const char *key, const parray *list);
/*
* Implementation of MERGE command.
@ -46,12 +53,10 @@ void
do_merge(time_t backup_id)
{
parray *backups;
parray *merge_list = parray_new();
pgBackup *dest_backup = NULL;
pgBackup *full_backup = NULL;
time_t prev_parent = INVALID_BACKUP_ID;
int i;
int dest_backup_idx = 0;
int full_backup_idx = 0;
if (backup_id == INVALID_BACKUP_ID)
elog(ERROR, "required parameter is not specified: --backup-id");
@ -61,76 +66,82 @@ do_merge(time_t backup_id)
elog(INFO, "Merge started");
catalog_lock();
/* Get list of all backups sorted in order of descending start time */
backups = catalog_get_backup_list(INVALID_BACKUP_ID);
/* Find destination and parent backups */
/* Find destination backup first */
for (i = 0; i < parray_num(backups); i++)
{
pgBackup *backup = (pgBackup *) parray_get(backups, i);
if (backup->start_time > backup_id)
continue;
else if (backup->start_time == backup_id && !dest_backup)
/* found target */
if (backup->start_time == backup_id)
{
/* sanity */
if (backup->status != BACKUP_STATUS_OK &&
/* It is possible that previous merging was interrupted */
backup->status != BACKUP_STATUS_MERGING &&
backup->status != BACKUP_STATUS_DELETING)
elog(ERROR, "Backup %s has status: %s",
base36enc(backup->start_time), status2str(backup->status));
elog(ERROR, "Backup %s has status: %s",
base36enc(backup->start_time), status2str(backup->status));
if (backup->backup_mode == BACKUP_MODE_FULL)
elog(ERROR, "Backup %s is full backup",
base36enc(backup->start_time));
dest_backup = backup;
dest_backup_idx = i;
break;
}
else
{
if (dest_backup == NULL)
elog(ERROR, "Target backup %s was not found", base36enc(backup_id));
if (backup->start_time != prev_parent)
continue;
if (backup->status != BACKUP_STATUS_OK &&
/* It is possible that previous merging was interrupted */
backup->status != BACKUP_STATUS_MERGING)
elog(ERROR, "Backup %s has status: %s",
base36enc(backup->start_time), status2str(backup->status));
/* If we already found dest_backup, look for full backup */
if (dest_backup && backup->backup_mode == BACKUP_MODE_FULL)
{
full_backup = backup;
full_backup_idx = i;
/* Found target and full backups, so break the loop */
break;
}
}
prev_parent = backup->parent_backup;
}
/* sanity */
if (dest_backup == NULL)
elog(ERROR, "Target backup %s was not found", base36enc(backup_id));
/* get full backup */
full_backup = find_parent_full_backup(dest_backup);
/* sanity */
if (full_backup == NULL)
elog(ERROR, "Parent full backup for the given backup %s was not found",
base36enc(backup_id));
Assert(full_backup_idx != dest_backup_idx);
/* sanity */
if (full_backup->status != BACKUP_STATUS_OK &&
/* It is possible that previous merging was interrupted */
full_backup->status != BACKUP_STATUS_MERGING)
elog(ERROR, "Backup %s has status: %s",
base36enc(full_backup->start_time), status2str(full_backup->status));
//Assert(full_backup_idx != dest_backup_idx);
/* form merge list */
while(dest_backup->parent_backup_link)
{
/* sanity */
if (dest_backup->status != BACKUP_STATUS_OK &&
/* It is possible that previous merging was interrupted */
dest_backup->status != BACKUP_STATUS_MERGING &&
dest_backup->status != BACKUP_STATUS_DELETING)
elog(ERROR, "Backup %s has status: %s",
base36enc(dest_backup->start_time), status2str(dest_backup->status));
parray_append(merge_list, dest_backup);
dest_backup = dest_backup->parent_backup_link;
}
/* Add FULL backup for easy locking */
parray_append(merge_list, full_backup);
/* Lock merge chain */
catalog_lock_backup_list(merge_list, parray_num(merge_list) - 1, 0);
/*
* Found target and full backups, merge them and intermediate backups
*/
for (i = full_backup_idx; i > dest_backup_idx; i--)
for (i = parray_num(merge_list) - 2; i >= 0; i--)
{
pgBackup *from_backup = (pgBackup *) parray_get(backups, i - 1);
pgBackup *from_backup = (pgBackup *) parray_get(merge_list, i);
merge_backups(full_backup, from_backup);
}
@ -142,6 +153,7 @@ do_merge(time_t backup_id)
/* cleanup */
parray_walk(backups, pgBackupFree);
parray_free(backups);
parray_free(merge_list);
elog(INFO, "Merge of backup %s completed", base36enc(backup_id));
}
@ -159,16 +171,22 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup)
*from_backup_id = base36enc_dup(from_backup->start_time);
char to_backup_path[MAXPGPATH],
to_database_path[MAXPGPATH],
to_external_prefix[MAXPGPATH],
from_backup_path[MAXPGPATH],
from_database_path[MAXPGPATH],
from_external_prefix[MAXPGPATH],
control_file[MAXPGPATH];
parray *files,
*to_files;
parray *to_external = NULL,
*from_external = NULL;
pthread_t *threads = NULL;
merge_files_arg *threads_args = NULL;
int i;
time_t merge_time;
bool merge_isok = true;
merge_time = time(NULL);
elog(INFO, "Merging backup %s with backup %s", from_backup_id, to_backup_id);
/*
@ -199,19 +217,20 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup)
pgBackupGetPath(to_backup, to_backup_path, lengthof(to_backup_path), NULL);
pgBackupGetPath(to_backup, to_database_path, lengthof(to_database_path),
DATABASE_DIR);
pgBackupGetPath(to_backup, to_external_prefix, lengthof(to_database_path),
EXTERNAL_DIR);
pgBackupGetPath(from_backup, from_backup_path, lengthof(from_backup_path), NULL);
pgBackupGetPath(from_backup, from_database_path, lengthof(from_database_path),
DATABASE_DIR);
pgBackupGetPath(from_backup, from_external_prefix, lengthof(from_database_path),
EXTERNAL_DIR);
/*
* Get list of files which will be modified or removed.
*/
pgBackupGetPath(to_backup, control_file, lengthof(control_file),
DATABASE_FILE_LIST);
to_files = dir_read_file_list(from_database_path, /* Use from_database_path
* so root path will be
* equal with 'files' */
control_file);
to_files = dir_read_file_list(NULL, NULL, control_file);
/* To delete from leaf, sort in reversed order */
parray_qsort(to_files, pgFileComparePathDesc);
/*
@ -219,7 +238,7 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup)
*/
pgBackupGetPath(from_backup, control_file, lengthof(control_file),
DATABASE_FILE_LIST);
files = dir_read_file_list(from_database_path, control_file);
files = dir_read_file_list(NULL, NULL, control_file);
/* sort by size for load balancing */
parray_qsort(files, pgFileCompareSize);
@ -230,25 +249,47 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup)
if (from_backup->status == BACKUP_STATUS_DELETING)
goto delete_source_backup;
to_backup->status = BACKUP_STATUS_MERGING;
write_backup_status(to_backup);
from_backup->status = BACKUP_STATUS_MERGING;
write_backup_status(from_backup);
write_backup_status(to_backup, BACKUP_STATUS_MERGING);
write_backup_status(from_backup, BACKUP_STATUS_MERGING);
create_data_directories(to_database_path, from_backup_path, false);
threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads);
threads_args = (merge_files_arg *) palloc(sizeof(merge_files_arg) * num_threads);
/* Create external directories lists */
if (to_backup->external_dir_str)
to_external = make_external_directory_list(to_backup->external_dir_str);
if (from_backup->external_dir_str)
from_external = make_external_directory_list(from_backup->external_dir_str);
/*
* Rename external directories in to_backup (if they exist)
* according to the numbering of external dirs in from_backup.
*/
if (to_external)
reorder_external_dirs(to_backup, to_external, from_external);
/* Setup threads */
for (i = 0; i < parray_num(files); i++)
{
pgFile *file = (pgFile *) parray_get(files, i);
/* if the entry was an external directory, create it in the backup */
if (file->external_dir_num && S_ISDIR(file->mode))
{
char dirpath[MAXPGPATH];
char new_container[MAXPGPATH];
makeExternalDirPathByNum(new_container, to_external_prefix,
file->external_dir_num);
join_path_components(dirpath, new_container, file->path);
dir_create_dir(dirpath, DIR_PERMISSION);
}
pg_atomic_init_flag(&file->lock);
}
thread_interrupted = false;
for (i = 0; i < num_threads; i++)
{
merge_files_arg *arg = &(threads_args[i]);
@ -259,6 +300,9 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup)
arg->from_backup = from_backup;
arg->to_root = to_database_path;
arg->from_root = from_database_path;
arg->from_external = from_external;
arg->to_external_prefix = to_external_prefix;
arg->from_external_prefix = from_external_prefix;
/* By default there are some error */
arg->ret = 1;
@ -281,16 +325,23 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup)
* Update to_backup metadata.
*/
to_backup->status = BACKUP_STATUS_OK;
StrNCpy(to_backup->program_version, PROGRAM_VERSION,
sizeof(to_backup->program_version));
to_backup->parent_backup = INVALID_BACKUP_ID;
to_backup->start_lsn = from_backup->start_lsn;
to_backup->stop_lsn = from_backup->stop_lsn;
to_backup->recovery_time = from_backup->recovery_time;
to_backup->recovery_xid = from_backup->recovery_xid;
pfree(to_backup->external_dir_str);
to_backup->external_dir_str = from_backup->external_dir_str;
from_backup->external_dir_str = NULL; /* For safe pgBackupFree() */
to_backup->merge_time = merge_time;
to_backup->end_time = time(NULL);
/*
* If one of the backups isn't "stream" backup then the target backup become
* non-stream backup too.
* Target backup must inherit wal mode too.
*/
to_backup->stream = to_backup->stream && from_backup->stream;
to_backup->stream = from_backup->stream;
/* Compute summary of size of regular files in the backup */
to_backup->data_bytes = 0;
for (i = 0; i < parray_num(files); i++)
@ -311,7 +362,8 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup)
else
to_backup->wal_bytes = BYTES_INVALID;
write_backup_filelist(to_backup, files, from_database_path);
write_backup_filelist(to_backup, files, from_database_path,
from_external_prefix, NULL);
write_backup(to_backup);
delete_source_backup:
@ -329,10 +381,28 @@ delete_source_backup:
{
pgFile *file = (pgFile *) parray_get(to_files, i);
if (file->external_dir_num && to_external)
{
char *dir_name = parray_get(to_external, file->external_dir_num - 1);
if (backup_contains_external(dir_name, from_external))
/* Dir already removed*/
continue;
}
if (parray_bsearch(files, file, pgFileComparePathDesc) == NULL)
{
char to_file_path[MAXPGPATH];
char *prev_path;
/* We need full path, file object has relative path */
join_path_components(to_file_path, to_database_path, file->path);
prev_path = file->path;
file->path = to_file_path;
pgFileDelete(file);
elog(VERBOSE, "Deleted \"%s\"", file->path);
file->path = prev_path;
}
}
@ -378,19 +448,21 @@ merge_files(void *arg)
pgBackup *from_backup = argument->from_backup;
int i,
num_files = parray_num(argument->files);
int to_root_len = strlen(argument->to_root);
for (i = 0; i < num_files; i++)
{
pgFile *file = (pgFile *) parray_get(argument->files, i);
pgFile *to_file;
pgFile **res_file;
char to_file_path[MAXPGPATH]; /* Path of target file */
char from_file_path[MAXPGPATH];
char *prev_file_path;
if (!pg_atomic_test_set_flag(&file->lock))
continue;
/* check for interrupt */
if (interrupted)
if (interrupted || thread_interrupted)
elog(ERROR, "Interrupted during merging backups");
/* Directories were created before */
@ -402,9 +474,11 @@ merge_files(void *arg)
i + 1, num_files, file->path);
res_file = parray_bsearch(argument->to_files, file,
pgFileComparePathDesc);
pgFileComparePathWithExternalDesc);
to_file = (res_file) ? *res_file : NULL;
join_path_components(to_file_path, argument->to_root, file->path);
/*
* Skip files which haven't changed since previous backup. But in case
* of DELTA backup we should consider n_blocks to truncate the target
@ -423,26 +497,44 @@ merge_files(void *arg)
{
file->compress_alg = to_file->compress_alg;
file->write_size = to_file->write_size;
file->crc = to_file->crc;
/*
* Recalculate crc for backup prior to 2.0.25.
*/
if (parse_program_version(from_backup->program_version) < 20025)
file->crc = pgFileGetCRC(to_file_path, true, true, NULL);
/* Otherwise just get it from the previous file */
else
file->crc = to_file->crc;
}
continue;
}
/* We need to make full path, file object has relative path */
if (file->external_dir_num)
{
char temp[MAXPGPATH];
makeExternalDirPathByNum(temp, argument->from_external_prefix,
file->external_dir_num);
join_path_components(from_file_path, temp, file->path);
}
else
join_path_components(from_file_path, argument->from_root,
file->path);
prev_file_path = file->path;
file->path = from_file_path;
/*
* Move the file. We need to decompress it and compress again if
* necessary.
*/
elog(VERBOSE, "Moving file \"%s\", is_datafile %d, is_cfs %d",
elog(VERBOSE, "Merging file \"%s\", is_datafile %d, is_cfs %d",
file->path, file->is_datafile, file->is_cfs);
if (file->is_datafile && !file->is_cfs)
{
char to_path_tmp[MAXPGPATH]; /* Path of target file */
join_path_components(to_path_tmp, argument->to_root,
file->path + to_root_len + 1);
/*
* We need more complicate algorithm if target file should be
* compressed.
@ -453,7 +545,7 @@ merge_files(void *arg)
char tmp_file_path[MAXPGPATH];
char *prev_path;
snprintf(tmp_file_path, MAXPGPATH, "%s_tmp", to_path_tmp);
snprintf(tmp_file_path, MAXPGPATH, "%s_tmp", to_file_path);
/* Start the magic */
@ -479,7 +571,7 @@ merge_files(void *arg)
* need the file in directory to_root.
*/
prev_path = to_file->path;
to_file->path = to_path_tmp;
to_file->path = to_file_path;
/* Decompress target file into temporary one */
restore_data_file(tmp_file_path, to_file, false, false,
parse_program_version(to_backup->program_version));
@ -494,7 +586,7 @@ merge_files(void *arg)
false,
parse_program_version(from_backup->program_version));
elog(VERBOSE, "Compress file and save it to the directory \"%s\"",
elog(VERBOSE, "Compress file and save it into the directory \"%s\"",
argument->to_root);
/* Again we need to change path */
@ -504,7 +596,7 @@ merge_files(void *arg)
file->size = pgFileSize(file->path);
/* Now we can compress the file */
backup_data_file(NULL, /* We shouldn't need 'arguments' here */
to_path_tmp, file,
to_file_path, file,
to_backup->start_lsn,
to_backup->backup_mode,
to_backup->compress_alg,
@ -523,7 +615,7 @@ merge_files(void *arg)
else
{
/* We can merge in-place here */
restore_data_file(to_path_tmp, file,
restore_data_file(to_file_path, file,
from_backup->backup_mode == BACKUP_MODE_DIFF_DELTA,
true,
parse_program_version(from_backup->program_version));
@ -532,10 +624,27 @@ merge_files(void *arg)
* We need to calculate write_size, restore_data_file() doesn't
* do that.
*/
file->write_size = pgFileSize(to_path_tmp);
file->crc = pgFileGetCRC(to_path_tmp, true, true, NULL);
file->write_size = pgFileSize(to_file_path);
file->crc = pgFileGetCRC(to_file_path, true, true, NULL);
}
}
else if (file->external_dir_num)
{
char from_root[MAXPGPATH];
char to_root[MAXPGPATH];
int new_dir_num;
char *file_external_path = parray_get(argument->from_external,
file->external_dir_num - 1);
Assert(argument->from_external);
new_dir_num = get_external_index(file_external_path,
argument->from_external);
makeExternalDirPathByNum(from_root, argument->from_external_prefix,
file->external_dir_num);
makeExternalDirPathByNum(to_root, argument->to_external_prefix,
new_dir_num);
copy_file(from_root, to_root, file);
}
else if (strcmp(file->name, "pg_control") == 0)
copy_pgcontrol_file(argument->from_root, argument->to_root, file);
else
@ -548,8 +657,11 @@ merge_files(void *arg)
file->compress_alg = to_backup->compress_alg;
if (file->write_size != BYTES_INVALID)
elog(LOG, "Moved file \"%s\": " INT64_FORMAT " bytes",
elog(LOG, "Merged file \"%s\": " INT64_FORMAT " bytes",
file->path, file->write_size);
/* Restore relative path */
file->path = prev_file_path;
}
/* Data files merging is successful */
@ -557,3 +669,66 @@ merge_files(void *arg)
return NULL;
}
/* Recursively delete a directory and its contents */
static void
remove_dir_with_files(const char *path)
{
parray *files = parray_new();
dir_list_file(files, path, true, true, true, 0);
parray_qsort(files, pgFileComparePathDesc);
for (int i = 0; i < parray_num(files); i++)
{
pgFile *file = (pgFile *) parray_get(files, i);
pgFileDelete(file);
elog(VERBOSE, "Deleted \"%s\"", file->path);
}
}
/* Get index of external directory */
static int
get_external_index(const char *key, const parray *list)
{
if (!list) /* Nowhere to search */
return -1;
for (int i = 0; i < parray_num(list); i++)
{
if (strcmp(key, parray_get(list, i)) == 0)
return i + 1;
}
return -1;
}
/* Rename directories in to_backup according to order in from_external */
static void
reorder_external_dirs(pgBackup *to_backup, parray *to_external,
parray *from_external)
{
char externaldir_template[MAXPGPATH];
pgBackupGetPath(to_backup, externaldir_template,
lengthof(externaldir_template), EXTERNAL_DIR);
for (int i = 0; i < parray_num(to_external); i++)
{
int from_num = get_external_index(parray_get(to_external, i),
from_external);
if (from_num == -1)
{
char old_path[MAXPGPATH];
makeExternalDirPathByNum(old_path, externaldir_template, i + 1);
remove_dir_with_files(old_path);
}
else if (from_num != i + 1)
{
char old_path[MAXPGPATH];
char new_path[MAXPGPATH];
makeExternalDirPathByNum(old_path, externaldir_template, i + 1);
makeExternalDirPathByNum(new_path, externaldir_template, from_num);
elog(VERBOSE, "Rename %s to %s", old_path, new_path);
if (rename (old_path, new_path) == -1)
elog(ERROR, "Could not rename directory \"%s\" to \"%s\": %s",
old_path, new_path, strerror(errno));
}
}
}
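
In effect reorder_external_dirs() reconciles two numberings: each external directory of the full backup is either removed (if the incremental backup no longer lists it) or renamed to the number it carries in the incremental's list. A compact dry-run sketch of that decision, printing the intended actions instead of touching the filesystem:

```c
/* Dry-run sketch of the renumbering decision in reorder_external_dirs():
 * for each external dir of the full ("to") backup, either drop its
 * container or rename it to its index in the incremental ("from") list.
 * Prints the actions instead of calling remove/rename. */
#include <stdio.h>
#include <string.h>

static int
get_index(const char *key, const char **list, int n)
{
    for (int i = 0; i < n; i++)
        if (strcmp(key, list[i]) == 0)
            return i + 1;       /* external dir numbers are 1-based */
    return -1;
}

int
main(void)
{
    const char *to_external[]   = {"/etc/app", "/opt/scripts", "/var/conf"};
    const char *from_external[] = {"/opt/scripts", "/var/conf"};
    int         n_to = 3,
                n_from = 2;

    for (int i = 0; i < n_to; i++)
    {
        int         from_num = get_index(to_external[i], from_external, n_from);

        if (from_num == -1)
            printf("externaldir%d (%s): remove, not in incremental backup\n",
                   i + 1, to_external[i]);
        else if (from_num != i + 1)
            printf("externaldir%d (%s): rename to externaldir%d\n",
                   i + 1, to_external[i], from_num);
        else
            printf("externaldir%d (%s): keep as is\n", i + 1, to_external[i]);
    }
    return 0;
}
```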

File diff suppressed because it is too large

src/pg_probackup.c

@ -19,7 +19,7 @@
#include "utils/thread.h"
#include <time.h>
const char *PROGRAM_VERSION = "2.0.25";
const char *PROGRAM_VERSION = "2.0.27";
const char *PROGRAM_URL = "https://github.com/postgrespro/pg_probackup";
const char *PROGRAM_EMAIL = "https://github.com/postgrespro/pg_probackup/issues";
@ -55,6 +55,8 @@ char backup_instance_path[MAXPGPATH];
*/
char arclog_path[MAXPGPATH] = "";
/* colon separated external directories list ("/path1:/path2") */
char *externaldir = NULL;
/* common options */
static char *backup_id_string = NULL;
int num_threads = 1;
@ -63,6 +65,7 @@ bool progress = false;
#if PG_VERSION_NUM >= 100000
char *replication_slot = NULL;
#endif
bool temp_slot = false;
/* backup options */
bool backup_logs = false;
@ -70,10 +73,10 @@ bool smooth_checkpoint;
bool is_remote_backup = false;
/* restore options */
static char *target_time;
static char *target_xid;
static char *target_lsn;
static char *target_inclusive;
static char *target_time = NULL;
static char *target_xid = NULL;
static char *target_lsn = NULL;
static char *target_inclusive = NULL;
static TimeLineID target_tli;
static bool target_immediate;
static char *target_name = NULL;
@ -85,6 +88,7 @@ bool restore_as_replica = false;
bool restore_no_validate = false;
bool skip_block_validation = false;
bool skip_external_dirs = false;
bool do_block_validation = false;
bool do_amcheck = false;
@ -137,6 +141,7 @@ static ConfigOption cmd_options[] =
{ 'f', 'b', "backup-mode", opt_backup_mode, SOURCE_CMD_STRICT },
{ 'b', 'C', "smooth-checkpoint", &smooth_checkpoint, SOURCE_CMD_STRICT },
{ 's', 'S', "slot", &replication_slot, SOURCE_CMD_STRICT },
{ 'b', 234, "temp-slot", &temp_slot, SOURCE_CMD_STRICT },
{ 'b', 134, "delete-wal", &delete_wal, SOURCE_CMD_STRICT },
{ 'b', 135, "delete-expired", &delete_expired, SOURCE_CMD_STRICT },
/* TODO not completed feature. Make it unavailable from user level
@ -147,6 +152,7 @@ static ConfigOption cmd_options[] =
{ 's', 138, "inclusive", &target_inclusive, SOURCE_CMD_STRICT },
{ 'u', 139, "timeline", &target_tli, SOURCE_CMD_STRICT },
{ 'f', 'T', "tablespace-mapping", opt_tablespace_map, SOURCE_CMD_STRICT },
{ 'f', 155, "external-mapping", opt_externaldir_map, SOURCE_CMD_STRICT },
{ 'b', 140, "immediate", &target_immediate, SOURCE_CMD_STRICT },
{ 's', 141, "recovery-target-name", &target_name, SOURCE_CMD_STRICT },
{ 's', 142, "recovery-target-action", &target_action, SOURCE_CMD_STRICT },
@ -154,9 +160,10 @@ static ConfigOption cmd_options[] =
{ 'b', 143, "no-validate", &restore_no_validate, SOURCE_CMD_STRICT },
{ 's', 144, "lsn", &target_lsn, SOURCE_CMD_STRICT },
{ 'b', 154, "skip-block-validation", &skip_block_validation, SOURCE_CMD_STRICT },
{ 'b', 156, "skip-external-dirs", &skip_external_dirs, SOURCE_CMD_STRICT },
/* checkdb options */
{ 'b', 155, "amcheck", &do_amcheck, SOURCE_CMD_STRICT },
{ 'b', 156, "block-validation", &do_block_validation, SOURCE_CMD_STRICT },
{ 'b', 157, "amcheck", &do_amcheck, SOURCE_CMD_STRICT },
{ 'b', 158, "block-validation", &do_block_validation, SOURCE_CMD_STRICT },
/* delete options */
{ 'b', 145, "wal", &delete_wal, SOURCE_CMD_STRICT },
{ 'b', 146, "expired", &delete_expired, SOURCE_CMD_STRICT },
@ -377,16 +384,23 @@ main(int argc, char *argv[])
}
/*
* Read options from env variables or from config file */
if ((backup_path != NULL) && instance_name)
* We read options from command line, now we need to read them from
* configuration file since we got backup path and instance name.
* For some commands an instance option isn't required, see above.
*/
if (instance_name)
{
char config_path[MAXPGPATH];
char path[MAXPGPATH];
/* Read environment variables */
config_get_opt_env(instance_options);
/* Read options from configuration file */
join_path_components(config_path, backup_instance_path, BACKUP_CATALOG_CONF_FILE);
config_read_opt(config_path, instance_options, ERROR, true);
if (backup_subcmd != ADD_INSTANCE_CMD)
{
join_path_components(path, backup_instance_path,
BACKUP_CATALOG_CONF_FILE);
config_read_opt(path, instance_options, ERROR, true, false);
}
}
/* Just read environment variables */
@ -416,6 +430,8 @@ main(int argc, char *argv[])
* We have read pgdata path from command line or from configuration file.
* Ensure that pgdata is an absolute path.
*/
if (instance_config.pgdata != NULL)
canonicalize_path(instance_config.pgdata);
if (instance_config.pgdata != NULL &&
!is_absolute_path(instance_config.pgdata))
elog(ERROR, "-D, --pgdata must be an absolute path");
@ -461,7 +477,7 @@ main(int argc, char *argv[])
for (i = 0; pgdata_exclude_dir[i]; i++); /* find first empty slot */
/* Set 'pg_log' in first empty slot */
pgdata_exclude_dir[i] = "pg_log";
pgdata_exclude_dir[i] = PG_LOG_DIR;
}
if (backup_subcmd == VALIDATE_CMD || backup_subcmd == RESTORE_CMD)
@ -537,7 +553,7 @@ main(int argc, char *argv[])
do_show_config();
break;
case SET_CONFIG_CMD:
do_set_config();
do_set_config(false);
break;
case CHECKDB_CMD:
do_checkdb(do_block_validation, do_amcheck);
@ -600,13 +616,13 @@ compress_init(void)
if (instance_config.compress_level < 0 || instance_config.compress_level > 9)
elog(ERROR, "--compress-level value must be in the range from 0 to 9");
if (instance_config.compress_level == 0)
instance_config.compress_alg = NOT_DEFINED_COMPRESS;
if (instance_config.compress_alg == ZLIB_COMPRESS && instance_config.compress_level == 0)
elog(WARNING, "Compression level 0 will lead to data bloat!");
if (backup_subcmd == BACKUP_CMD || backup_subcmd == ARCHIVE_PUSH_CMD)
{
#ifndef HAVE_LIBZ
if (compress_alg == ZLIB_COMPRESS)
if (instance_config.compress_alg == ZLIB_COMPRESS)
elog(ERROR, "This build does not support zlib compression");
else
#endif

src/pg_probackup.h

@ -12,6 +12,7 @@
#include "postgres_fe.h"
#include "libpq-fe.h"
#include "libpq-int.h"
#include "access/xlog_internal.h"
#include "utils/pg_crc.h"
@ -32,22 +33,25 @@
#include "datapagemap.h"
/* Directory/File names */
#define DATABASE_DIR "database"
#define DATABASE_DIR "database"
#define BACKUPS_DIR "backups"
#if PG_VERSION_NUM >= 100000
#define PG_XLOG_DIR "pg_wal"
#define PG_LOG_DIR "log"
#else
#define PG_XLOG_DIR "pg_xlog"
#define PG_LOG_DIR "pg_log"
#endif
#define PG_TBLSPC_DIR "pg_tblspc"
#define PG_GLOBAL_DIR "global"
#define BACKUP_CONTROL_FILE "backup.control"
#define BACKUP_CATALOG_CONF_FILE "pg_probackup.conf"
#define BACKUP_CATALOG_PID "pg_probackup.pid"
#define BACKUP_CATALOG_PID "backup.pid"
#define DATABASE_FILE_LIST "backup_content.control"
#define PG_BACKUP_LABEL_FILE "backup_label"
#define PG_BLACK_LIST "black_list"
#define PG_TABLESPACE_MAP_FILE "tablespace_map"
#define EXTERNAL_DIR "external_directories/externaldir"
/* Timeout defaults */
#define ARCHIVE_TIMEOUT_DEFAULT 300
@ -121,6 +125,7 @@ typedef struct pgFile
int n_blocks; /* size of the file in blocks, read during DELTA backup */
bool is_cfs; /* Flag to distinguish files compressed by CFS*/
bool is_database;
int external_dir_num; /* Number of external directory. 0 if not external */
bool exists_in_prev; /* Mark files, both data and regular, that exists in previous backup */
CompressAlg compress_alg; /* compression algorithm applied to the file */
volatile pg_atomic_flag lock; /* lock for synchronization of parallel threads */
@ -175,7 +180,8 @@ typedef enum ShowFormat
/* special values of pgBackup fields */
#define INVALID_BACKUP_ID 0 /* backup ID is not provided by user */
#define BYTES_INVALID (-1)
#define BYTES_INVALID (-1) /* file didn't change since previous backup; DELTA backup does not rely on it */
#define FILE_NOT_FOUND (-2) /* file disappeared during backup */
#define BLOCKNUM_INVALID (-1)
/*
@ -188,6 +194,7 @@ typedef struct InstanceConfig
uint32 xlog_seg_size;
char *pgdata;
char *external_dir_str;
const char *pgdatabase;
const char *pghost;
const char *pgport;
@ -230,6 +237,7 @@ struct pgBackup
XLogRecPtr stop_lsn; /* backup's finishing transaction log location */
time_t start_time; /* since this moment backup has status
* BACKUP_STATUS_RUNNING */
time_t merge_time; /* the moment when merge was started or 0 */
time_t end_time; /* the moment when backup was finished, or the moment
* when we realized that backup is broken */
time_t recovery_time; /* Earliest moment for which you can restore
@ -268,6 +276,8 @@ struct pgBackup
pgBackup *parent_backup_link;
char *primary_conninfo; /* Connection parameters of the backup
* in the format suitable for recovery.conf */
char *external_dir_str; /* List of external directories,
* separated by ':' */
};
/* Recovery target for restore and validate subcommands */
@ -298,9 +308,11 @@ typedef struct
{
const char *from_root;
const char *to_root;
const char *external_prefix;
parray *files_list;
parray *prev_filelist;
parray *external_dirs;
XLogRecPtr prev_start_lsn;
PGconn *backup_conn;
@ -366,6 +378,7 @@ extern bool progress;
/* In pre-10 'replication_slot' is defined in receivelog.h */
extern char *replication_slot;
#endif
extern bool temp_slot;
/* backup options */
extern bool smooth_checkpoint;
@ -378,6 +391,7 @@ extern bool exclusive_backup;
/* restore options */
extern bool restore_as_replica;
extern bool skip_block_validation;
extern bool skip_external_dirs;
/* delete options */
extern bool delete_wal;
@ -440,7 +454,7 @@ extern int do_archive_get(char *wal_file_path, char *wal_file_name);
/* in configure.c */
extern void do_show_config(void);
extern void do_set_config(void);
extern void do_set_config(bool missing_ok);
extern void init_config(InstanceConfig *config);
/* in show.c */
@ -470,17 +484,20 @@ extern int do_validate_all(void);
/* in catalog.c */
extern pgBackup *read_backup(time_t timestamp);
extern void write_backup(pgBackup *backup);
extern void write_backup_status(pgBackup *backup);
extern void write_backup_status(pgBackup *backup, BackupStatus status);
extern bool lock_backup(pgBackup *backup);
extern const char *pgBackupGetBackupMode(pgBackup *backup);
extern parray *catalog_get_backup_list(time_t requested_backup_id);
extern void catalog_lock_backup_list(parray *backup_list, int from_idx,
int to_idx);
extern pgBackup *catalog_get_last_data_backup(parray *backup_list,
TimeLineID tli);
extern void catalog_lock(void);
extern void pgBackupWriteControl(FILE *out, pgBackup *backup);
extern void write_backup_filelist(pgBackup *backup, parray *files,
const char *root);
const char *root, const char *external_prefix,
parray *external_list);
extern void pgBackupGetPath(const pgBackup *backup, char *path, size_t len,
const char *subdir);
@ -505,17 +522,27 @@ extern const char* deparse_compress_alg(int alg);
/* in dir.c */
extern void dir_list_file(parray *files, const char *root, bool exclude,
bool omit_symlink, bool add_root);
bool omit_symlink, bool add_root, int external_dir_num);
extern void create_data_directories(const char *data_dir,
const char *backup_dir,
bool extract_tablespaces);
extern void read_tablespace_map(parray *files, const char *backup_dir);
extern void opt_tablespace_map(ConfigOption *opt, const char *arg);
extern void opt_externaldir_map(ConfigOption *opt, const char *arg);
extern void check_tablespace_mapping(pgBackup *backup);
extern void check_external_dir_mapping(pgBackup *backup);
extern char *get_external_remap(char *current_dir);
extern void print_file_list(FILE *out, const parray *files, const char *root);
extern parray *dir_read_file_list(const char *root, const char *file_txt);
extern void print_file_list(FILE *out, const parray *files, const char *root,
const char *external_prefix, parray *external_list);
extern parray *dir_read_file_list(const char *root, const char *external_prefix,
const char *file_txt);
extern parray *make_external_directory_list(const char *colon_separated_dirs);
extern void free_dir_list(parray *list);
extern void makeExternalDirPathByNum(char *ret_path, const char *pattern_path,
const int dir_num);
extern bool backup_contains_external(const char *dir, parray *dirs_list);
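/* A rough sketch of the numbering convention, assumed from the callers
* rather than stated here: external_dir_num is 1-based and 0 means the file
* belongs to PGDATA; lookups use parray_get(list, external_dir_num - 1), and
* makeExternalDirPathByNum() presumably appends the number to a pattern path
* such as EXTERNAL_DIR ("external_directories/externaldir"), giving
* ".../externaldir1" for the first external directory.
*/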
extern int dir_create_dir(const char *path, mode_t mode);
extern bool dir_is_empty(const char *path);
@ -523,14 +550,16 @@ extern bool dir_is_empty(const char *path);
extern bool fileExists(const char *path);
extern size_t pgFileSize(const char *path);
extern pgFile *pgFileNew(const char *path, bool omit_symlink);
extern pgFile *pgFileNew(const char *path, bool omit_symlink, int external_dir_num);
extern pgFile *pgFileInit(const char *path);
extern void pgFileDelete(pgFile *file);
extern void pgFileFree(void *file);
extern pg_crc32 pgFileGetCRC(const char *file_path, bool use_crc32c,
bool raise_on_deleted, size_t *bytes_read);
extern int pgFileComparePath(const void *f1, const void *f2);
extern int pgFileComparePathWithExternal(const void *f1, const void *f2);
extern int pgFileComparePathDesc(const void *f1, const void *f2);
extern int pgFileComparePathWithExternalDesc(const void *f1, const void *f2);
extern int pgFileCompareLinked(const void *f1, const void *f2);
extern int pgFileCompareSize(const void *f1, const void *f2);
@ -558,14 +587,11 @@ extern bool check_file_pages(pgFile *file, XLogRecPtr stop_lsn,
/* parsexlog.c */
extern void extractPageMap(const char *archivedir,
TimeLineID tli, uint32 seg_size,
XLogRecPtr startpoint, XLogRecPtr endpoint,
parray *files);
extern void validate_wal(pgBackup *backup,
const char *archivedir,
time_t target_time,
TransactionId target_xid,
XLogRecPtr target_lsn,
TimeLineID tli, uint32 seg_size);
XLogRecPtr startpoint, XLogRecPtr endpoint);
extern void validate_wal(pgBackup *backup, const char *archivedir,
time_t target_time, TransactionId target_xid,
XLogRecPtr target_lsn, TimeLineID tli,
uint32 seg_size);
extern bool read_recovery_info(const char *archivedir, TimeLineID tli,
uint32 seg_size,
XLogRecPtr start_lsn, XLogRecPtr stop_lsn,
View File
@ -21,6 +21,9 @@ typedef struct
{
parray *files;
pgBackup *backup;
parray *req_external_dirs;
parray *cur_external_dirs;
char *external_prefix;
/*
* Return value from the thread.
@ -29,7 +32,7 @@ typedef struct
int ret;
} restore_files_arg;
static void restore_backup(pgBackup *backup);
static void restore_backup(pgBackup *backup, const char *external_dir_str);
static void create_recovery_conf(time_t backup_id,
pgRecoveryTarget *rt,
pgBackup *backup);
@ -37,7 +40,6 @@ static parray *read_timeline_history(TimeLineID targetTLI);
static void *restore_files(void *arg);
static void remove_deleted_files(pgBackup *backup);
/*
* Entry point of pg_probackup RESTORE and VALIDATE subcommands.
*/
@ -53,10 +55,8 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
pgBackup *dest_backup = NULL;
pgBackup *base_full_backup = NULL;
pgBackup *corrupted_backup = NULL;
int dest_backup_index = 0;
int base_full_backup_index = 0;
int corrupted_backup_index = 0;
char *action = is_restore ? "Restore":"Validate";
parray *parent_chain = NULL;
if (is_restore)
{
@ -74,8 +74,6 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
elog(LOG, "%s begin.", action);
/* Get exclusive lock of backup catalog */
catalog_lock();
/* Get list of all backups sorted in order of descending start time */
backups = catalog_get_backup_list(INVALID_BACKUP_ID);
@ -125,7 +123,8 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
{
if ((current_backup->status == BACKUP_STATUS_DONE ||
current_backup->status == BACKUP_STATUS_ORPHAN ||
current_backup->status == BACKUP_STATUS_CORRUPT)
current_backup->status == BACKUP_STATUS_CORRUPT ||
current_backup->status == BACKUP_STATUS_RUNNING)
&& !rt->restore_no_validate)
elog(WARNING, "Backup %s has status: %s",
base36enc(current_backup->start_time), status2str(current_backup->status));
@ -177,8 +176,6 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
if (dest_backup == NULL)
elog(ERROR, "Backup satisfying target options is not found.");
dest_backup_index = get_backup_index_number(backups, dest_backup);
/* If we already found dest_backup, look for full backup. */
if (dest_backup->backup_mode == BACKUP_MODE_FULL)
base_full_backup = dest_backup;
@ -199,7 +196,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
missing_backup_start_time = tmp_backup->parent_backup;
missing_backup_id = base36enc_dup(tmp_backup->parent_backup);
for (j = get_backup_index_number(backups, tmp_backup); j >= 0; j--)
for (j = 0; j < parray_num(backups); j++)
{
pgBackup *backup = (pgBackup *) parray_get(backups, j);
@ -210,8 +207,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
{
if (backup->status == BACKUP_STATUS_OK)
{
backup->status = BACKUP_STATUS_ORPHAN;
write_backup_status(backup);
write_backup_status(backup, BACKUP_STATUS_ORPHAN);
elog(WARNING, "Backup %s is orphaned because his parent %s is missing",
base36enc(backup->start_time), missing_backup_id);
@ -234,7 +230,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
/* parent_backup_id contain human-readable backup ID of oldest invalid backup */
parent_backup_id = base36enc_dup(tmp_backup->start_time);
for (j = get_backup_index_number(backups, tmp_backup) - 1; j >= 0; j--)
for (j = 0; j < parray_num(backups); j++)
{
pgBackup *backup = (pgBackup *) parray_get(backups, j);
@ -243,8 +239,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
{
if (backup->status == BACKUP_STATUS_OK)
{
backup->status = BACKUP_STATUS_ORPHAN;
write_backup_status(backup);
write_backup_status(backup, BACKUP_STATUS_ORPHAN);
elog(WARNING,
"Backup %s is orphaned because his parent %s has status: %s",
@ -261,6 +256,11 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
}
}
tmp_backup = find_parent_full_backup(dest_backup);
/* sanity */
if (!tmp_backup)
elog(ERROR, "Parent full backup for the given backup %s was not found",
base36enc(dest_backup->start_time));
}
/*
@ -276,15 +276,36 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
if (base_full_backup == NULL)
elog(ERROR, "Full backup satisfying target options is not found.");
base_full_backup_index = get_backup_index_number(backups, base_full_backup);
/*
* Ensure that directories provided in tablespace mapping are valid
* i.e. empty or not exist.
*/
if (is_restore)
{
check_tablespace_mapping(dest_backup);
check_external_dir_mapping(dest_backup);
}
/* At this point we are sure that parent chain is whole
* so we can build separate array, containing all needed backups,
* to simplify validation and restore
*/
parent_chain = parray_new();
/* Take every backup that is a child of base_backup AND parent of dest_backup
* including base_backup and dest_backup
*/
tmp_backup = dest_backup;
while(tmp_backup->parent_backup_link)
{
parray_append(parent_chain, tmp_backup);
tmp_backup = tmp_backup->parent_backup_link;
}
parray_append(parent_chain, base_full_backup);
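/* For illustration, with a hypothetical chain FULL <- PAGE1 <- PAGE2 where
* PAGE2 is dest_backup, the loop above yields
* parent_chain = { PAGE2, PAGE1, FULL }: index 0 is dest_backup and the last
* element is the full backup, which is why the loops below walk the array
* from parray_num() - 1 down to 0.
*/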
/* for validation or restore with enabled validation */
if (!is_restore || !rt->restore_no_validate)
{
if (dest_backup->backup_mode != BACKUP_MODE_FULL)
@ -292,27 +313,39 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
/*
* Validate backups from base_full_backup to dest_backup.
* At this point we are sure that parent chain is intact.
*/
for (i = base_full_backup_index; i >= dest_backup_index; i--)
for (i = parray_num(parent_chain) - 1; i >= 0; i--)
{
tmp_backup = (pgBackup *) parray_get(backups, i);
tmp_backup = (pgBackup *) parray_get(parent_chain, i);
if (is_parent(base_full_backup->start_time, tmp_backup, true))
/* Do not interrupt, validate the next backup */
if (!lock_backup(tmp_backup))
{
pgBackupValidate(tmp_backup);
/* Maybe we should be more paranoid and check for !BACKUP_STATUS_OK? */
if (tmp_backup->status == BACKUP_STATUS_CORRUPT)
if (is_restore)
elog(ERROR, "Cannot lock backup %s directory",
base36enc(tmp_backup->start_time));
else
{
corrupted_backup = tmp_backup;
corrupted_backup_index = i;
break;
elog(WARNING, "Cannot lock backup %s directory, skip validation",
base36enc(tmp_backup->start_time));
continue;
}
/* We do not validate WAL files of intermediate backups
* It`s done to speed up restore
*/
}
pgBackupValidate(tmp_backup);
/* After pgBackupValidate() only the following backup
* states are possible: ERROR, RUNNING, CORRUPT and OK.
* Validate WAL only for OK, because there is no point
* in WAL validation for corrupted, errored or running backups.
*/
if (tmp_backup->status != BACKUP_STATUS_OK)
{
corrupted_backup = tmp_backup;
break;
}
/* We do not validate WAL files of intermediate backups
* It`s done to speed up restore
*/
}
/* There is no point in wal validation of corrupted backups */
@ -333,7 +366,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
char *corrupted_backup_id;
corrupted_backup_id = base36enc_dup(corrupted_backup->start_time);
for (j = corrupted_backup_index - 1; j >= 0; j--)
for (j = 0; j < parray_num(backups); j++)
{
pgBackup *backup = (pgBackup *) parray_get(backups, j);
@ -341,8 +374,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
{
if (backup->status == BACKUP_STATUS_OK)
{
backup->status = BACKUP_STATUS_ORPHAN;
write_backup_status(backup);
write_backup_status(backup, BACKUP_STATUS_ORPHAN);
elog(WARNING, "Backup %s is orphaned because his parent %s has status: %s",
base36enc(backup->start_time),
@ -355,7 +387,6 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
}
}
// TODO: rewrite restore to use parent_chain
/*
* If dest backup is corrupted or was orphaned in previous check
* produce corresponding error message
@ -376,19 +407,26 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
base36enc(dest_backup->start_time), status2str(dest_backup->status));
/* We ensured that all backups are valid, now restore if required
* TODO: use parent_link
* TODO: before restore - lock entire parent chain
*/
if (is_restore)
{
for (i = base_full_backup_index; i >= dest_backup_index; i--)
for (i = parray_num(parent_chain) - 1; i >= 0; i--)
{
pgBackup *backup = (pgBackup *) parray_get(backups, i);
pgBackup *backup = (pgBackup *) parray_get(parent_chain, i);
if (rt->lsn_specified && parse_server_version(backup->server_version) < 100000)
elog(ERROR, "Backup %s was created for version %s which doesn't support recovery_target_lsn",
base36enc(dest_backup->start_time), dest_backup->server_version);
restore_backup(backup);
/*
* Backup was locked during validation if no-validate wasn't
* specified.
*/
if (rt->restore_no_validate && !lock_backup(backup))
elog(ERROR, "Cannot lock backup directory");
restore_backup(backup, dest_backup->external_dir_str);
}
/*
@ -405,6 +443,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
/* cleanup */
parray_walk(backups, pgBackupFree);
parray_free(backups);
parray_free(parent_chain);
elog(INFO, "%s of backup %s completed.",
action, base36enc(dest_backup->start_time));
@ -415,19 +454,23 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
* Restore one backup.
*/
void
restore_backup(pgBackup *backup)
restore_backup(pgBackup *backup, const char *external_dir_str)
{
char timestamp[100];
char this_backup_path[MAXPGPATH];
char database_path[MAXPGPATH];
char external_prefix[MAXPGPATH];
char list_path[MAXPGPATH];
parray *files;
parray *requested_external_dirs = NULL;
parray *current_external_dirs = NULL;
int i;
/* arrays with meta info for multi threaded backup */
pthread_t *threads;
restore_files_arg *threads_args;
bool restore_isok = true;
if (backup->status != BACKUP_STATUS_OK)
elog(ERROR, "Backup %s cannot be restored because it is not valid",
base36enc(backup->start_time));
@ -452,34 +495,88 @@ restore_backup(pgBackup *backup)
pgBackupGetPath(backup, this_backup_path, lengthof(this_backup_path), NULL);
create_data_directories(instance_config.pgdata, this_backup_path, true);
if(external_dir_str && !skip_external_dirs)
{
requested_external_dirs = make_external_directory_list(external_dir_str);
for (i = 0; i < parray_num(requested_external_dirs); i++)
{
char *external_path = parray_get(requested_external_dirs, i);
external_path = get_external_remap(external_path);
dir_create_dir(external_path, DIR_PERMISSION);
}
}
if(backup->external_dir_str)
current_external_dirs = make_external_directory_list(backup->external_dir_str);
/*
* Get list of files which need to be restored.
*/
pgBackupGetPath(backup, database_path, lengthof(database_path), DATABASE_DIR);
pgBackupGetPath(backup, external_prefix, lengthof(external_prefix),
EXTERNAL_DIR);
pgBackupGetPath(backup, list_path, lengthof(list_path), DATABASE_FILE_LIST);
files = dir_read_file_list(database_path, list_path);
files = dir_read_file_list(database_path, external_prefix, list_path);
/* Restore directories the same way do_backup_instance does */
parray_qsort(files, pgFileComparePath);
/*
* Make external directories before restore
* and set up threads at the same time
*/
for (i = 0; i < parray_num(files); i++)
{
pgFile *file = (pgFile *) parray_get(files, i);
/* If the entry was an external directory, create it in the restore target */
if (file->external_dir_num && S_ISDIR(file->mode))
{
char dirpath[MAXPGPATH];
char *dir_name;
char *external_path;
if (!current_external_dirs ||
parray_num(current_external_dirs) < file->external_dir_num - 1)
elog(ERROR, "Inconsistent external directory backup metadata");
external_path = parray_get(current_external_dirs,
file->external_dir_num - 1);
if (backup_contains_external(external_path, requested_external_dirs))
{
char container_dir[MAXPGPATH];
external_path = get_external_remap(external_path);
makeExternalDirPathByNum(container_dir, external_prefix,
file->external_dir_num);
dir_name = GetRelativePath(file->path, container_dir);
elog(VERBOSE, "Create directory \"%s\"", dir_name);
join_path_components(dirpath, external_path, dir_name);
dir_create_dir(dirpath, DIR_PERMISSION);
}
}
/* setup threads */
pg_atomic_clear_flag(&file->lock);
}
threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads);
threads_args = (restore_files_arg *) palloc(sizeof(restore_files_arg)*num_threads);
/* setup threads */
for (i = 0; i < parray_num(files); i++)
{
pgFile *file = (pgFile *) parray_get(files, i);
pg_atomic_clear_flag(&file->lock);
}
/* Restore files into target directory */
thread_interrupted = false;
for (i = 0; i < num_threads; i++)
{
restore_files_arg *arg = &(threads_args[i]);
arg->files = files;
arg->backup = backup;
arg->req_external_dirs = requested_external_dirs;
arg->cur_external_dirs = current_external_dirs;
arg->external_prefix = external_prefix;
/* By default assume there is an error */
threads_args[i].ret = 1;
/* Useless message TODO: rewrite */
elog(LOG, "Start thread for num:%zu", parray_num(files));
pthread_create(&threads[i], NULL, restore_files, arg);
@ -519,16 +616,18 @@ remove_deleted_files(pgBackup *backup)
parray *files;
parray *files_restored;
char filelist_path[MAXPGPATH];
char external_prefix[MAXPGPATH];
int i;
pgBackupGetPath(backup, filelist_path, lengthof(filelist_path), DATABASE_FILE_LIST);
pgBackupGetPath(backup, external_prefix, lengthof(external_prefix), EXTERNAL_DIR);
/* Read backup's filelist using target database path as base path */
files = dir_read_file_list(instance_config.pgdata, filelist_path);
files = dir_read_file_list(instance_config.pgdata, external_prefix, filelist_path);
parray_qsort(files, pgFileComparePathDesc);
/* Get list of files actually existing in target database */
files_restored = parray_new();
dir_list_file(files_restored, instance_config.pgdata, true, true, false);
dir_list_file(files_restored, instance_config.pgdata, true, true, false, 0);
/* To delete from leaf, sort in reversed order */
parray_qsort(files_restored, pgFileComparePathDesc);
@ -576,7 +675,7 @@ restore_files(void *arg)
lengthof(from_root), DATABASE_DIR);
/* check for interrupt */
if (interrupted)
if (interrupted || thread_interrupted)
elog(ERROR, "interrupted during restore database");
rel_path = GetRelativePath(file->path,from_root);
@ -632,6 +731,17 @@ restore_files(void *arg)
false,
parse_program_version(arguments->backup->program_version));
}
else if (file->external_dir_num)
{
char *external_path = parray_get(arguments->cur_external_dirs,
file->external_dir_num - 1);
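/* Hypothetical example (paths are illustrative, not from the source): if
* /etc/pg_conf was backed up as external directory #1 and was remapped to
* /tmp/conf with the external directory mapping option, the file is copied
* out of the "externaldir1" container under external_prefix into /tmp/conf
* instead of back into /etc/pg_conf.
*/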
if (backup_contains_external(external_path,
arguments->req_external_dirs))
{
external_path = get_external_remap(external_path);
copy_file(arguments->external_prefix, external_path, file);
}
}
else if (strcmp(file->name, "pg_control") == 0)
copy_pgcontrol_file(from_root, instance_config.pgdata, file);
else
View File
@ -325,6 +325,7 @@ show_instance_plain(parray *backup_list, bool show_name)
uint32 widths[SHOW_FIELDS_COUNT];
uint32 widths_sum = 0;
ShowBackendRow *rows;
time_t current_time = time(NULL);
for (i = 0; i < SHOW_FIELDS_COUNT; i++)
widths[i] = strlen(names[i]);
@ -384,7 +385,13 @@ show_instance_plain(parray *backup_list, bool show_name)
cur++;
/* Time */
if (backup->end_time != (time_t) 0)
if (backup->status == BACKUP_STATUS_RUNNING)
snprintf(row->duration, lengthof(row->duration), "%.*lfs", 0,
difftime(current_time, backup->start_time));
else if (backup->merge_time != (time_t) 0)
snprintf(row->duration, lengthof(row->duration), "%.*lfs", 0,
difftime(backup->end_time, backup->merge_time));
else if (backup->end_time != (time_t) 0)
snprintf(row->duration, lengthof(row->duration), "%.*lfs", 0,
difftime(backup->end_time, backup->start_time));
else
@ -628,6 +635,10 @@ show_instance_json(parray *backup_list)
json_add_value(buf, "primary_conninfo", backup->primary_conninfo,
json_level, true);
if (backup->external_dir_str)
json_add_value(buf, "external-dirs", backup->external_dir_str,
json_level, true);
json_add_value(buf, "status", status2str(backup->status), json_level,
true);
View File
@ -265,12 +265,24 @@ assign_option(ConfigOption *opt, const char *optarg, OptionSource src)
}
}
if (isprint(opt->sname))
elog(ERROR, "Option -%c, --%s should be %s: '%s'",
opt->sname, opt->lname, message, optarg);
if (optarg)
{
if (isprint(opt->sname))
elog(ERROR, "Option -%c, --%s should be %s: '%s'",
opt->sname, opt->lname, message, optarg);
else
elog(ERROR, "Option --%s should be %s: '%s'",
opt->lname, message, optarg);
}
else
elog(ERROR, "Option --%s should be %s: '%s'",
opt->lname, message, optarg);
{
if (isprint(opt->sname))
elog(ERROR, "Option -%c, --%s should be %s",
opt->sname, opt->lname, message);
else
elog(ERROR, "Option --%s should be %s",
opt->lname, message);
}
}
static const char *
@ -507,7 +519,7 @@ config_get_opt(int argc, char **argv, ConfigOption cmd_options[],
*/
int
config_read_opt(const char *path, ConfigOption options[], int elevel,
bool strict)
bool strict, bool missing_ok)
{
FILE *fp;
char buf[1024];
@ -518,7 +530,7 @@ config_read_opt(const char *path, ConfigOption options[], int elevel,
if (!options)
return parsed_options;
if ((fp = pgut_fopen(path, "rt", true)) == NULL)
if ((fp = pgut_fopen(path, "rt", missing_ok)) == NULL)
return parsed_options;
while (fgets(buf, lengthof(buf), fp))
View File
@ -78,7 +78,7 @@ struct ConfigOption
extern int config_get_opt(int argc, char **argv, ConfigOption cmd_options[],
ConfigOption options[]);
extern int config_read_opt(const char *path, ConfigOption options[], int elevel,
bool strict);
bool strict, bool missing_ok);
extern void config_get_opt_env(ConfigOption options[]);
extern void config_set_opt(ConfigOption options[], void *var,
OptionSource source);
View File
@ -125,9 +125,6 @@ exit_if_necessary(int elevel)
{
if (elevel > WARNING && !in_cleanup)
{
/* Interrupt other possible routines */
interrupted = true;
if (loggin_in_progress)
{
loggin_in_progress = false;
@ -136,11 +133,15 @@ exit_if_necessary(int elevel)
/* If this is not the main thread then don't call exit() */
if (main_tid != pthread_self())
{
#ifdef WIN32
ExitThread(elevel);
#else
pthread_exit(NULL);
#endif
/* Interrupt other possible routines */
thread_interrupted = true;
}
else
exit(elevel);
}
View File
@ -700,7 +700,7 @@ on_interrupt(void)
int save_errno = errno;
char errbuf[256];
/* Set interruped flag */
/* Set interrupted flag */
interrupted = true;
/*
View File
@ -7,8 +7,12 @@
*-------------------------------------------------------------------------
*/
#include "postgres_fe.h"
#include "thread.h"
bool thread_interrupted = false;
#ifdef WIN32
DWORD main_tid = 0;
#else
View File
@ -34,7 +34,7 @@ extern DWORD main_tid;
extern pthread_t main_tid;
#endif
extern bool thread_interrupted;
extern int pthread_lock(pthread_mutex_t *mp);
View File
@ -19,6 +19,7 @@ static void *pgBackupValidateFiles(void *arg);
static void do_validate_instance(void);
static bool corrupted_backup_found = false;
static bool skipped_due_to_lock = false;
typedef struct
{
@ -43,6 +44,7 @@ void
pgBackupValidate(pgBackup *backup)
{
char base_path[MAXPGPATH];
char external_prefix[MAXPGPATH];
char path[MAXPGPATH];
parray *files;
bool corrupted = false;
@ -53,13 +55,21 @@ pgBackupValidate(pgBackup *backup)
int i;
/* Check backup version */
if (backup->program_version &&
parse_program_version(backup->program_version) > parse_program_version(PROGRAM_VERSION))
if (parse_program_version(backup->program_version) > parse_program_version(PROGRAM_VERSION))
elog(ERROR, "pg_probackup binary version is %s, but backup %s version is %s. "
"pg_probackup do not guarantee to be forward compatible. "
"Please upgrade pg_probackup binary.",
PROGRAM_VERSION, base36enc(backup->start_time), backup->program_version);
if (backup->status == BACKUP_STATUS_RUNNING)
{
elog(WARNING, "Backup %s has status %s, change it to ERROR and skip validation",
base36enc(backup->start_time), status2str(backup->status));
write_backup_status(backup, BACKUP_STATUS_ERROR);
corrupted_backup_found = true;
return;
}
/* Revalidation is attempted for DONE, ORPHAN and CORRUPT backups */
if (backup->status != BACKUP_STATUS_OK &&
backup->status != BACKUP_STATUS_DONE &&
@ -90,8 +100,9 @@ pgBackupValidate(pgBackup *backup)
elog(WARNING, "Invalid backup_mode of backup %s", base36enc(backup->start_time));
pgBackupGetPath(backup, base_path, lengthof(base_path), DATABASE_DIR);
pgBackupGetPath(backup, external_prefix, lengthof(external_prefix), EXTERNAL_DIR);
pgBackupGetPath(backup, path, lengthof(path), DATABASE_FILE_LIST);
files = dir_read_file_list(base_path, path);
files = dir_read_file_list(base_path, external_prefix, path);
/* setup threads */
for (i = 0; i < parray_num(files); i++)
@ -106,6 +117,7 @@ pgBackupValidate(pgBackup *backup)
palloc(sizeof(validate_files_arg) * num_threads);
/* Validate files */
thread_interrupted = false;
for (i = 0; i < num_threads; i++)
{
validate_files_arg *arg = &(threads_args[i]);
@ -144,8 +156,8 @@ pgBackupValidate(pgBackup *backup)
parray_free(files);
/* Update backup status */
backup->status = corrupted ? BACKUP_STATUS_CORRUPT : BACKUP_STATUS_OK;
write_backup_status(backup);
write_backup_status(backup, corrupted ? BACKUP_STATUS_CORRUPT :
BACKUP_STATUS_OK);
if (corrupted)
elog(WARNING, "Backup %s data files are corrupted", base36enc(backup->start_time));
@ -175,7 +187,7 @@ pgBackupValidateFiles(void *arg)
if (!pg_atomic_test_set_flag(&file->lock))
continue;
if (interrupted)
if (interrupted || thread_interrupted)
elog(ERROR, "Interrupted during validate");
/* Validate only regular files */
@ -238,7 +250,8 @@ pgBackupValidateFiles(void *arg)
* Starting from 2.0.25 we calculate crc of pg_control differently.
*/
if (arguments->backup_version >= 20025 &&
strcmp(file->name, "pg_control") == 0)
strcmp(file->name, "pg_control") == 0 &&
!file->external_dir_num)
crc = get_pgcontrol_checksum(arguments->base_path);
else
crc = pgFileGetCRC(file->path,
@ -279,6 +292,9 @@ pgBackupValidateFiles(void *arg)
int
do_validate_all(void)
{
corrupted_backup_found = false;
skipped_due_to_lock = false;
if (instance_name == NULL)
{
/* Show list of instances */
@ -321,7 +337,13 @@ do_validate_all(void)
sprintf(arclog_path, "%s/%s/%s", backup_path, "wal", instance_name);
join_path_components(conf_path, backup_instance_path,
BACKUP_CATALOG_CONF_FILE);
config_read_opt(conf_path, instance_options, ERROR, false);
if (config_read_opt(conf_path, instance_options, ERROR, false,
true) == 0)
{
elog(WARNING, "Configuration file \"%s\" is empty", conf_path);
corrupted_backup_found = true;
continue;
}
do_validate_instance();
}
@ -331,12 +353,24 @@ do_validate_all(void)
do_validate_instance();
}
/* TODO: Probably we should have a different exit code for every condition
* and their combinations:
*  0 - all backups are valid
*  1 - some backups are corrupt
*  2 - some backups were skipped due to concurrent locks
*  3 - some backups are corrupt and some are skipped due to concurrent locks
*/
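/* One possible encoding of that TODO (a sketch, not implemented here):
*     int exit_code = (corrupted_backup_found ? 1 : 0) |
*                     (skipped_due_to_lock ? 2 : 0);
* which yields exactly the four values 0..3 listed above.
*/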
if (skipped_due_to_lock)
elog(WARNING, "Some backups weren't locked and they were skipped");
if (corrupted_backup_found)
{
elog(WARNING, "Some backups are not valid");
return 1;
}
else
if (!skipped_due_to_lock && !corrupted_backup_found)
elog(INFO, "All backups are valid");
return 0;
@ -356,9 +390,6 @@ do_validate_instance(void)
elog(INFO, "Validate backups of the instance '%s'", instance_name);
/* Get exclusive lock of backup catalog */
catalog_lock();
/* Get list of all backups sorted in order of descending start time */
backups = catalog_get_backup_list(INVALID_BACKUP_ID);
@ -389,8 +420,7 @@ do_validate_instance(void)
/* orphanize current_backup */
if (current_backup->status == BACKUP_STATUS_OK)
{
current_backup->status = BACKUP_STATUS_ORPHAN;
write_backup_status(current_backup);
write_backup_status(current_backup, BACKUP_STATUS_ORPHAN);
elog(WARNING, "Backup %s is orphaned because his parent %s is missing",
base36enc(current_backup->start_time),
parent_backup_id);
@ -414,8 +444,7 @@ do_validate_instance(void)
/* orphanize current_backup */
if (current_backup->status == BACKUP_STATUS_OK)
{
current_backup->status = BACKUP_STATUS_ORPHAN;
write_backup_status(current_backup);
write_backup_status(current_backup, BACKUP_STATUS_ORPHAN);
elog(WARNING, "Backup %s is orphaned because his parent %s has status: %s",
base36enc(current_backup->start_time), parent_backup_id,
status2str(tmp_backup->status));
@ -429,6 +458,11 @@ do_validate_instance(void)
continue;
}
base_full_backup = find_parent_full_backup(current_backup);
/* sanity */
if (!base_full_backup)
elog(ERROR, "Parent full backup for the given backup %s was not found",
base36enc(current_backup->start_time));
}
/* chain is whole, all parents are valid at first glance,
* current backup validation can proceed
@ -439,6 +473,14 @@ do_validate_instance(void)
else
base_full_backup = current_backup;
/* Do not interrupt, validate the next backup */
if (!lock_backup(current_backup))
{
elog(WARNING, "Cannot lock backup %s directory, skip validation",
base36enc(current_backup->start_time));
skipped_due_to_lock = true;
continue;
}
/* Validate backup files */
pgBackupValidate(current_backup);
@ -451,14 +493,14 @@ do_validate_instance(void)
/*
* Mark every descendant of corrupted backup as orphan
*/
if (current_backup->status == BACKUP_STATUS_CORRUPT)
if (current_backup->status != BACKUP_STATUS_OK)
{
/* This is ridiculous but legal.
* PAGE1_2b <- OK
* PAGE1_2a <- OK
* PAGE1_1b <- ORPHAN
* PAGE1_1a <- CORRUPT
* FULL1 <- OK
* PAGE_b2 <- OK
* PAGE_a2 <- OK
* PAGE_b1 <- ORPHAN
* PAGE_a1 <- CORRUPT
* FULL <- OK
*/
corrupted_backup_found = true;
@ -472,8 +514,7 @@ do_validate_instance(void)
{
if (backup->status == BACKUP_STATUS_OK)
{
backup->status = BACKUP_STATUS_ORPHAN;
write_backup_status(backup);
write_backup_status(backup, BACKUP_STATUS_ORPHAN);
elog(WARNING, "Backup %s is orphaned because his parent %s has status: %s",
base36enc(backup->start_time),
@ -498,14 +539,14 @@ do_validate_instance(void)
pgBackup *tmp_backup = NULL;
int result;
//PAGE3b ORPHAN
//PAGE2b ORPHAN -----
//PAGE6a ORPHAN |
//PAGE5a CORRUPT |
//PAGE4a missing |
//PAGE3a missing |
//PAGE2a ORPHAN |
//PAGE1a OK <- we are here <-|
//PAGE_b2 ORPHAN
//PAGE_b1 ORPHAN -----
//PAGE_a5 ORPHAN |
//PAGE_a4 CORRUPT |
//PAGE_a3 missing |
//PAGE_a2 missing |
//PAGE_a1 ORPHAN |
//PAGE OK <- we are here<-|
//FULL OK
if (is_parent(current_backup->start_time, backup, false))
@ -525,12 +566,20 @@ do_validate_instance(void)
if (backup->status == BACKUP_STATUS_ORPHAN)
{
/* Do not interrupt, validate the next backup */
if (!lock_backup(backup))
{
elog(WARNING, "Cannot lock backup %s directory, skip validation",
base36enc(backup->start_time));
skipped_due_to_lock = true;
continue;
}
/* Revalidate backup files */
pgBackupValidate(backup);
if (backup->status == BACKUP_STATUS_OK)
{
//tmp_backup = find_parent_full_backup(dest_backup);
/* Revalidation successful, validate corresponding WAL files */
validate_wal(backup, arclog_path, 0,
0, 0, current_backup->tli,
View File
@ -1,14 +1,11 @@
import unittest
from . import init_test, merge, option_test, show_test, compatibility, \
backup_test, delete_test, delta, restore_test, validate_test, \
retention_test, ptrack_clean, ptrack_empty, ptrack_cluster, \
ptrack_move_to_tablespace, ptrack_recovery, ptrack_truncate, \
ptrack_vacuum, ptrack_vacuum_bits_frozen, ptrack_vacuum_bits_visibility, \
ptrack_vacuum_full, ptrack_vacuum_truncate, pgpro560, pgpro589, \
false_positive, replica, compression, page, ptrack, archive, \
exclude, cfs_backup, cfs_restore, cfs_validate_backup, auth_test, \
checkdb
backup_test, delete, delta, restore, validate, \
retention, pgpro560, pgpro589, pgpro2068, false_positive, replica, \
compression, page, ptrack, archive, exclude, cfs_backup, cfs_restore, \
cfs_validate_backup, auth_test, time_stamp, snapfs, logging, \
locking, remote, external, config, checkdb
def load_tests(loader, tests, pattern):
@ -18,38 +15,35 @@ def load_tests(loader, tests, pattern):
suite.addTests(loader.loadTestsFromModule(backup_test))
suite.addTests(loader.loadTestsFromModule(compatibility))
suite.addTests(loader.loadTestsFromModule(checkdb))
suite.addTests(loader.loadTestsFromModule(config))
# suite.addTests(loader.loadTestsFromModule(cfs_backup))
# suite.addTests(loader.loadTestsFromModule(cfs_restore))
# suite.addTests(loader.loadTestsFromModule(cfs_validate_backup))
# suite.addTests(loader.loadTestsFromModule(logging))
suite.addTests(loader.loadTestsFromModule(compression))
suite.addTests(loader.loadTestsFromModule(delete_test))
suite.addTests(loader.loadTestsFromModule(delete))
suite.addTests(loader.loadTestsFromModule(delta))
suite.addTests(loader.loadTestsFromModule(exclude))
suite.addTests(loader.loadTestsFromModule(false_positive))
suite.addTests(loader.loadTestsFromModule(init_test))
suite.addTests(loader.loadTestsFromModule(locking))
suite.addTests(loader.loadTestsFromModule(logging))
suite.addTests(loader.loadTestsFromModule(merge))
suite.addTests(loader.loadTestsFromModule(option_test))
suite.addTests(loader.loadTestsFromModule(page))
suite.addTests(loader.loadTestsFromModule(ptrack))
suite.addTests(loader.loadTestsFromModule(ptrack_clean))
suite.addTests(loader.loadTestsFromModule(ptrack_empty))
suite.addTests(loader.loadTestsFromModule(ptrack_cluster))
suite.addTests(loader.loadTestsFromModule(ptrack_move_to_tablespace))
suite.addTests(loader.loadTestsFromModule(ptrack_recovery))
suite.addTests(loader.loadTestsFromModule(ptrack_truncate))
suite.addTests(loader.loadTestsFromModule(ptrack_vacuum))
suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_bits_frozen))
suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_bits_visibility))
suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_full))
suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_truncate))
# suite.addTests(loader.loadTestsFromModule(ptrack))
suite.addTests(loader.loadTestsFromModule(remote))
suite.addTests(loader.loadTestsFromModule(replica))
suite.addTests(loader.loadTestsFromModule(restore_test))
suite.addTests(loader.loadTestsFromModule(retention_test))
suite.addTests(loader.loadTestsFromModule(restore))
suite.addTests(loader.loadTestsFromModule(retention))
suite.addTests(loader.loadTestsFromModule(show_test))
suite.addTests(loader.loadTestsFromModule(validate_test))
suite.addTests(loader.loadTestsFromModule(snapfs))
suite.addTests(loader.loadTestsFromModule(validate))
suite.addTests(loader.loadTestsFromModule(pgpro560))
suite.addTests(loader.loadTestsFromModule(pgpro589))
suite.addTests(loader.loadTestsFromModule(pgpro2068))
suite.addTests(loader.loadTestsFromModule(time_stamp))
suite.addTests(loader.loadTestsFromModule(external))
return suite
@ -57,18 +51,8 @@ def load_tests(loader, tests, pattern):
# ToDo:
# archive:
# discrepancy of instance`s SYSTEMID and node`s SYSTEMID should lead to archive-push refusal to work
# replica:
# backup should exit with correct error message if some master* option is missing
# --master* options should not work when backing up master
# logging:
# https://jira.postgrespro.ru/browse/PGPRO-584
# https://jira.postgrespro.ru/secure/attachment/20420/20420_doc_logging.md
# archive:
# immediate recovery and full recovery
# backward compatibility:
# previous version catalog must be readable by newer version
# incremental chain from previous version can be continued
# backups from previous version can be restored
# 10vanilla_1.3ptrack +
# 10vanilla+
# 9.6vanilla_1.3ptrack +
View File
@ -21,7 +21,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -79,7 +79,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -228,7 +228,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -283,7 +283,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -352,7 +352,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -418,7 +418,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'master'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -432,7 +432,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
master.slow_start()
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
base_dir=os.path.join(module_name, fname, 'replica'))
replica.cleanup()
master.psql(
@ -481,7 +481,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
# RESTORE FULL BACKUP TAKEN FROM replica
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname))
base_dir=os.path.join(module_name, fname, 'node'))
node.cleanup()
self.restore_node(backup_dir, 'replica', data_dir=node.data_dir)
node.append_conf(
@ -550,14 +550,14 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'master'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'archive_timeout': '10s'}
)
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
base_dir=os.path.join(module_name, fname, 'replica'))
replica.cleanup()
self.init_pb(backup_dir)
@ -640,7 +640,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'master'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -648,7 +648,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
'archive_timeout': '10s'}
)
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
base_dir=os.path.join(module_name, fname, 'replica'))
replica.cleanup()
self.init_pb(backup_dir)
@ -723,16 +723,16 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'max_wal_senders': '2',
'checkpoint_timeout': '30s'}
)
'checkpoint_timeout': '30s'})
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
node.slow_start()
if self.get_version(node) < 100000:
pg_receivexlog_path = self.get_bin_path('pg_receivexlog')
else:
@ -742,7 +742,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
[
pg_receivexlog_path, '-p', str(node.port), '--synchronous',
'-D', os.path.join(backup_dir, 'wal', 'node')
], async=True)
], asynchronous=True)
if pg_receivexlog.returncode:
self.assertFalse(
@ -797,7 +797,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -806,9 +806,9 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
node.slow_start()
if self.get_version(node) < self.version_to_num('10.0'):
return unittest.skip('You need PostgreSQL 10 for this test')
return unittest.skip('You need PostgreSQL >= 10 for this test')
else:
pg_receivexlog_path = self.get_bin_path('pg_receivewal')
@ -816,7 +816,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
[
pg_receivexlog_path, '-p', str(node.port), '--synchronous',
'-Z', '9', '-D', os.path.join(backup_dir, 'wal', 'node')
], async=True)
], asynchronous=True)
if pg_receivexlog.returncode:
self.assertFalse(
View File
@ -32,7 +32,7 @@ class SimpleAuthTest(ProbackupTest, unittest.TestCase):
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -43,7 +43,7 @@ class SimpleAuthTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
node.safe_psql("postgres", "CREATE ROLE backup with LOGIN")
@ -126,7 +126,8 @@ class SimpleAuthTest(ProbackupTest, unittest.TestCase):
"test1", "create table t1 as select generate_series(0,100)")
node.append_conf("postgresql.auto.conf", "ptrack_enable = 'on'")
node.restart()
node.stop()
node.slow_start()
try:
self.backup_node(
@ -203,25 +204,26 @@ class AuthTest(unittest.TestCase):
cls.pb.add_instance(cls.backup_dir, cls.node.name, cls.node)
cls.pb.set_archiving(cls.backup_dir, cls.node.name, cls.node)
try:
cls.node.start()
cls.node.slow_start()
except StartNodeException:
raise unittest.skip("Node hasn't started")
cls.node.safe_psql("postgres",
"CREATE ROLE backup WITH LOGIN PASSWORD 'password'; \
GRANT USAGE ON SCHEMA pg_catalog TO backup; \
GRANT EXECUTE ON FUNCTION current_setting(text) TO backup; \
GRANT EXECUTE ON FUNCTION pg_is_in_recovery() TO backup; \
GRANT EXECUTE ON FUNCTION pg_start_backup(text, boolean, boolean) TO backup; \
GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup; \
GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean) TO backup; \
GRANT EXECUTE ON FUNCTION pg_create_restore_point(text) TO backup; \
GRANT EXECUTE ON FUNCTION pg_switch_xlog() TO backup; \
GRANT EXECUTE ON FUNCTION txid_current() TO backup; \
GRANT EXECUTE ON FUNCTION txid_current_snapshot() TO backup; \
GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup; \
GRANT EXECUTE ON FUNCTION pg_ptrack_clear() TO backup; \
GRANT EXECUTE ON FUNCTION pg_ptrack_get_and_clear(oid, oid) TO backup;")
cls.node.safe_psql(
"postgres",
"CREATE ROLE backup WITH LOGIN PASSWORD 'password'; "
"GRANT USAGE ON SCHEMA pg_catalog TO backup; "
"GRANT EXECUTE ON FUNCTION current_setting(text) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_is_in_recovery() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_start_backup(text, boolean, boolean) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_create_restore_point(text) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_switch_xlog() TO backup; "
"GRANT EXECUTE ON FUNCTION txid_current() TO backup; "
"GRANT EXECUTE ON FUNCTION txid_current_snapshot() TO backup; "
"GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_ptrack_clear() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_ptrack_get_and_clear(oid, oid) TO backup;")
cls.pgpass_file = os.path.join(os.path.expanduser('~'), '.pgpass')
@classmethod
View File
@ -2,7 +2,6 @@ import unittest
import os
from time import sleep
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
from .helpers.cfs_helpers import find_by_name
module_name = 'backup'
@ -17,17 +16,16 @@ class BackupTest(ProbackupTest, unittest.TestCase):
"""standart backup modes with ARCHIVE WAL method"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'ptrack_enable': 'on'}
)
'ptrack_enable': 'on'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
backup_id = self.backup_node(backup_dir, 'node', node)
show_backup = self.show_pb(backup_dir, 'node')[0]
@ -89,15 +87,14 @@ class BackupTest(ProbackupTest, unittest.TestCase):
"""full backup with smooth checkpoint"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'])
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
self.backup_node(
backup_dir, 'node', node,
@ -113,15 +110,15 @@ class BackupTest(ProbackupTest, unittest.TestCase):
"""page-level backup without validated full backup"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'}
)
pg_options={'ptrack_enable': 'on'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
try:
self.backup_node(backup_dir, 'node', node, backup_type="page")
@ -169,15 +166,15 @@ class BackupTest(ProbackupTest, unittest.TestCase):
"""page-level backup with corrupted full backup"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'}
)
pg_options={'ptrack_enable': 'on'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
backup_id = self.backup_node(backup_dir, 'node', node)
file = os.path.join(
@ -195,12 +192,13 @@ class BackupTest(ProbackupTest, unittest.TestCase):
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
"INFO: Validate backups of the instance 'node'\n" in e.message and
"WARNING: Backup file \"{0}\" is not found\n".format(
"INFO: Validate backups of the instance 'node'" in e.message and
"WARNING: Backup file".format(
file) in e.message and
"WARNING: Backup {0} data files are corrupted\n".format(
"is not found".format(file) in e.message and
"WARNING: Backup {0} data files are corrupted".format(
backup_id) in e.message and
"WARNING: Some backups are not valid\n" in e.message,
"WARNING: Some backups are not valid" in e.message,
"\n Unexpected Error Message: {0}\n CMD: {1}".format(
repr(e.message), self.cmd))
@ -233,15 +231,15 @@ class BackupTest(ProbackupTest, unittest.TestCase):
"""ptrack multi thread backup mode"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'}
)
pg_options={'ptrack_enable': 'on'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
self.backup_node(
backup_dir, 'node', node,
@ -261,18 +259,17 @@ class BackupTest(ProbackupTest, unittest.TestCase):
"""ptrack multi thread backup mode and stream"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'ptrack_enable': 'on',
'max_wal_senders': '2'}
)
'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
node.slow_start()
self.backup_node(
backup_dir, 'node', node, backup_type="full",
@ -292,16 +289,16 @@ class BackupTest(ProbackupTest, unittest.TestCase):
"""make node, corrupt some page, check that backup failed"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
)
pg_options={'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
node.slow_start()
self.backup_node(
backup_dir, 'node', node,
@ -349,16 +346,16 @@ class BackupTest(ProbackupTest, unittest.TestCase):
"""make node, corrupt some page, check that backup failed"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
)
pg_options={'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
node.slow_start()
self.backup_node(
backup_dir, 'node', node, backup_type="full",
@ -383,7 +380,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
f.write(b"bla")
f.flush()
f.close
node.start()
node.slow_start()
try:
self.backup_node(
@ -422,16 +419,16 @@ class BackupTest(ProbackupTest, unittest.TestCase):
"""PGPRO-1376 """
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
)
pg_options={'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
node.slow_start()
self.create_tblspace_in_node(
node, 'tblspace1',
@ -459,22 +456,9 @@ class BackupTest(ProbackupTest, unittest.TestCase):
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,1000) i")
try:
self.backup_node(
backup_dir, 'node', node, backup_type="full",
options=["-j", "4", "--stream"])
# we should die here because exception is what we expect to happen
self.assertEqual(
1, 0,
"Expecting Error because of too many levels "
"of symbolic linking\n"
" Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
'Too many levels of symbolic links' in e.message,
"\n Unexpected Error Message: {0}\n CMD: {1}".format(
repr(e.message), self.cmd))
backup_id_1 = self.backup_node(
backup_dir, 'node', node, backup_type="full",
options=["-j", "4", "--stream"])
node.safe_psql(
"postgres",
@ -495,7 +479,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
).rstrip()
list = []
for root, dirs, files in os.walk(backup_dir):
for root, dirs, files in os.walk(os.path.join(
backup_dir, 'backups', 'node', backup_id_1)):
for file in files:
if file == relfilenode:
path = os.path.join(root, file)
@ -523,3 +508,491 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_tablespace_handling(self):
"""
make node, take full backup, check that restore with
tablespace mapping will end with error, take page backup,
check that restore with tablespace mapping will end with
success
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
self.backup_node(
backup_dir, 'node', node, backup_type="full",
options=["-j", "4", "--stream"])
tblspace1_old_path = self.get_tblspace_path(node, 'tblspace1_old')
tblspace2_old_path = self.get_tblspace_path(node, 'tblspace2_old')
self.create_tblspace_in_node(
node, 'some_lame_tablespace')
self.create_tblspace_in_node(
node, 'tblspace1',
tblspc_path=tblspace1_old_path)
self.create_tblspace_in_node(
node, 'tblspace2',
tblspc_path=tblspace2_old_path)
node.safe_psql(
"postgres",
"create table t_heap_lame tablespace some_lame_tablespace "
"as select 1 as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,1000) i")
node.safe_psql(
"postgres",
"create table t_heap2 tablespace tblspace2 as select 1 as id, "
"md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,1000) i")
tblspace1_new_path = self.get_tblspace_path(node, 'tblspace1_new')
tblspace2_new_path = self.get_tblspace_path(node, 'tblspace2_new')
node_restored = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node_restored'))
node_restored.cleanup()
try:
self.restore_node(
backup_dir, 'node', node_restored,
options=[
"-j", "4",
"-T", "{0}={1}".format(
tblspace1_old_path, tblspace1_new_path),
"-T", "{0}={1}".format(
tblspace2_old_path, tblspace2_new_path)])
# we should die here because exception is what we expect to happen
self.assertEqual(
1, 0,
"Expecting Error because tablespace mapping is incorrect"
"\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
'ERROR: --tablespace-mapping option' in e.message and
'have an entry in tablespace_map file' in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
node.safe_psql(
"postgres",
"drop table t_heap_lame")
node.safe_psql(
"postgres",
"drop tablespace some_lame_tablespace")
self.backup_node(
backup_dir, 'node', node, backup_type="delta",
options=["-j", "4", "--stream"])
self.restore_node(
backup_dir, 'node', node_restored,
options=[
"-j", "4",
"-T", "{0}={1}".format(
tblspace1_old_path, tblspace1_new_path),
"-T", "{0}={1}".format(
tblspace2_old_path, tblspace2_new_path)])
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_tablespace_handling_1(self):
"""
make node with tablespace A, take full backup, check that restore with
tablespace mapping of tablespace B will end with error
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
tblspace1_old_path = self.get_tblspace_path(node, 'tblspace1_old')
tblspace2_old_path = self.get_tblspace_path(node, 'tblspace2_old')
tblspace_new_path = self.get_tblspace_path(node, 'tblspace_new')
self.create_tblspace_in_node(
node, 'tblspace1',
tblspc_path=tblspace1_old_path)
self.backup_node(
backup_dir, 'node', node, backup_type="full",
options=["-j", "4", "--stream"])
node_restored = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node_restored'))
node_restored.cleanup()
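# Only tblspace1 exists in this backup, so mapping the unknown tblspace2 should be rejected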
try:
self.restore_node(
backup_dir, 'node', node_restored,
options=[
"-j", "4",
"-T", "{0}={1}".format(
tblspace2_old_path, tblspace_new_path)])
# we should die here because exception is what we expect to happen
self.assertEqual(
1, 0,
"Expecting Error because tablespace mapping is incorrect"
"\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
'ERROR: --tablespace-mapping option' in e.message and
'have an entry in tablespace_map file' in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_tablespace_handling_2(self):
"""
make node without tablespaces, take full backup, check that restore with
tablespace mapping will end with error
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
tblspace1_old_path = self.get_tblspace_path(node, 'tblspace1_old')
tblspace_new_path = self.get_tblspace_path(node, 'tblspace_new')
self.backup_node(
backup_dir, 'node', node, backup_type="full",
options=["-j", "4", "--stream"])
node_restored = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node_restored'))
node_restored.cleanup()
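# The backup contains no tablespaces at all, so any -T mapping should be rejected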
try:
self.restore_node(
backup_dir, 'node', node_restored,
options=[
"-j", "4",
"-T", "{0}={1}".format(
tblspace1_old_path, tblspace_new_path)])
# we should die here because exception is what we expect to happen
self.assertEqual(
1, 0,
"Expecting Error because tablespace mapping is incorrect"
"\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
'ERROR: --tablespace-mapping option' in e.message and
'have an entry in tablespace_map file' in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_drop_rel_during_backup_delta(self):
""""""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
node.safe_psql(
"postgres",
"create table t_heap as select i"
" as id from generate_series(0,100) i")
relative_path = node.safe_psql(
"postgres",
"select pg_relation_filepath('t_heap')").rstrip()
absolute_path = os.path.join(node.data_dir, relative_path)
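# pg_relation_filepath() returns the relation's file path relative to PGDATA;
# keep the absolute path so the file can be located on disk later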
# FULL backup
self.backup_node(backup_dir, 'node', node, options=['--stream'])
# DELTA backup
gdb = self.backup_node(
backup_dir, 'node', node, backup_type='delta',
gdb=True, options=['--log-level-file=verbose'])
gdb.set_breakpoint('backup_files')
gdb.run_until_break()
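# The backup worker is now paused at backup_files(), so the table dropped below
# should disappear between being listed for backup and being copied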
# REMOVE file
node.safe_psql(
"postgres",
"DROP TABLE t_heap")
node.safe_psql(
"postgres",
"CHECKPOINT")
# File removed, we can proceed with backup
gdb.continue_execution_until_exit()
pgdata = self.pgdata_content(node.data_dir)
with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:
log_content = f.read()
self.assertTrue(
'LOG: File "{0}" is not found'.format(absolute_path) in log_content,
'File "{0}" should be deleted but it`s not'.format(absolute_path))
node.cleanup()
self.restore_node(backup_dir, 'node', node, options=["-j", "4"])
# Physical comparison
pgdata_restored = self.pgdata_content(node.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_drop_rel_during_backup_page(self):
""""""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
node.safe_psql(
"postgres",
"create table t_heap as select i"
" as id from generate_series(0,100) i")
relative_path = node.safe_psql(
"postgres",
"select pg_relation_filepath('t_heap')").rstrip()
absolute_path = os.path.join(node.data_dir, relative_path)
# FULL backup
self.backup_node(backup_dir, 'node', node, options=['--stream'])
# PAGE backup
gdb = self.backup_node(
backup_dir, 'node', node, backup_type='page',
gdb=True, options=['--log-level-file=verbose'])
gdb.set_breakpoint('backup_files')
gdb.run_until_break()
# REMOVE file
os.remove(absolute_path)
# File removed, we can proceed with backup
gdb.continue_execution_until_exit()
pgdata = self.pgdata_content(node.data_dir)
with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:
log_content = f.read()
self.assertTrue(
'LOG: File "{0}" is not found'.format(absolute_path) in log_content,
'File "{0}" should be deleted but it`s not'.format(absolute_path))
node.cleanup()
self.restore_node(backup_dir, 'node', node, options=["-j", "4"])
# Physical comparison
pgdata_restored = self.pgdata_content(node.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_drop_rel_during_backup_ptrack(self):
""""""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'ptrack_enable': 'on'})
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
node.safe_psql(
"postgres",
"create table t_heap as select i"
" as id from generate_series(0,100) i")
relative_path = node.safe_psql(
"postgres",
"select pg_relation_filepath('t_heap')").rstrip()
absolute_path = os.path.join(node.data_dir, relative_path)
# FULL backup
self.backup_node(backup_dir, 'node', node, options=['--stream'])
# PTRACK backup
gdb = self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
gdb=True, options=['--log-level-file=verbose'])
gdb.set_breakpoint('backup_files')
gdb.run_until_break()
# REMOVE file
os.remove(absolute_path)
# File removed, we can proceed with backup
gdb.continue_execution_until_exit()
pgdata = self.pgdata_content(node.data_dir)
with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:
log_content = f.read()
self.assertTrue(
'LOG: File "{0}" is not found'.format(absolute_path) in log_content,
'File "{0}" should be deleted but it`s not'.format(absolute_path))
node.cleanup()
self.restore_node(backup_dir, 'node', node, options=["-j", "4"])
# Physical comparison
pgdata_restored = self.pgdata_content(node.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_persistent_slot_for_stream_backup(self):
""""""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'max_wal_size': '40MB'})
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
node.safe_psql(
"postgres",
"SELECT pg_create_physical_replication_slot('slot_1')")
# FULL backup
self.backup_node(
backup_dir, 'node', node,
options=['--stream', '--slot=slot_1'])
# FULL backup
self.backup_node(
backup_dir, 'node', node,
options=['--stream', '--slot=slot_1'])
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_temp_slot_for_stream_backup(self):
""""""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'max_wal_size': '40MB'})
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# FULL backup
self.backup_node(
backup_dir, 'node', node,
options=['--stream', '--temp-slot'])
if self.get_version(node) < self.version_to_num('10.0'):
return unittest.skip('You need PostgreSQL >= 10 for this test')
else:
pg_receivexlog_path = self.get_bin_path('pg_receivewal')
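# Temporary replication slots require PostgreSQL 10 or newer, hence the version guard above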
# FULL backup
self.backup_node(
backup_dir, 'node', node,
options=['--stream', '--slot=slot_1', '--temp-slot'])
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@ -34,7 +34,7 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase):
self.add_instance(self.backup_dir, 'node', self.node)
self.set_archiving(self.backup_dir, 'node', self.node)
self.node.start()
self.node.slow_start()
self.create_tblspace_in_node(self.node, tblspace_name, cfs=True)
@ -745,7 +745,7 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase):
self.restore_node(
self.backup_dir, 'node', self.node,
backup_id=backup_id_full, options=["-j", "4"])
self.node.start()
self.node.slow_start()
self.assertEqual(
full_result,
self.node.safe_psql("postgres", "SELECT * FROM t_heap"),
@ -760,7 +760,7 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase):
self.restore_node(
self.backup_dir, 'node', self.node,
backup_id=backup_id_page, options=["-j", "4"])
self.node.start()
self.node.slow_start()
self.assertEqual(
page_result,
self.node.safe_psql("postgres", "SELECT * FROM t_heap"),
@ -879,7 +879,7 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase):
self.restore_node(
self.backup_dir, 'node', self.node,
backup_id=backup_id_full, options=["-j", "4"])
self.node.start()
self.node.slow_start()
self.assertEqual(
full_result_1,
self.node.safe_psql("postgres", "SELECT * FROM t_heap_1"),
@ -905,7 +905,7 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase):
self.restore_node(
self.backup_dir, 'node', self.node,
backup_id=backup_id_page, options=["-j", "4"])
self.node.start()
self.node.slow_start()
self.assertEqual(
page_result_1,
self.node.safe_psql("postgres", "SELECT * FROM t_heap_1"),

View File

@ -43,7 +43,7 @@ class CfsRestoreBase(ProbackupTest, unittest.TestCase):
self.add_instance(self.backup_dir, 'node', self.node)
self.set_archiving(self.backup_dir, 'node', self.node)
self.node.start()
self.node.slow_start()
self.create_tblspace_in_node(self.node, tblspace_name, cfs=True)
self.add_data_in_cluster()

View File

@ -16,7 +16,7 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -47,7 +47,7 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
# RESTORE old FULL with new binary
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname))
base_dir=os.path.join(module_name, fname, 'node_restored'))
node_restored.cleanup()
@ -109,6 +109,9 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.expectedFailure
# @unittest.skip("skip")
def test_backward_compatibility_delta(self):
@ -116,7 +119,7 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -147,7 +150,7 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
# RESTORE old FULL with new binary
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname))
base_dir=os.path.join(module_name, fname, 'node_restored'))
node_restored.cleanup()
@ -209,6 +212,9 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.expectedFailure
# @unittest.skip("skip")
def test_backward_compatibility_ptrack(self):
@ -216,7 +222,7 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -247,7 +253,7 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
# RESTORE old FULL with new binary
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname))
base_dir=os.path.join(module_name, fname, 'node_restored'))
node_restored.cleanup()
@ -309,6 +315,9 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.expectedFailure
# @unittest.skip("skip")
def test_backward_compatibility_compression(self):
@ -316,7 +325,7 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -342,7 +351,7 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
# restore OLD FULL with new binary
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname))
base_dir=os.path.join(module_name, fname, 'node_restored'))
node_restored.cleanup()
@ -467,3 +476,64 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.expectedFailure
# @unittest.skip("skip")
def test_backward_compatibility_merge(self):
"""
Create node, take FULL and PAGE backups with old binary,
merge them with new binary
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'max_wal_senders': '2',
'autovacuum': 'off'})
self.init_pb(backup_dir, old_binary=True)
self.add_instance(backup_dir, 'node', node, old_binary=True)
self.set_archiving(backup_dir, 'node', node, old_binary=True)
node.slow_start()
# FULL backup with OLD binary
self.backup_node(
backup_dir, 'node', node,
old_binary=True)
node.pgbench_init(scale=1)
# PAGE backup with OLD binary
backup_id = self.backup_node(
backup_dir, 'node', node,
backup_type='page', old_binary=True)
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)
self.merge_backup(backup_dir, "node", backup_id)
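# Merge the PAGE backup taken with the old binary into its parent FULL using the new binary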
print(self.show_pb(backup_dir, as_text=True, as_json=False))
# restore OLD FULL with new binary
node_restored = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node_restored'))
node_restored.cleanup()
self.restore_node(
backup_dir, 'node', node_restored, options=["-j", "4"])
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@ -18,7 +18,7 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -30,7 +30,7 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
# FULL BACKUP
node.safe_psql(
@ -131,7 +131,7 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -142,7 +142,7 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
# FULL BACKUP
node.safe_psql(
@ -238,7 +238,7 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -250,7 +250,7 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
# FULL BACKUP
node.safe_psql(
@ -348,7 +348,7 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -360,7 +360,7 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
# FULL BACKUP
node.safe_psql(
@ -458,7 +458,7 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -471,7 +471,7 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
try:
self.backup_node(
@ -493,7 +493,7 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
@unittest.skip("skip")
# @unittest.skip("skip")
def test_uncompressable_pages(self):
"""
make archive node, create table with uncompressable toast pages,
@ -503,57 +503,45 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2',
'checkpoint_timeout': '30s'}
)
'wal_level': 'replica'})
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# node.safe_psql(
# "postgres",
# "create table t_heap as select i, "
# "repeat('1234567890abcdefghiyklmn', 1)::bytea, "
# "point(0,0) from generate_series(0,1) i")
# Full
self.backup_node(
backup_dir, 'node', node,
options=[
'--compress-algorithm=zlib',
'--compress-level=0'])
node.safe_psql(
"postgres",
"create table t as select i, "
"repeat(md5(i::text),5006056) as fat_attr "
"from generate_series(0,10) i;")
node.pgbench_init(scale=3)
self.backup_node(
backup_dir, 'node', node,
backup_type='full',
backup_type='delta',
options=[
'--compress'])
'--compress-algorithm=zlib',
'--compress-level=0'])
pgdata = self.pgdata_content(node.data_dir)
node.cleanup()
self.restore_node(backup_dir, 'node', node)
# Physical comparison
if self.paranoia:
pgdata_restored = self.pgdata_content(node.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
node.slow_start()
self.backup_node(
backup_dir, 'node', node,
backup_type='full',
options=[
'--compress'])
# Clean after yourself
# self.del_test_dir(module_name, fname)
# create table t as select i, repeat(md5('1234567890'), 1)::bytea, point(0,0) from generate_series(0,1) i;
# create table t_bytea_1(file oid);
# INSERT INTO t_bytea_1 (file)
# VALUES (lo_import('/home/gsmol/git/postgres/contrib/pg_probackup/tests/expected/sample.random', 24593));
# insert into t_bytea select string_agg(data,'') from pg_largeobject where pageno > 0;
#
self.del_test_dir(module_name, fname)

53
tests/config.py Normal file
View File

@ -0,0 +1,53 @@
import unittest
import subprocess
import os
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
from sys import exit
module_name = 'config'
class ConfigTest(ProbackupTest, unittest.TestCase):
# @unittest.expectedFailure
# @unittest.skip("skip")
def test_remove_instance_config(self):
"""remove pg_probackup.conf"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.show_pb(backup_dir)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
self.backup_node(backup_dir, 'node', node)
self.backup_node(
backup_dir, 'node', node, backup_type='page')
conf_file = os.path.join(
backup_dir, 'backups','node', 'pg_probackup.conf')
os.unlink(os.path.join(backup_dir, 'backups','node', 'pg_probackup.conf'))
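# With the instance's pg_probackup.conf removed, the next backup should fail
# while reading the instance configuration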
try:
self.backup_node(
backup_dir, 'node', node, backup_type='page')
self.assertEqual(
1, 0,
"Expecting Error because pg_probackup.conf is missing. "
".\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertIn(
'ERROR: could not open file "{0}": '
'No such file or directory'.format(conf_file),
e.message,
"\n Unexpected Error Message: {0}\n CMD: {1}".format(
repr(e.message), self.cmd))

531
tests/delete.py Normal file
View File

@ -0,0 +1,531 @@
import unittest
import os
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
import subprocess
from sys import exit
module_name = 'delete'
class DeleteTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_delete_full_backups(self):
"""delete full backups"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# full backup
self.backup_node(backup_dir, 'node', node)
pgbench = node.pgbench(
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pgbench.wait()
pgbench.stdout.close()
self.backup_node(backup_dir, 'node', node)
pgbench = node.pgbench(
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pgbench.wait()
pgbench.stdout.close()
self.backup_node(backup_dir, 'node', node)
show_backups = self.show_pb(backup_dir, 'node')
id_1 = show_backups[0]['id']
id_2 = show_backups[1]['id']
id_3 = show_backups[2]['id']
self.delete_pb(backup_dir, 'node', id_2)
show_backups = self.show_pb(backup_dir, 'node')
self.assertEqual(show_backups[0]['id'], id_1)
self.assertEqual(show_backups[1]['id'], id_3)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_delete_archive_mix_compress_and_non_compressed_segments(self):
"""stub"""
# @unittest.skip("skip")
def test_delete_increment_page(self):
"""delete increment and all after him"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# full backup mode
self.backup_node(backup_dir, 'node', node)
# page backup mode
self.backup_node(backup_dir, 'node', node, backup_type="page")
# page backup mode
self.backup_node(backup_dir, 'node', node, backup_type="page")
# full backup mode
self.backup_node(backup_dir, 'node', node)
show_backups = self.show_pb(backup_dir, 'node')
self.assertEqual(len(show_backups), 4)
# delete first page backup
self.delete_pb(backup_dir, 'node', show_backups[1]['id'])
show_backups = self.show_pb(backup_dir, 'node')
self.assertEqual(len(show_backups), 2)
self.assertEqual(show_backups[0]['backup-mode'], "FULL")
self.assertEqual(show_backups[0]['status'], "OK")
self.assertEqual(show_backups[1]['backup-mode'], "FULL")
self.assertEqual(show_backups[1]['status'], "OK")
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_delete_increment_ptrack(self):
"""delete increment and all after him"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# full backup mode
self.backup_node(backup_dir, 'node', node)
# page backup mode
self.backup_node(backup_dir, 'node', node, backup_type="ptrack")
# page backup mode
self.backup_node(backup_dir, 'node', node, backup_type="ptrack")
# full backup mode
self.backup_node(backup_dir, 'node', node)
show_backups = self.show_pb(backup_dir, 'node')
self.assertEqual(len(show_backups), 4)
# delete first page backup
self.delete_pb(backup_dir, 'node', show_backups[1]['id'])
show_backups = self.show_pb(backup_dir, 'node')
self.assertEqual(len(show_backups), 2)
self.assertEqual(show_backups[0]['backup-mode'], "FULL")
self.assertEqual(show_backups[0]['status'], "OK")
self.assertEqual(show_backups[1]['backup-mode'], "FULL")
self.assertEqual(show_backups[1]['status'], "OK")
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_delete_orphaned_wal_segments(self):
"""make archive node, make three full backups, delete second backup without --wal option, then delete orphaned wals via --wal option"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
node.safe_psql(
"postgres",
"create table t_heap as select 1 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,10000) i")
# first full backup
backup_1_id = self.backup_node(backup_dir, 'node', node)
# second full backup
backup_2_id = self.backup_node(backup_dir, 'node', node)
# third full backup
backup_3_id = self.backup_node(backup_dir, 'node', node)
node.stop()
# Check wals
wals_dir = os.path.join(backup_dir, 'wal', 'node')
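# Count WAL segments in the instance archive, ignoring .backup history files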
wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')]
original_wal_quantity = len(wals)
# delete second full backup
self.delete_pb(backup_dir, 'node', backup_2_id)
# check wal quantity
self.validate_pb(backup_dir)
self.assertEqual(self.show_pb(backup_dir, 'node', backup_1_id)['status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node', backup_3_id)['status'], "OK")
# try to delete wals for second backup
self.delete_pb(backup_dir, 'node', options=['--wal'])
# check wal quantity
self.validate_pb(backup_dir)
self.assertEqual(self.show_pb(backup_dir, 'node', backup_1_id)['status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node', backup_3_id)['status'], "OK")
# delete first full backup
self.delete_pb(backup_dir, 'node', backup_1_id)
self.validate_pb(backup_dir)
self.assertEqual(self.show_pb(backup_dir, 'node', backup_3_id)['status'], "OK")
result = self.delete_pb(backup_dir, 'node', options=['--wal'])
# delete useless wals
self.assertTrue('INFO: removed min WAL segment' in result
and 'INFO: removed max WAL segment' in result)
self.validate_pb(backup_dir)
self.assertEqual(self.show_pb(backup_dir, 'node', backup_3_id)['status'], "OK")
# Check quantity, it should be lower than original
wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')]
self.assertTrue(original_wal_quantity > len(wals), "Number of WAL segments did not decrease after 'delete --wal'")
# Delete last backup
self.delete_pb(backup_dir, 'node', backup_3_id, options=['--wal'])
wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')]
self.assertEqual(0, len(wals), "Number of WAL segments should be 0")
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_delete_backup_with_empty_control_file(self):
"""
take backup, truncate its control file,
try to delete it via 'delete' command
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
# full backup mode
self.backup_node(
backup_dir, 'node', node, options=['--stream'])
# page backup mode
self.backup_node(
backup_dir, 'node', node, backup_type="delta", options=['--stream'])
# page backup mode
backup_id = self.backup_node(
backup_dir, 'node', node, backup_type="delta", options=['--stream'])
with open(
os.path.join(backup_dir, 'backups', 'node', backup_id, 'backup.control'),
'wt') as f:
f.flush()
f.close()
show_backups = self.show_pb(backup_dir, 'node')
self.assertEqual(len(show_backups), 3)
self.delete_pb(backup_dir, 'node', backup_id=backup_id)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_delete_interleaved_incremental_chains(self):
"""complicated case of interleaved backup chains"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'])
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# Take FULL BACKUPs
backup_id_a = self.backup_node(backup_dir, 'node', node)
backup_id_b = self.backup_node(backup_dir, 'node', node)
# Change FULL B backup status to ERROR
self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
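# With FULLb marked ERROR, the next PAGE backup should chain to FULLa instead,
# which is how the interleaved chains below are built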
# FULLb ERROR
# FULLa OK
# Take PAGEa1 backup
page_id_a1 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# PAGEa1 OK
# FULLb ERROR
# FULLa OK
# Change FULL B backup status to OK
self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
# Change PAGEa1 backup status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR')
# PAGEa1 ERROR
# FULLb OK
# FULLa OK
page_id_b1 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# PAGEb1 OK
# PAGEa1 ERROR
# FULLb OK
# FULLa OK
# Now we start to play with first generation of PAGE backups
# Change PAGEb1 status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
# Change PAGEa1 status to OK
self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK')
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLa OK
page_id_a2 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# PAGEa2 OK
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLa OK
# Change PAGEa2 status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR')
# Change PAGEb1 status to OK
self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
# PAGEa2 ERROR
# PAGEb1 OK
# PAGEa1 OK
# FULLb OK
# FULLa OK
page_id_b2 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# Change PAGEa2 status to OK
self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK')
# PAGEb2 OK
# PAGEa2 OK
# PAGEb1 OK
# PAGEa1 OK
# FULLb OK
# FULLa OK
self.backup_node(backup_dir, 'node', node)
self.backup_node(backup_dir, 'node', node, backup_type='page')
# PAGEc1 OK
# FULLc OK
# PAGEb2 OK
# PAGEa2 OK
# PAGEb1 OK
# PAGEa1 OK
# FULLb OK
# FULLa OK
# Delete FULLb
self.delete_pb(
backup_dir, 'node', backup_id_b)
self.assertEqual(len(self.show_pb(backup_dir, 'node')), 5)
print(self.show_pb(
backup_dir, 'node', as_json=False, as_text=True))
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_delete_multiple_descendants(self):
"""
PAGEb3
| PAGEa3
PAGEb2 /
| PAGEa2 /
PAGEb1 \ /
| PAGEa1
FULLb |
FULLa should be deleted
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'])
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# Take FULL BACKUPs
backup_id_a = self.backup_node(backup_dir, 'node', node)
backup_id_b = self.backup_node(backup_dir, 'node', node)
# Change FULLb backup status to ERROR
self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
page_id_a1 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# Change FULLb backup status to OK
self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
# Change PAGEa1 backup status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR')
# PAGEa1 ERROR
# FULLb OK
# FULLa OK
page_id_b1 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# PAGEb1 OK
# PAGEa1 ERROR
# FULLb OK
# FULLa OK
# Change PAGEa1 backup status to OK
self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK')
# Change PAGEb1 backup status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLa OK
page_id_a2 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# PAGEa2 OK
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLa OK
# Change PAGEb1 backup status to OK
self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
# Change PAGEa2 backup status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR')
# PAGEa2 ERROR
# PAGEb1 OK
# PAGEa1 OK
# FULLb OK
# FULLa OK
page_id_b2 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# PAGEb2 OK
# PAGEa2 ERROR
# PAGEb1 OK
# PAGEa1 OK
# FULLb OK
# FULLa OK
# Change PAGEb2 and PAGEb1 status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_b2, 'ERROR')
self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
# PAGEb2 ERROR
# PAGEa2 ERROR
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLa OK
page_id_a3 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# PAGEa3 OK
# PAGEb2 ERROR
# PAGEa2 ERROR
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLa OK
# Change PAGEa3 status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a3, 'ERROR')
# Change PAGEb2 status to OK
self.change_backup_status(backup_dir, 'node', page_id_b2, 'OK')
page_id_b3 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# PAGEb3 OK
# PAGEa3 ERROR
# PAGEb2 OK
# PAGEa2 ERROR
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLa OK
# Change PAGEa3, PAGEa2 and PAGEb1 status to OK
self.change_backup_status(backup_dir, 'node', page_id_a3, 'OK')
self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK')
self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
# PAGEb3 OK
# PAGEa3 OK
# PAGEb2 OK
# PAGEa2 OK
# PAGEb1 OK
# PAGEa1 OK
# FULLb OK
# FULLa OK
# Check that page_id_a3 and page_id_a2 are both direct descendants of page_id_a1
self.assertEqual(
self.show_pb(backup_dir, 'node', backup_id=page_id_a3)['parent-backup-id'],
page_id_a1)
self.assertEqual(
self.show_pb(backup_dir, 'node', backup_id=page_id_a2)['parent-backup-id'],
page_id_a1)
# Delete FULLa
self.delete_pb(backup_dir, 'node', backup_id_a)
self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4)
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@ -1,208 +0,0 @@
import unittest
import os
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
import subprocess
from sys import exit
module_name = 'delete'
class DeleteTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_delete_full_backups(self):
"""delete full backups"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
# full backup
self.backup_node(backup_dir, 'node', node)
pgbench = node.pgbench(
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pgbench.wait()
pgbench.stdout.close()
self.backup_node(backup_dir, 'node', node)
pgbench = node.pgbench(
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pgbench.wait()
pgbench.stdout.close()
self.backup_node(backup_dir, 'node', node)
show_backups = self.show_pb(backup_dir, 'node')
id_1 = show_backups[0]['id']
id_2 = show_backups[1]['id']
id_3 = show_backups[2]['id']
self.delete_pb(backup_dir, 'node', id_2)
show_backups = self.show_pb(backup_dir, 'node')
self.assertEqual(show_backups[0]['id'], id_1)
self.assertEqual(show_backups[1]['id'], id_3)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_delete_archive_mix_compress_and_non_compressed_segments(self):
"""stub"""
# @unittest.skip("skip")
def test_delete_increment_page(self):
"""delete increment and all after him"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
# full backup mode
self.backup_node(backup_dir, 'node', node)
# page backup mode
self.backup_node(backup_dir, 'node', node, backup_type="page")
# page backup mode
self.backup_node(backup_dir, 'node', node, backup_type="page")
# full backup mode
self.backup_node(backup_dir, 'node', node)
show_backups = self.show_pb(backup_dir, 'node')
self.assertEqual(len(show_backups), 4)
# delete first page backup
self.delete_pb(backup_dir, 'node', show_backups[1]['id'])
show_backups = self.show_pb(backup_dir, 'node')
self.assertEqual(len(show_backups), 2)
self.assertEqual(show_backups[0]['backup-mode'], "FULL")
self.assertEqual(show_backups[0]['status'], "OK")
self.assertEqual(show_backups[1]['backup-mode'], "FULL")
self.assertEqual(show_backups[1]['status'], "OK")
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_delete_increment_ptrack(self):
"""delete increment and all after him"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
# full backup mode
self.backup_node(backup_dir, 'node', node)
# page backup mode
self.backup_node(backup_dir, 'node', node, backup_type="ptrack")
# page backup mode
self.backup_node(backup_dir, 'node', node, backup_type="ptrack")
# full backup mode
self.backup_node(backup_dir, 'node', node)
show_backups = self.show_pb(backup_dir, 'node')
self.assertEqual(len(show_backups), 4)
# delete first page backup
self.delete_pb(backup_dir, 'node', show_backups[1]['id'])
show_backups = self.show_pb(backup_dir, 'node')
self.assertEqual(len(show_backups), 2)
self.assertEqual(show_backups[0]['backup-mode'], "FULL")
self.assertEqual(show_backups[0]['status'], "OK")
self.assertEqual(show_backups[1]['backup-mode'], "FULL")
self.assertEqual(show_backups[1]['status'], "OK")
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_delete_orphaned_wal_segments(self):
"""make archive node, make three full backups, delete second backup without --wal option, then delete orphaned wals via --wal option"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.safe_psql(
"postgres",
"create table t_heap as select 1 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,10000) i")
# first full backup
backup_1_id = self.backup_node(backup_dir, 'node', node)
# second full backup
backup_2_id = self.backup_node(backup_dir, 'node', node)
# third full backup
backup_3_id = self.backup_node(backup_dir, 'node', node)
node.stop()
# Check wals
wals_dir = os.path.join(backup_dir, 'wal', 'node')
wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')]
original_wal_quantity = len(wals)
# delete second full backup
self.delete_pb(backup_dir, 'node', backup_2_id)
# check wal quantity
self.validate_pb(backup_dir)
self.assertEqual(self.show_pb(backup_dir, 'node', backup_1_id)['status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node', backup_3_id)['status'], "OK")
# try to delete wals for second backup
self.delete_pb(backup_dir, 'node', options=['--wal'])
# check wal quantity
self.validate_pb(backup_dir)
self.assertEqual(self.show_pb(backup_dir, 'node', backup_1_id)['status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node', backup_3_id)['status'], "OK")
# delete first full backup
self.delete_pb(backup_dir, 'node', backup_1_id)
self.validate_pb(backup_dir)
self.assertEqual(self.show_pb(backup_dir, 'node', backup_3_id)['status'], "OK")
result = self.delete_pb(backup_dir, 'node', options=['--wal'])
# delete useless wals
self.assertTrue('INFO: removed min WAL segment' in result
and 'INFO: removed max WAL segment' in result)
self.validate_pb(backup_dir)
self.assertEqual(self.show_pb(backup_dir, 'node', backup_3_id)['status'], "OK")
# Check quantity, it should be lower than original
wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')]
self.assertTrue(original_wal_quantity > len(wals), "Number of wals not changed after 'delete --wal' which is illegal")
# Delete last backup
self.delete_pb(backup_dir, 'node', backup_3_id, options=['--wal'])
wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')]
self.assertEqual (0, len(wals), "Number of wals should be equal to 0")
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@ -23,25 +23,23 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2',
'checkpoint_timeout': '300s',
'autovacuum': 'off'
}
)
'autovacuum': 'off'})
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname),
)
base_dir=os.path.join(module_name, fname, 'node_restored'))
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node_restored.cleanup()
node.start()
node.slow_start()
node.safe_psql(
"postgres",
@ -89,7 +87,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.start()
node_restored.slow_start()
# Clean after yourself
self.del_test_dir(module_name, fname)
@ -105,7 +103,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -116,14 +114,14 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
}
)
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node_restored'),
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node_restored.cleanup()
node.start()
node.slow_start()
self.create_tblspace_in_node(node, 'somedata')
node.safe_psql(
@ -180,7 +178,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.start()
node_restored.slow_start()
# Clean after yourself
self.del_test_dir(module_name, fname)
@ -196,7 +194,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -207,14 +205,14 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
}
)
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node_restored'),
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node_restored.cleanup()
node.start()
node.slow_start()
node.safe_psql(
"postgres",
@ -252,7 +250,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.start()
node_restored.slow_start()
# Clean after yourself
self.del_test_dir(module_name, fname)
@ -266,7 +264,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -279,7 +277,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
# FULL BACKUP
node.safe_psql(
@ -318,7 +316,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n'
' CMD: {1}'.format(repr(self.output), self.cmd))
node.start()
node.slow_start()
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()
@ -334,7 +332,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n'
' CMD: {1}'.format(repr(self.output), self.cmd))
node.start()
node.slow_start()
delta_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(delta_result, delta_result_new)
node.cleanup()
@ -352,7 +350,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -365,7 +363,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
# self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
# FULL BACKUP
node.safe_psql(
@ -401,7 +399,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()
@ -417,7 +415,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
delta_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(delta_result, delta_result_new)
node.cleanup()
@ -434,7 +432,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -451,7 +449,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
# self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
self.create_tblspace_in_node(node, 'somedata')
@ -478,7 +476,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
# RESTORE NODE
restored_node = self.make_simple_node(
base_dir="{0}/{1}/restored_node".format(module_name, fname))
base_dir=os.path.join(module_name, fname, 'restored_node'))
restored_node.cleanup()
tblspc_path = self.get_tblspace_path(node, 'somedata')
tblspc_path_new = self.get_tblspace_path(
@ -517,7 +515,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -527,13 +525,13 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
}
)
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node_restored'),
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node_restored.cleanup()
node.start()
node.slow_start()
self.create_tblspace_in_node(node, 'somedata')
self.backup_node(backup_dir, 'node', node, options=['--stream'])
@ -559,16 +557,12 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
gdb = self.gdb_attach(pid)
gdb.set_breakpoint('reform_and_rewrite_tuple')
if not gdb.continue_execution_until_running():
print('Failed gdb continue')
exit(1)
gdb.continue_execution_until_running()
acurs.execute("VACUUM FULL t_heap")
if gdb.stopped_in_breakpoint():
if gdb.continue_execution_until_break(20) != 'breakpoint-hit':
print('Failed to hit breakpoint')
exit(1)
gdb.continue_execution_until_break(20)
self.backup_node(
backup_dir, 'node', node,
@ -599,7 +593,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.start()
node_restored.slow_start()
# Clean after yourself
self.del_test_dir(module_name, fname)
@ -613,7 +607,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -627,7 +621,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
node.slow_start()
# FULL BACKUP
node.safe_psql(
@ -659,7 +653,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
# RESTORE
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname)
base_dir=os.path.join(module_name, fname, 'node_restored')
)
node_restored.cleanup()
@ -681,7 +675,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
# START RESTORED NODE
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.start()
node_restored.slow_start()
# DROP DATABASE DB1
node.safe_psql(
@ -716,7 +710,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
# START RESTORED NODE
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.start()
node_restored.slow_start()
try:
node_restored.safe_psql('db1', 'select 1')
@ -746,7 +740,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -761,7 +755,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
# FULL BACKUP
node.safe_psql(
@ -820,7 +814,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
# RESTORE
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname)
base_dir=os.path.join(module_name, fname, 'node_restored')
)
node_restored.cleanup()
@ -842,7 +836,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
# START RESTORED NODE
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.start()
node_restored.slow_start()
# Clean after yourself
self.del_test_dir(module_name, fname)
@ -856,7 +850,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True, initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
@ -868,7 +862,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
node.slow_start()
# FULL BACKUP
self.create_tblspace_in_node(node, 'somedata')
@ -901,7 +895,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
# RESTORE
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname)
base_dir=os.path.join(module_name, fname, 'node_restored')
)
node_restored.cleanup()
@ -949,7 +943,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True, initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
@ -961,7 +955,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
node.slow_start()
self.create_tblspace_in_node(node, 'somedata')
# FULL backup
@ -1002,7 +996,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
# RESTORE
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname)
base_dir=os.path.join(module_name, fname, 'node_restored')
)
node_restored.cleanup()
@ -1029,7 +1023,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
# START RESTORED NODE
node_restored.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node_restored.port))
node_restored.start()
node_restored.slow_start()
# Clean after yourself
self.del_test_dir(module_name, fname)
@ -1043,7 +1037,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True, initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
@ -1056,7 +1050,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
self.create_tblspace_in_node(node, 'somedata')
@ -1092,7 +1086,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
# RESTORE
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname)
base_dir=os.path.join(module_name, fname, 'node_restored')
)
node_restored.cleanup()
@ -1115,7 +1109,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
# START RESTORED NODE
node_restored.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node_restored.port))
node_restored.start()
node_restored.slow_start()
# Clean after yourself
self.del_test_dir(module_name, fname)
@ -1125,7 +1119,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
"""make node, corrupt some page, check that backup failed"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
@ -1134,7 +1128,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
node.slow_start()
self.backup_node(
backup_dir, 'node', node,
@ -1184,7 +1178,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
"""make node, corrupt some page, check that backup failed"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
@ -1193,7 +1187,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
node.slow_start()
self.backup_node(
backup_dir, 'node', node, backup_type="full",
@ -1218,7 +1212,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
f.write(b"bla")
f.flush()
f.close
node.start()
node.slow_start()
try:
self.backup_node(
@ -1259,7 +1253,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
@ -1307,7 +1301,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
# Restore DELTA backup
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node_restored'),
)
node_restored.cleanup()


@ -18,7 +18,7 @@ class ExcludeTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -28,7 +28,7 @@ class ExcludeTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
node.slow_start()
conn = node.connect()
with node.connect("postgres") as conn:
@ -109,7 +109,7 @@ class ExcludeTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -122,7 +122,7 @@ class ExcludeTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
node.slow_start()
conn = node.connect()
with node.connect("postgres") as conn:
@ -149,8 +149,8 @@ class ExcludeTest(ProbackupTest, unittest.TestCase):
pgdata = self.pgdata_content(node.data_dir)
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname),
)
base_dir=os.path.join(module_name, fname, 'node_restored'))
node_restored.cleanup()
self.restore_node(


@ -29,9 +29,9 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
[--format=format]
pg_probackup backup -B backup-path -b backup-mode --instance=instance_name
[-C] [--stream [-S slot-name]] [--backup-pg-log]
[-j num-threads] [--archive-timeout=archive-timeout]
[--progress]
[-C] [--stream [-S slot-name]] [--temp-slot]
[--backup-pg-log] [-j num-threads]
[--archive-timeout=archive-timeout] [--progress]
[--log-level-console=log-level-console]
[--log-level-file=log-level-file]
[--log-filename=log-filename]
@ -63,7 +63,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
[--skip-block-validation]
pg_probackup validate -B backup-path [--instance=instance_name]
[-i backup-id] [--progress]
[-i backup-id] [--progress] [-j num-threads]
[--time=time|--xid=xid|--lsn=lsn [--inclusive=boolean]]
[--recovery-target-name=target-name]
[--timeline=timeline]
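For illustration, the new parallel option for validate could be combined with an instance-level run roughly like this (catalog path, instance name and thread count are hypothetical):

    pg_probackup validate -B /mnt/backups --instance=node -j 4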


@ -1 +1 @@
pg_probackup 2.0.25
pg_probackup 2.0.27

tests/external.py Normal file

File diff suppressed because it is too large


@ -18,17 +18,17 @@ class FalsePositive(ProbackupTest, unittest.TestCase):
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'max_wal_senders': '2'}
)
'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
self.backup_node(backup_dir, 'node', node)
@ -58,15 +58,15 @@ class FalsePositive(ProbackupTest, unittest.TestCase):
"""page-level backup with corrupted full backup"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on'}
)
pg_options={'ptrack_enable': 'on'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
backup_id = self.backup_node(backup_dir, 'node', node)
file = os.path.join(
@ -120,7 +120,7 @@ class FalsePositive(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -132,7 +132,7 @@ class FalsePositive(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
node.safe_psql(
"postgres",
@ -204,7 +204,7 @@ class FalsePositive(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -216,7 +216,7 @@ class FalsePositive(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
node.safe_psql(
"postgres",
@ -300,15 +300,16 @@ class FalsePositive(ProbackupTest, unittest.TestCase):
def test_multiple_delete(self):
"""delete multiple backups"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
pg_options={'wal_level': 'replica'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
node.safe_psql(
"postgres",


@ -7,7 +7,7 @@ import six
import testgres
import hashlib
import re
import pwd
import getpass
import select
import psycopg2
from time import sleep
@ -89,8 +89,14 @@ def dir_files(base_dir):
def is_enterprise():
# pg_config --help
if os.name == 'posix':
cmd = [os.environ['PG_CONFIG'], '--help']
elif os.name == 'nt':
cmd = [os.environ['PG_CONFIG'], '--help']
p = subprocess.Popen(
[os.environ['PG_CONFIG'], '--help'],
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
@ -112,34 +118,26 @@ class ProbackupException(Exception):
def slow_start(self, replica=False):
# wait for https://github.com/postgrespro/testgres/pull/50
# self.poll_query_until(
# "postgres",
# "SELECT not pg_is_in_recovery()",
# raise_operational_error=False)
# self.start()
# self.poll_query_until(
# "postgres",
# "SELECT not pg_is_in_recovery()",
# suppress={testgres.NodeConnection})
if replica:
query = 'SELECT pg_is_in_recovery()'
else:
query = 'SELECT not pg_is_in_recovery()'
self.start()
if not replica:
while True:
try:
self.poll_query_until(
"postgres",
"SELECT not pg_is_in_recovery()")
while True:
try:
if self.safe_psql('postgres', query) == 't\n':
break
except Exception as e:
except testgres.QueryException as e:
if 'database system is starting up' in e[0]:
continue
else:
self.poll_query_until(
"postgres",
"SELECT pg_is_in_recovery()")
# while True:
# try:
# self.poll_query_until(
# "postgres",
# "SELECT pg_is_in_recovery()")
# break
# except ProbackupException as e:
# continue
else:
raise e
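Since the rendered hunk above interleaves old and new lines, the intent of the reworked startup wait is easier to follow as a standalone sketch; the helper name is hypothetical and the exception text check uses str(e) instead of indexing the exception, purely for clarity:

    import testgres

    def slow_start_sketch(node, replica=False):
        # pick the query whose answer becomes 't' once the node reaches the
        # expected state: in recovery for a replica, out of recovery otherwise
        if replica:
            query = 'SELECT pg_is_in_recovery()'
        else:
            query = 'SELECT not pg_is_in_recovery()'
        node.start()
        while True:
            try:
                # safe_psql returns the textual result, e.g. 't\n' or 'f\n'
                if node.safe_psql('postgres', query) == 't\n':
                    break
            except testgres.QueryException as e:
                # the server may still be starting up; keep polling in that case
                if 'database system is starting up' in str(e):
                    continue
                raise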
class ProbackupTest(object):
@ -155,18 +153,18 @@ class ProbackupTest(object):
self.test_env = os.environ.copy()
envs_list = [
"LANGUAGE",
"LC_ALL",
"PGCONNECT_TIMEOUT",
"PGDATA",
"PGDATABASE",
"PGHOSTADDR",
"PGREQUIRESSL",
"PGSERVICE",
"PGSSLMODE",
"PGUSER",
"PGPORT",
"PGHOST"
'LANGUAGE',
'LC_ALL',
'PGCONNECT_TIMEOUT',
'PGDATA',
'PGDATABASE',
'PGHOSTADDR',
'PGREQUIRESSL',
'PGSERVICE',
'PGSSLMODE',
'PGUSER',
'PGPORT',
'PGHOST'
]
for e in envs_list:
@ -175,8 +173,8 @@ class ProbackupTest(object):
except:
pass
self.test_env["LC_MESSAGES"] = "C"
self.test_env["LC_TIME"] = "C"
self.test_env['LC_MESSAGES'] = 'C'
self.test_env['LC_TIME'] = 'C'
self.paranoia = False
if 'PG_PROBACKUP_PARANOIA' in self.test_env:
@ -210,7 +208,7 @@ class ProbackupTest(object):
self.user = self.get_username()
self.probackup_path = None
if "PGPROBACKUPBIN" in self.test_env:
if 'PGPROBACKUPBIN' in self.test_env:
if (
os.path.isfile(self.test_env["PGPROBACKUPBIN"]) and
os.access(self.test_env["PGPROBACKUPBIN"], os.X_OK)
@ -222,7 +220,7 @@ class ProbackupTest(object):
if not self.probackup_path:
probackup_path_tmp = os.path.join(
testgres.get_pg_config()["BINDIR"], 'pg_probackup')
testgres.get_pg_config()['BINDIR'], 'pg_probackup')
if os.path.isfile(probackup_path_tmp):
if not os.access(probackup_path_tmp, os.X_OK):
@ -233,7 +231,7 @@ class ProbackupTest(object):
if not self.probackup_path:
probackup_path_tmp = os.path.abspath(os.path.join(
self.dir_path, "../pg_probackup"))
self.dir_path, '../pg_probackup'))
if os.path.isfile(probackup_path_tmp):
if not os.access(probackup_path_tmp, os.X_OK):
@ -246,17 +244,22 @@ class ProbackupTest(object):
print('pg_probackup binary is not found')
exit(1)
os.environ['PATH'] = os.path.dirname(
self.probackup_path) + ":" + os.environ['PATH']
if os.name == 'posix':
os.environ['PATH'] = os.path.dirname(
self.probackup_path) + ':' + os.environ['PATH']
elif os.name == 'nt':
os.environ['PATH'] = os.path.dirname(
self.probackup_path) + ';' + os.environ['PATH']
self.probackup_old_path = None
if "PGPROBACKUPBIN_OLD" in self.test_env:
if 'PGPROBACKUPBIN_OLD' in self.test_env:
if (
os.path.isfile(self.test_env["PGPROBACKUPBIN_OLD"]) and
os.access(self.test_env["PGPROBACKUPBIN_OLD"], os.X_OK)
os.path.isfile(self.test_env['PGPROBACKUPBIN_OLD']) and
os.access(self.test_env['PGPROBACKUPBIN_OLD'], os.X_OK)
):
self.probackup_old_path = self.test_env["PGPROBACKUPBIN_OLD"]
self.probackup_old_path = self.test_env['PGPROBACKUPBIN_OLD']
else:
if self.verbose:
print('PGPROBACKUPBIN_OLD is not an executable file')
@ -280,40 +283,37 @@ class ProbackupTest(object):
initdb_params=initdb_params, allow_streaming=set_replication)
# Sane default parameters
node.append_conf("postgresql.auto.conf", "max_connections = 100")
node.append_conf("postgresql.auto.conf", "shared_buffers = 10MB")
node.append_conf("postgresql.auto.conf", "fsync = on")
node.append_conf("postgresql.auto.conf", "wal_level = logical")
node.append_conf("postgresql.auto.conf", "hot_standby = 'off'")
node.append_conf('postgresql.auto.conf', 'max_connections = 100')
node.append_conf('postgresql.auto.conf', 'shared_buffers = 10MB')
node.append_conf('postgresql.auto.conf', 'fsync = off')
node.append_conf('postgresql.auto.conf', 'wal_level = logical')
node.append_conf('postgresql.auto.conf', 'hot_standby = off')
node.append_conf(
"postgresql.auto.conf", "log_line_prefix = '%t [%p]: [%l-1] '")
node.append_conf("postgresql.auto.conf", "log_statement = none")
node.append_conf("postgresql.auto.conf", "log_duration = on")
'postgresql.auto.conf', "log_line_prefix = '%t [%p]: [%l-1] '")
node.append_conf('postgresql.auto.conf', 'log_statement = none')
node.append_conf('postgresql.auto.conf', 'log_duration = on')
node.append_conf(
"postgresql.auto.conf", "log_min_duration_statement = 0")
node.append_conf("postgresql.auto.conf", "log_connections = on")
node.append_conf("postgresql.auto.conf", "log_disconnections = on")
'postgresql.auto.conf', 'log_min_duration_statement = 0')
node.append_conf('postgresql.auto.conf', 'log_connections = on')
node.append_conf('postgresql.auto.conf', 'log_disconnections = on')
# Apply given parameters
for key, value in six.iteritems(pg_options):
node.append_conf("postgresql.auto.conf", "%s = %s" % (key, value))
node.append_conf('postgresql.auto.conf', '%s = %s' % (key, value))
# Allow replication in pg_hba.conf
if set_replication:
node.append_conf(
"pg_hba.conf",
"local replication all trust\n")
node.append_conf(
"postgresql.auto.conf",
"max_wal_senders = 10")
'postgresql.auto.conf',
'max_wal_senders = 10')
return node
def create_tblspace_in_node(self, node, tblspc_name, tblspc_path=None, cfs=False):
res = node.execute(
"postgres",
"select exists"
'postgres',
'select exists'
" (select 1 from pg_tablespace where spcname = '{0}')".format(
tblspc_name)
)
@ -329,11 +329,11 @@ class ProbackupTest(object):
cmd = "CREATE TABLESPACE {0} LOCATION '{1}'".format(
tblspc_name, tblspc_path)
if cfs:
cmd += " with (compression=true)"
cmd += ' with (compression=true)'
if not os.path.exists(tblspc_path):
os.makedirs(tblspc_path)
res = node.safe_psql("postgres", cmd)
res = node.safe_psql('postgres', cmd)
# Check that tablespace was successfully created
# self.assertEqual(
# res[0], 0,
@ -344,13 +344,13 @@ class ProbackupTest(object):
def get_fork_size(self, node, fork_name):
return node.execute(
"postgres",
'postgres',
"select pg_relation_size('{0}')/8192".format(fork_name))[0][0]
def get_fork_path(self, node, fork_name):
return os.path.join(
node.base_dir, 'data', node.execute(
"postgres",
'postgres',
"select pg_relation_filepath('{0}')".format(
fork_name))[0][0]
)
@ -378,7 +378,7 @@ class ProbackupTest(object):
end_page = pages_per_segment[segment_number]
else:
file_desc = os.open(
file+".{0}".format(segment_number), os.O_RDONLY
file+'.{0}'.format(segment_number), os.O_RDONLY
)
start_page = max(md5_per_page)+1
end_page = end_page + pages_per_segment[segment_number]
@ -481,8 +481,8 @@ class ProbackupTest(object):
idx_dict['ptrack'][PageNum])
)
print(
" Old checksumm: {0}\n"
" New checksumm: {1}".format(
' Old checksumm: {0}\n'
' New checksumm: {1}'.format(
idx_dict['old_pages'][PageNum],
idx_dict['new_pages'][PageNum])
)
@ -543,9 +543,9 @@ class ProbackupTest(object):
)
)
def run_pb(self, command, async=False, gdb=False, old_binary=False):
def run_pb(self, command, asynchronous=False, gdb=False, old_binary=False):
if not self.probackup_old_path and old_binary:
print("PGPROBACKUPBIN_OLD is not set")
print('PGPROBACKUPBIN_OLD is not set')
exit(1)
if old_binary:
@ -559,7 +559,7 @@ class ProbackupTest(object):
print(self.cmd)
if gdb:
return GDBobj([binary_path] + command, self.verbose)
if async:
if asynchronous:
return subprocess.Popen(
self.cmd,
stdout=subprocess.PIPE,
@ -571,7 +571,7 @@ class ProbackupTest(object):
[binary_path] + command,
stderr=subprocess.STDOUT,
env=self.test_env
).decode("utf-8")
).decode('utf-8')
if command[0] == 'backup':
# return backup ID
for line in self.output.splitlines():
@ -580,13 +580,13 @@ class ProbackupTest(object):
else:
return self.output
except subprocess.CalledProcessError as e:
raise ProbackupException(e.output.decode("utf-8"), self.cmd)
raise ProbackupException(e.output.decode('utf-8'), self.cmd)
def run_binary(self, command, async=False):
def run_binary(self, command, asynchronous=False):
if self.verbose:
print([' '.join(map(str, command))])
try:
if async:
if asynchronous:
return subprocess.Popen(
command,
stdin=subprocess.PIPE,
@ -599,39 +599,49 @@ class ProbackupTest(object):
command,
stderr=subprocess.STDOUT,
env=self.test_env
).decode("utf-8")
).decode('utf-8')
return self.output
except subprocess.CalledProcessError as e:
raise ProbackupException(e.output.decode("utf-8"), command)
raise ProbackupException(e.output.decode('utf-8'), command)
def init_pb(self, backup_dir, old_binary=False):
shutil.rmtree(backup_dir, ignore_errors=True)
return self.run_pb([
"init",
"-B", backup_dir
'init',
'-B', backup_dir
],
old_binary=old_binary
)
def add_instance(self, backup_dir, instance, node, old_binary=False):
def add_instance(self, backup_dir, instance, node, old_binary=False, options=[]):
return self.run_pb([
"add-instance",
"--instance={0}".format(instance),
"-B", backup_dir,
"-D", node.data_dir
],
old_binary=old_binary
)
cmd = [
'add-instance',
'--instance={0}'.format(instance),
'-B', backup_dir,
'-D', node.data_dir
]
return self.run_pb(cmd + options, old_binary=old_binary)
def set_config(self, backup_dir, instance, old_binary=False, options=[]):
cmd = [
'set-config',
'--instance={0}'.format(instance),
'-B', backup_dir,
]
return self.run_pb(cmd + options, old_binary=old_binary)
def del_instance(self, backup_dir, instance, old_binary=False):
return self.run_pb([
"del-instance",
"--instance={0}".format(instance),
"-B", backup_dir
'del-instance',
'--instance={0}'.format(instance),
'-B', backup_dir
],
old_binary=old_binary
)
@ -641,7 +651,7 @@ class ProbackupTest(object):
def backup_node(
self, backup_dir, instance, node, data_dir=False,
backup_type="full", options=[], async=False, gdb=False,
backup_type='full', options=[], asynchronous=False, gdb=False,
old_binary=False
):
if not node and not data_dir:
@ -655,17 +665,17 @@ class ProbackupTest(object):
pgdata = data_dir
cmd_list = [
"backup",
"-B", backup_dir,
'backup',
'-B', backup_dir,
# "-D", pgdata,
"-p", "%i" % node.port,
"-d", "postgres",
"--instance={0}".format(instance)
'-p', '%i' % node.port,
'-d', 'postgres',
'--instance={0}'.format(instance)
]
if backup_type:
cmd_list += ["-b", backup_type]
cmd_list += ['-b', backup_type]
return self.run_pb(cmd_list + options, async, gdb, old_binary)
return self.run_pb(cmd_list + options, asynchronous, gdb, old_binary)
def checkdb_node(
self, instance, backup_dir=False, data_dir=False,
@ -685,16 +695,16 @@ class ProbackupTest(object):
return self.run_pb(cmd_list + options, async, gdb, old_binary)
def merge_backup(
self, backup_dir, instance, backup_id, async=False,
self, backup_dir, instance, backup_id, asynchronous=False,
gdb=False, old_binary=False, options=[]):
cmd_list = [
"merge",
"-B", backup_dir,
"--instance={0}".format(instance),
"-i", backup_id
'merge',
'-B', backup_dir,
'--instance={0}'.format(instance),
'-i', backup_id
]
return self.run_pb(cmd_list + options, async, gdb, old_binary)
return self.run_pb(cmd_list + options, asynchronous, gdb, old_binary)
def restore_node(
self, backup_dir, instance, node=False,
@ -704,13 +714,13 @@ class ProbackupTest(object):
data_dir = node.data_dir
cmd_list = [
"restore",
"-B", backup_dir,
"-D", data_dir,
"--instance={0}".format(instance)
'restore',
'-B', backup_dir,
'-D', data_dir,
'--instance={0}'.format(instance)
]
if backup_id:
cmd_list += ["-i", backup_id]
cmd_list += ['-i', backup_id]
return self.run_pb(cmd_list + options, old_binary=old_binary)
@ -722,17 +732,18 @@ class ProbackupTest(object):
backup_list = []
specific_record = {}
cmd_list = [
"show",
"-B", backup_dir,
'show',
'-B', backup_dir,
]
if instance:
cmd_list += ["--instance={0}".format(instance)]
cmd_list += ['--instance={0}'.format(instance)]
if backup_id:
cmd_list += ["-i", backup_id]
cmd_list += ['-i', backup_id]
# ACHTUNG: a WARNING message in the output would break json parsing
if as_json:
cmd_list += ["--format=json"]
cmd_list += ['--format=json', '--log-level-console=error']
if as_text:
# You should print it when calling as_text=true
@ -767,7 +778,7 @@ class ProbackupTest(object):
# inverse list so oldest record come first
body = body[::-1]
# split string in list with string for every header element
header_split = re.split(" +", header)
header_split = re.split(' +', header)
# Remove empty items
for i in header_split:
if i == '':
@ -779,7 +790,7 @@ class ProbackupTest(object):
for backup_record in body:
backup_record = backup_record.rstrip()
# split list with str for every backup record element
backup_record_split = re.split(" +", backup_record)
backup_record_split = re.split(' +', backup_record)
# Remove empty items
for i in backup_record_split:
if i == '':
@ -804,7 +815,7 @@ class ProbackupTest(object):
]
# print sanitized_show
for line in sanitized_show:
name, var = line.partition(" = ")[::2]
name, var = line.partition(' = ')[::2]
var = var.strip('"')
var = var.strip("'")
specific_record[name.strip()] = var
@ -812,66 +823,66 @@ class ProbackupTest(object):
def validate_pb(
self, backup_dir, instance=None,
backup_id=None, options=[], old_binary=False
backup_id=None, options=[], old_binary=False, gdb=False
):
cmd_list = [
"validate",
"-B", backup_dir
'validate',
'-B', backup_dir
]
if instance:
cmd_list += ["--instance={0}".format(instance)]
cmd_list += ['--instance={0}'.format(instance)]
if backup_id:
cmd_list += ["-i", backup_id]
cmd_list += ['-i', backup_id]
return self.run_pb(cmd_list + options, old_binary=old_binary)
return self.run_pb(cmd_list + options, old_binary=old_binary, gdb=gdb)
def delete_pb(
self, backup_dir, instance,
backup_id=None, options=[], old_binary=False):
cmd_list = [
"delete",
"-B", backup_dir
'delete',
'-B', backup_dir
]
cmd_list += ["--instance={0}".format(instance)]
cmd_list += ['--instance={0}'.format(instance)]
if backup_id:
cmd_list += ["-i", backup_id]
cmd_list += ['-i', backup_id]
return self.run_pb(cmd_list + options, old_binary=old_binary)
def delete_expired(
self, backup_dir, instance, options=[], old_binary=False):
cmd_list = [
"delete", "--expired", "--wal",
"-B", backup_dir,
"--instance={0}".format(instance)
'delete',
'-B', backup_dir,
'--instance={0}'.format(instance)
]
return self.run_pb(cmd_list + options, old_binary=old_binary)
def show_config(self, backup_dir, instance, old_binary=False):
out_dict = {}
cmd_list = [
"show-config",
"-B", backup_dir,
"--instance={0}".format(instance)
'show-config',
'-B', backup_dir,
'--instance={0}'.format(instance)
]
res = self.run_pb(cmd_list, old_binary=old_binary).splitlines()
for line in res:
if not line.startswith('#'):
name, var = line.partition(" = ")[::2]
name, var = line.partition(' = ')[::2]
out_dict[name] = var
return out_dict
def get_recovery_conf(self, node):
out_dict = {}
with open(
os.path.join(node.data_dir, "recovery.conf"), "r"
os.path.join(node.data_dir, 'recovery.conf'), 'r'
) as recovery_conf:
for line in recovery_conf:
try:
key, value = line.split("=")
key, value = line.split('=')
except:
continue
out_dict[key.strip()] = value.strip(" '").replace("'\n", "")
@ -887,35 +898,36 @@ class ProbackupTest(object):
else:
archive_mode = 'on'
# node.append_conf(
# "postgresql.auto.conf",
# "wal_level = archive"
# )
node.append_conf(
"postgresql.auto.conf",
"archive_mode = {0}".format(archive_mode)
'postgresql.auto.conf',
'archive_mode = {0}'.format(archive_mode)
)
archive_command = "{0} archive-push -B {1} --instance={2} ".format(
self.probackup_path, backup_dir, instance)
if os.name == 'posix':
archive_command = '"{0}" archive-push -B {1} --instance={2} '.format(
self.probackup_path, backup_dir, instance)
elif os.name == 'nt':
archive_command = '"{0}" archive-push -B {1} --instance={2} '.format(
self.probackup_path.replace("\\","\\\\"),
backup_dir.replace("\\","\\\\"),
instance)
if self.archive_compress or compress:
archive_command = archive_command + '--compress '
if overwrite:
archive_command = archive_command + '--overwrite '
if os.name == 'posix':
if self.archive_compress or compress:
archive_command = archive_command + "--compress "
archive_command = archive_command + '--wal-file-path %p --wal-file-name %f'
if overwrite:
archive_command = archive_command + "--overwrite "
archive_command = archive_command + "--wal-file-path %p --wal-file-name %f"
elif os.name == 'nt':
archive_command = archive_command + '--wal-file-path "%p" --wal-file-name "%f"'
node.append_conf(
"postgresql.auto.conf",
'postgresql.auto.conf',
"archive_command = '{0}'".format(
archive_command))
# elif os.name == 'nt':
# node.append_conf(
# "postgresql.auto.conf",
# "archive_command = 'copy %p {0}\\%f'".format(archive_dir)
# )
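To make the concatenation above concrete: on a posix system, for a hypothetical binary path, catalog and instance name, the line written to postgresql.auto.conf ends up roughly as

    archive_command = '"/usr/lib/postgresql/bin/pg_probackup" archive-push -B /mnt/backups --instance=node --wal-file-path %p --wal-file-name %f'

with '--compress ' and '--overwrite ' spliced in before the --wal-file-path part when those options are requested.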
def set_replica(
self, master, replica,
@ -923,18 +935,18 @@ class ProbackupTest(object):
synchronous=False
):
replica.append_conf(
"postgresql.auto.conf", "port = {0}".format(replica.port))
'postgresql.auto.conf', 'port = {0}'.format(replica.port))
replica.append_conf('postgresql.auto.conf', 'hot_standby = on')
replica.append_conf('recovery.conf', "standby_mode = 'on'")
replica.append_conf('recovery.conf', 'standby_mode = on')
replica.append_conf(
"recovery.conf",
'recovery.conf',
"primary_conninfo = 'user={0} port={1} application_name={2}"
" sslmode=prefer sslcompression=1'".format(
self.user, master.port, replica_name)
)
if synchronous:
master.append_conf(
"postgresql.auto.conf",
'postgresql.auto.conf',
"synchronous_standby_names='{0}'".format(replica_name)
)
master.append_conf(
@ -943,8 +955,31 @@ class ProbackupTest(object):
)
master.reload()
def change_backup_status(self, backup_dir, instance, backup_id, status):
control_file_path = os.path.join(
backup_dir, 'backups', instance, backup_id, 'backup.control')
with open(control_file_path, 'r') as f:
actual_control = f.read()
new_control_file = ''
for line in actual_control.splitlines():
if line.startswith('status'):
line = 'status = {0}'.format(status)
new_control_file += line
new_control_file += '\n'
with open(control_file_path, 'wt') as f:
f.write(new_control_file)
f.flush()
f.close()
with open(control_file_path, 'r') as f:
actual_control = f.read()
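A hypothetical use of this helper inside a test method (the backup and the assertion below are illustrative and not part of this commit):

    # take a backup, force its recorded status, then verify it via show_pb()
    backup_id = self.backup_node(backup_dir, 'node', node)
    self.change_backup_status(backup_dir, 'node', backup_id, 'ERROR')
    self.assertEqual('ERROR', self.show_pb(backup_dir, 'node')[0]['status'])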
def wrong_wal_clean(self, node, wal_size):
wals_dir = os.path.join(self.backup_dir(node), "wal")
wals_dir = os.path.join(self.backup_dir(node), 'wal')
wals = [
f for f in os.listdir(wals_dir) if os.path.isfile(
os.path.join(wals_dir, f))
@ -956,39 +991,39 @@ class ProbackupTest(object):
def guc_wal_segment_size(self, node):
var = node.execute(
"postgres",
'postgres',
"select setting from pg_settings where name = 'wal_segment_size'"
)
return int(var[0][0]) * self.guc_wal_block_size(node)
def guc_wal_block_size(self, node):
var = node.execute(
"postgres",
'postgres',
"select setting from pg_settings where name = 'wal_block_size'"
)
return int(var[0][0])
def get_pgpro_edition(self, node):
if node.execute(
"postgres",
'postgres',
"select exists (select 1 from"
" pg_proc where proname = 'pgpro_edition')"
)[0][0]:
var = node.execute("postgres", "select pgpro_edition()")
var = node.execute('postgres', 'select pgpro_edition()')
return str(var[0][0])
else:
return False
def get_username(self):
""" Returns current user name """
return pwd.getpwuid(os.getuid())[0]
return getpass.getuser()
def version_to_num(self, version):
if not version:
return 0
parts = version.split(".")
parts = version.split('.')
while len(parts) < 3:
parts.append("0")
parts.append('0')
num = 0
for part in parts:
num = num * 100 + int(re.sub("[^\d]", "", part))
@ -1003,25 +1038,25 @@ class ProbackupTest(object):
"""
if isinstance(node, testgres.PostgresNode):
if self.version_to_num(
node.safe_psql("postgres", "show server_version")
node.safe_psql('postgres', 'show server_version')
) >= self.version_to_num('10.0'):
node.safe_psql("postgres", "select pg_switch_wal()")
node.safe_psql('postgres', 'select pg_switch_wal()')
else:
node.safe_psql("postgres", "select pg_switch_xlog()")
node.safe_psql('postgres', 'select pg_switch_xlog()')
else:
if self.version_to_num(
node.execute("show server_version")[0][0]
node.execute('show server_version')[0][0]
) >= self.version_to_num('10.0'):
node.execute("select pg_switch_wal()")
node.execute('select pg_switch_wal()')
else:
node.execute("select pg_switch_xlog()")
node.execute('select pg_switch_xlog()')
def wait_until_replica_catch_with_master(self, master, replica):
if self.version_to_num(
master.safe_psql(
"postgres",
"show server_version")) >= self.version_to_num('10.0'):
'postgres',
'show server_version')) >= self.version_to_num('10.0'):
master_function = 'pg_catalog.pg_current_wal_lsn()'
replica_function = 'pg_catalog.pg_last_wal_replay_lsn()'
else:
@ -1039,7 +1074,7 @@ class ProbackupTest(object):
def get_version(self, node):
return self.version_to_num(
testgres.get_pg_config()["VERSION"].split(" ")[1])
testgres.get_pg_config()['VERSION'].split(" ")[1])
def get_bin_path(self, binary):
return testgres.get_bin_path(binary)
@ -1064,7 +1099,7 @@ class ProbackupTest(object):
except:
pass
def pgdata_content(self, pgdata, ignore_ptrack=True):
def pgdata_content(self, pgdata, ignore_ptrack=True, exclude_dirs=None):
""" return dict with directory content. "
" TAKE IT AFTER CHECKPOINT or BACKUP"""
dirs_to_ignore = [
@ -1077,6 +1112,9 @@ class ProbackupTest(object):
'backup_label', 'tablespace_map', 'recovery.conf',
'ptrack_control', 'ptrack_init', 'pg_control'
]
if exclude_dirs:
dirs_to_ignore = dirs_to_ignore + exclude_dirs
# suffixes_to_ignore = (
# '_ptrack'
# )
@ -1099,6 +1137,7 @@ class ProbackupTest(object):
directory_dict['files'][file_relpath]['md5'] = hashlib.md5(
open(file_fullpath, 'rb').read()).hexdigest()
# crappy algorithm
if file.isdigit():
directory_dict['files'][file_relpath]['is_datafile'] = True
size_in_pages = os.path.getsize(file_fullpath)/8192
@ -1234,10 +1273,10 @@ class ProbackupTest(object):
host = '127.0.0.1'
return psycopg2.connect(
database="postgres",
database='postgres',
host='127.0.0.1',
port=port,
async=True
async_=True
)
def wait(self, connection):
@ -1250,7 +1289,7 @@ class ProbackupTest(object):
elif state == psycopg2.extensions.POLL_READ:
select.select([connection.fileno()], [], [])
else:
raise psycopg2.OperationalError("poll() returned %s" % state)
raise psycopg2.OperationalError('poll() returned %s' % state)
def gdb_attach(self, pid):
return GDBobj([str(pid)], self.verbose, attach=True)
@ -1271,7 +1310,7 @@ class GDBobj(ProbackupTest):
# Check gdb presense
try:
gdb_version, _ = subprocess.Popen(
["gdb", "--version"],
['gdb', '--version'],
stdout=subprocess.PIPE
).communicate()
except OSError:
@ -1322,12 +1361,16 @@ class GDBobj(ProbackupTest):
break
def set_breakpoint(self, location):
result = self._execute('break ' + location)
for line in result:
if line.startswith('~"Breakpoint'):
return
elif line.startswith('^error') or line.startswith('(gdb)'):
elif line.startswith('=breakpoint-created'):
return
elif line.startswith('^error'): #or line.startswith('(gdb)'):
break
elif line.startswith('&"break'):
@ -1358,18 +1401,17 @@ class GDBobj(ProbackupTest):
def continue_execution_until_running(self):
result = self._execute('continue')
running = False
for line in result:
if line.startswith('*running'):
running = True
break
if line.startswith('*running') or line.startswith('^running'):
return
if line.startswith('*stopped,reason="breakpoint-hit"'):
running = False
continue
if line.startswith('*stopped,reason="exited-normally"'):
running = False
continue
return running
raise GdbException(
'Failed to continue execution until running.\n'
)
def continue_execution_until_exit(self):
result = self._execute('continue', False)
@ -1380,14 +1422,27 @@ class GDBobj(ProbackupTest):
if line.startswith('*stopped,reason="breakpoint-hit"'):
continue
if (
line.startswith('*stopped,reason="exited-normally"') or
line.startswith('*stopped,reason="exited') or
line == '*stopped\n'
):
return
raise GdbException(
'Failed to continue execution until exit.\n'
)
def continue_execution_until_error(self):
result = self._execute('continue', False)
for line in result:
if line.startswith('^error'):
return
if line.startswith('*stopped,reason="exited'):
return
raise GdbException(
'Failed to continue execution until error.\n')
def continue_execution_until_break(self, ignore_count=0):
if ignore_count > 0:
result = self._execute(
@ -1397,16 +1452,14 @@ class GDBobj(ProbackupTest):
else:
result = self._execute('continue', False)
running = False
for line in result:
if line.startswith('*running'):
running = True
if line.startswith('*stopped,reason="breakpoint-hit"'):
return 'breakpoint-hit'
return
if line.startswith('*stopped,reason="exited-normally"'):
return 'exited-normally'
if running:
return 'running'
break
raise GdbException(
'Failed to continue execution until break.\n')
def stopped_in_breakpoint(self):
output = []
@ -1431,8 +1484,11 @@ class GDBobj(ProbackupTest):
output += [line]
if self.verbose:
print(repr(line))
if line == '^done\n' or line.startswith('*stopped'):
if line.startswith('^done') or line.startswith('*stopped'):
break
if running and line.startswith('*running'):
if line.startswith('^error'):
break
if running and (line.startswith('*running') or line.startswith('^running')):
# if running and line.startswith('*running'):
break
return output


@ -14,7 +14,7 @@ class InitTest(ProbackupTest, unittest.TestCase):
"""Success normal init"""
fname = self.id().split(".")[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname))
node = self.make_simple_node(base_dir=os.path.join(module_name, fname, 'node'))
self.init_pb(backup_dir)
self.assertEqual(
dir_files(backup_dir),
@ -66,7 +66,7 @@ class InitTest(ProbackupTest, unittest.TestCase):
"""Failure with backup catalog already existed"""
fname = self.id().split(".")[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname))
node = self.make_simple_node(base_dir=os.path.join(module_name, fname, 'node'))
self.init_pb(backup_dir)
try:
self.show_pb(backup_dir, 'node')
@ -85,7 +85,7 @@ class InitTest(ProbackupTest, unittest.TestCase):
"""failure with backup catalog should be given as absolute path"""
fname = self.id().split(".")[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname))
node = self.make_simple_node(base_dir=os.path.join(module_name, fname, 'node'))
try:
self.run_pb(["init", "-B", os.path.relpath("%s/backup" % node.base_dir, self.dir_path)])
self.assertEqual(1, 0, 'Expecting Error due to initialization with non-absolute path in --backup-path. Output: {0} \n CMD: {1}'.format(

tests/locking.py Normal file

@ -0,0 +1,449 @@
import unittest
import os
from time import sleep
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
module_name = 'locking'
class LockingTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_locking_running_validate_1(self):
"""
make node, take full backup, stop it in the middle,
run validate, expect it to be executed successfully,
concurrent RUNNING backup with pid file and active process is legal
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
self.backup_node(backup_dir, 'node', node)
gdb = self.backup_node(
backup_dir, 'node', node, gdb=True)
gdb.set_breakpoint('copy_file')
gdb.run_until_break()
gdb.continue_execution_until_break(20)
self.assertEqual(
'OK', self.show_pb(backup_dir, 'node')[0]['status'])
self.assertEqual(
'RUNNING', self.show_pb(backup_dir, 'node')[1]['status'])
validate_output = self.validate_pb(
backup_dir, options=['--log-level-console=LOG'])
backup_id = self.show_pb(backup_dir, 'node')[1]['id']
self.assertIn(
"is using backup {0} and still is running".format(backup_id),
validate_output,
'\n Unexpected Validate Output: {0}\n'.format(repr(validate_output)))
self.assertEqual(
'OK', self.show_pb(backup_dir, 'node')[0]['status'])
self.assertEqual(
'RUNNING', self.show_pb(backup_dir, 'node')[1]['status'])
# Clean after yourself
# self.del_test_dir(module_name, fname)
def test_locking_running_validate_2(self):
"""
make node, take full backup, stop it in the middle,
kill process so no cleanup is done - pid file is in place,
run validate, expect it to fail,
RUNNING backup with pid file AND without active pid is legal,
but its status must be changed to ERROR and the pid file deleted
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
self.backup_node(backup_dir, 'node', node)
gdb = self.backup_node(
backup_dir, 'node', node, gdb=True)
gdb.set_breakpoint('copy_file')
gdb.run_until_break()
gdb.continue_execution_until_break(20)
gdb._execute('signal SIGKILL')
gdb.continue_execution_until_error()
self.assertEqual(
'OK', self.show_pb(backup_dir, 'node')[0]['status'])
self.assertEqual(
'RUNNING', self.show_pb(backup_dir, 'node')[1]['status'])
backup_id = self.show_pb(backup_dir, 'node')[1]['id']
try:
self.validate_pb(backup_dir)
self.assertEqual(
1, 0,
"Expecting Error because RUNNING backup is no longer active.\n "
"Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
"which used backup {0} no longer exists".format(
backup_id) in e.message and
"Backup {0} has status RUNNING, change it "
"to ERROR and skip validation".format(
backup_id) in e.message and
"WARNING: Some backups are not valid" in
e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
self.assertEqual(
'OK', self.show_pb(backup_dir, 'node')[0]['status'])
self.assertEqual(
'ERROR', self.show_pb(backup_dir, 'node')[1]['status'])
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_locking_running_validate_2_specific_id(self):
"""
make node, take full backup, stop it in the middle,
kill process so no cleanup is done - pid file is in place,
run validate on this specific backup,
expect it to fail,
RUNNING backup with pid file AND without active pid is legal,
but its status must be changed to ERROR and the pid file deleted
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
self.backup_node(backup_dir, 'node', node)
gdb = self.backup_node(
backup_dir, 'node', node, gdb=True)
gdb.set_breakpoint('copy_file')
gdb.run_until_break()
gdb.continue_execution_until_break(20)
gdb._execute('signal SIGKILL')
gdb.continue_execution_until_error()
self.assertEqual(
'OK', self.show_pb(backup_dir, 'node')[0]['status'])
self.assertEqual(
'RUNNING', self.show_pb(backup_dir, 'node')[1]['status'])
backup_id = self.show_pb(backup_dir, 'node')[1]['id']
try:
self.validate_pb(backup_dir, 'node', backup_id)
self.assertEqual(
1, 0,
"Expecting Error because RUNNING backup is no longer active.\n "
"Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
"which used backup {0} no longer exists".format(
backup_id) in e.message and
"Backup {0} has status RUNNING, change it "
"to ERROR and skip validation".format(
backup_id) in e.message and
"ERROR: Backup {0} has status: ERROR".format(backup_id) in
e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
self.assertEqual(
'OK', self.show_pb(backup_dir, 'node')[0]['status'])
self.assertEqual(
'ERROR', self.show_pb(backup_dir, 'node')[1]['status'])
try:
self.validate_pb(backup_dir, 'node', backup_id)
self.assertEqual(
1, 0,
"Expecting Error because backup has status ERROR.\n "
"Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertIn(
"ERROR: Backup {0} has status: ERROR".format(backup_id),
e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
try:
self.validate_pb(backup_dir)
self.assertEqual(
1, 0,
"Expecting Error because backup has status ERROR.\n "
"Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
"WARNING: Backup {0} has status ERROR. Skip validation".format(
backup_id) in e.message and
"WARNING: Some backups are not valid" in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_locking_running_3(self):
"""
make node, take full backup, stop it in the middle,
terminate process, delete pid file,
run validate, expect it to fail,
RUNNING backup without pid file AND without active pid is legal,
its status must be changed to ERROR
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
self.backup_node(backup_dir, 'node', node)
gdb = self.backup_node(
backup_dir, 'node', node, gdb=True)
gdb.set_breakpoint('copy_file')
gdb.run_until_break()
gdb.continue_execution_until_break(20)
gdb._execute('signal SIGKILL')
gdb.continue_execution_until_error()
self.assertEqual(
'OK', self.show_pb(backup_dir, 'node')[0]['status'])
self.assertEqual(
'RUNNING', self.show_pb(backup_dir, 'node')[1]['status'])
backup_id = self.show_pb(backup_dir, 'node')[1]['id']
os.remove(
os.path.join(backup_dir, 'backups', 'node', backup_id, 'backup.pid'))
try:
self.validate_pb(backup_dir)
self.assertEqual(
1, 0,
"Expecting Error because RUNNING backup is no longer active.\n "
"Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
"Backup {0} has status RUNNING, change it "
"to ERROR and skip validation".format(
backup_id) in e.message and
"WARNING: Some backups are not valid" in
e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
self.assertEqual(
'OK', self.show_pb(backup_dir, 'node')[0]['status'])
self.assertEqual(
'ERROR', self.show_pb(backup_dir, 'node')[1]['status'])
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_locking_restore_locked(self):
"""
make node, take full backup, take two page backups,
launch validate on PAGE1 and stop it in the middle,
launch restore of PAGE2.
Expect restore to fail because validation of
intermediate backup is impossible
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# FULL
full_id = self.backup_node(backup_dir, 'node', node)
# PAGE1
backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
# PAGE2
self.backup_node(backup_dir, 'node', node, backup_type='page')
gdb = self.validate_pb(
backup_dir, 'node', backup_id=backup_id, gdb=True)
gdb.set_breakpoint('pgBackupValidate')
gdb.run_until_break()
node.cleanup()
try:
self.restore_node(backup_dir, 'node', node)
self.assertEqual(
1, 0,
"Expecting Error because restore without whole chain validation "
"is prohibited unless --no-validate provided.\n "
"Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
"ERROR: Cannot lock backup {0} directory\n".format(full_id) in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_locking_restore_locked_without_validation(self):
"""
make node, take full backup, take page backup,
launch validate on FULL and stop it in the middle,
launch restore of PAGE.
Expect restore to fail because validation of
intermediate backup is impossible
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# FULL
backup_id = self.backup_node(backup_dir, 'node', node)
# PAGE1
restore_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
gdb = self.validate_pb(
backup_dir, 'node', backup_id=backup_id, gdb=True)
gdb.set_breakpoint('pgBackupValidate')
gdb.run_until_break()
node.cleanup()
try:
self.restore_node(
backup_dir, 'node', node, options=['--no-validate'])
self.assertEqual(
1, 0,
"Expecting Error because restore without whole chain validation "
"is prohibited unless --no-validate provided.\n "
"Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
"Backup {0} is used without validation".format(
restore_id) in e.message and
'is using backup {0} and still is running'.format(
backup_id) in e.message and
'ERROR: Cannot lock backup directory' in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_locking_concurrent_vaidate_and_backup(self):
"""
make node, take full backup, launch validate
and stop it in the middle, take page backup.
Expect PAGE backup to be successfully executed
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# FULL
self.backup_node(backup_dir, 'node', node)
# PAGE2
backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
gdb = self.validate_pb(
backup_dir, 'node', backup_id=backup_id, gdb=True)
gdb.set_breakpoint('pgBackupValidate')
gdb.run_until_break()
# This PAGE backup is expected to be successful
self.backup_node(backup_dir, 'node', node, backup_type='page')
# Clean after yourself
self.del_test_dir(module_name, fname)


@ -0,0 +1,74 @@
import unittest
import os
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
import datetime
module_name = 'logging'
class LogTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
# PGPRO-2154
def test_log_rotation(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'])
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
self.set_config(
backup_dir, 'node',
options=['--log-rotation-age=1s', '--log-rotation-size=1MB'])
self.backup_node(
backup_dir, 'node', node,
options=['--stream', '--log-level-file=verbose'])
gdb = self.backup_node(
backup_dir, 'node', node,
options=['--stream', '--log-level-file=verbose'], gdb=True)
gdb.set_breakpoint('open_logfile')
gdb.run_until_break()
gdb.continue_execution_until_exit()
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_log_filename_strftime(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'])
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
self.set_config(
backup_dir, 'node',
options=['--log-rotation-age=1d'])
self.backup_node(
backup_dir, 'node', node,
options=[
'--stream',
'--log-level-file=VERBOSE',
'--log-filename=pg_probackup-%a.log'])
day_of_week = datetime.datetime.today().strftime("%a")
path = os.path.join(
backup_dir, 'log', 'pg_probackup-{0}.log'.format(day_of_week))
self.assertTrue(os.path.isfile(path))
# Clean after yourself
self.del_test_dir(module_name, fname)

File diff suppressed because it is too large


@ -52,7 +52,7 @@ class OptionTest(ProbackupTest, unittest.TestCase):
fname = self.id().split(".")[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname))
base_dir=os.path.join(module_name, fname, 'node'))
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
@ -118,7 +118,7 @@ class OptionTest(ProbackupTest, unittest.TestCase):
fname = self.id().split(".")[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
pg_options={
'wal_level': 'logical',
'max_wal_senders': '2'})


@ -23,7 +23,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -34,13 +34,13 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
}
)
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname))
base_dir=os.path.join(module_name, fname, 'node_restored'))
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node_restored.cleanup()
node.start()
node.slow_start()
self.create_tblspace_in_node(node, 'somedata')
node.safe_psql(
@ -116,7 +116,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -128,7 +128,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
# FULL BACKUP
node.safe_psql(
@ -205,7 +205,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -217,7 +217,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
# FULL BACKUP
node.safe_psql(
@ -300,7 +300,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -317,7 +317,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
self.create_tblspace_in_node(node, 'somedata')
@ -341,7 +341,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
# RESTORE NODE
restored_node = self.make_simple_node(
base_dir="{0}/{1}/restored_node".format(module_name, fname))
base_dir=os.path.join(module_name, fname, 'restored_node'))
restored_node.cleanup()
tblspc_path = self.get_tblspace_path(node, 'somedata')
tblspc_path_new = self.get_tblspace_path(
@ -384,7 +384,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True, initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
@ -397,7 +397,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
self.create_tblspace_in_node(node, 'somedata')
# FULL backup
@ -427,7 +427,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
# RESTORE
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname)
base_dir=os.path.join(module_name, fname, 'node_restored')
)
node_restored.cleanup()
@ -449,7 +449,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
# START RESTORED NODE
node_restored.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node_restored.port))
node_restored.start()
node_restored.slow_start()
# Clean after yourself
self.del_test_dir(module_name, fname)
@ -464,7 +464,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True, initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
@ -477,7 +477,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
self.create_tblspace_in_node(node, 'somedata')
@ -508,7 +508,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
# RESTORE
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname)
base_dir=os.path.join(module_name, fname, 'node_restored')
)
node_restored.cleanup()
@ -530,7 +530,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
# START RESTORED NODE
node_restored.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node_restored.port))
node_restored.start()
node_restored.slow_start()
# Clean after yourself
self.del_test_dir(module_name, fname)
@ -544,21 +544,21 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
# Initialize instance and backup directory
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={
"hot_standby": "on"
}
)
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node_restored'),
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node_restored.cleanup()
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
# Do full backup
self.backup_node(backup_dir, 'node', node)
@ -598,7 +598,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.start()
node_restored.slow_start()
# Check restored node
count2 = node_restored.execute("postgres", "select count(*) from test")
@ -619,7 +619,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
# Initialize instance and backup directory
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={}
)
@ -627,7 +627,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
# Do full backup
self.backup_node(backup_dir, 'node', node)
@ -656,7 +656,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
# Drop node and restore it
node.cleanup()
self.restore_node(backup_dir, 'node', node)
node.start()
node.slow_start()
# Clean after yourself
node.cleanup()
@ -673,7 +673,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
@ -681,7 +681,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
self.backup_node(backup_dir, 'node', node)
@ -691,7 +691,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
# delete last wal segment
wals_dir = os.path.join(backup_dir, 'wal', 'node')
wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(
wals_dir, f)) and not f.endswith('.backup')]
wals_dir, f)) and not f.endswith('.backup') and not f.endswith('.partial')]
wals = map(str, wals)
file = os.path.join(wals_dir, max(wals))
os.remove(file)
@ -701,8 +701,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
# Single-thread PAGE backup
try:
self.backup_node(
backup_dir, 'node', node,
backup_type='page')
backup_dir, 'node', node, backup_type='page')
self.assertEqual(
1, 0,
"Expecting Error because of wal segment disappearance.\n "
@ -712,7 +711,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
self.assertTrue(
'INFO: Wait for LSN' in e.message and
'in archived WAL segment' in e.message and
'could not read WAL record at' in e.message and
'Could not read WAL record at' in e.message and
'WAL segment "{0}" is absent\n'.format(
file) in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
@ -738,7 +737,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
self.assertTrue(
'INFO: Wait for LSN' in e.message and
'in archived WAL segment' in e.message and
'could not read WAL record at' in e.message and
'Could not read WAL record at' in e.message and
'WAL segment "{0}" is absent\n'.format(
file) in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
@ -763,7 +762,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
@ -771,12 +770,12 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
self.backup_node(backup_dir, 'node', node)
# make some wals
node.pgbench_init(scale=3)
node.pgbench_init(scale=4)
# delete last wal segment
wals_dir = os.path.join(backup_dir, 'wal', 'node')
@ -830,7 +829,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
self.assertTrue(
'INFO: Wait for LSN' in e.message and
'in archived WAL segment' in e.message and
'could not read WAL record at' in e.message and
'Could not read WAL record at' in e.message and
'incorrect resource manager data checksum in record at' in e.message and
'Possible WAL corruption. Error has occured during reading WAL segment "{0}"'.format(
file) in e.message,
@ -856,7 +855,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
self.assertTrue(
'INFO: Wait for LSN' in e.message and
'in archived WAL segment' in e.message and
'could not read WAL record at' in e.message and
'Could not read WAL record at' in e.message and
'incorrect resource manager data checksum in record at' in e.message and
'Possible WAL corruption. Error has occured during reading WAL segment "{0}"'.format(
file) in e.message,
@ -884,23 +883,22 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
alien_node = self.make_simple_node(
base_dir="{0}/{1}/alien_node".format(module_name, fname)
)
base_dir=os.path.join(module_name, fname, 'alien_node'))
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
self.add_instance(backup_dir, 'alien_node', alien_node)
self.set_archiving(backup_dir, 'alien_node', alien_node)
alien_node.start()
alien_node.slow_start()
self.backup_node(backup_dir, 'node', node)
self.backup_node(backup_dir, 'alien_node', alien_node)
@ -954,7 +952,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
self.assertTrue(
'INFO: Wait for LSN' in e.message and
'in archived WAL segment' in e.message and
'could not read WAL record at' in e.message and
'Could not read WAL record at' in e.message and
'WAL file is from different database system: WAL file database system identifier is' in e.message and
'pg_control database system identifier is' in e.message and
'Possible WAL corruption. Error has occured during reading WAL segment "{0}"'.format(
@ -981,7 +979,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
self.assertTrue(
'INFO: Wait for LSN' in e.message and
'in archived WAL segment' in e.message and
'could not read WAL record at' in e.message and
'Could not read WAL record at' in e.message and
'WAL file is from different database system: WAL file database system identifier is' in e.message and
'pg_control database system identifier is' in e.message and
'Possible WAL corruption. Error has occured during reading WAL segment "{0}"'.format(
@ -1004,7 +1002,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
@ -1013,7 +1011,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
self.backup_node(backup_dir, 'node', node)
@ -1042,7 +1040,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -1082,7 +1080,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
# RESTORE
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname)
base_dir=os.path.join(module_name, fname, 'node_restored')
)
node_restored.cleanup()

tests/pgpro2068.py Normal file
View File

@ -0,0 +1,169 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack
from datetime import datetime, timedelta
import subprocess
from time import sleep
import shutil
import signal
module_name = '2068'
class BugTest(ProbackupTest, unittest.TestCase):
def test_minrecpoint_on_replica(self):
"""
https://jira.postgrespro.ru/browse/PGPRO-2068
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'checkpoint_timeout': '60min',
'checkpoint_completion_target': '0.9',
'bgwriter_delay': '10ms',
'bgwriter_lru_maxpages': '2000',
'bgwriter_lru_multiplier': '4.0',
'max_wal_size': '100GB'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# take full backup and restore it as replica
self.backup_node(
backup_dir, 'node', node, options=['--stream'])
# start replica
replica = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'replica'))
replica.cleanup()
self.restore_node(backup_dir, 'node', replica, options=['-R'])
self.set_replica(node, replica)
self.add_instance(backup_dir, 'replica', replica)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(replica.port))
replica.append_conf(
'postgresql.auto.conf', 'restart_after_crash = off')
# we need those later
node.safe_psql(
"postgres",
"CREATE EXTENSION plpythonu")
node.safe_psql(
"postgres",
"CREATE EXTENSION pageinspect")
replica.slow_start(replica=True)
# generate some data
node.pgbench_init(scale=10)
pgbench = node.pgbench(
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
options=["-c", "4", "-T", "20"])
pgbench.wait()
pgbench.stdout.close()
# generate some more data and leave it in background
pgbench = node.pgbench(
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
options=["-c", "4", "-T", "30"])
# get pids of background workers
startup_pid = replica.safe_psql(
'postgres',
"select pid from pg_stat_activity where backend_type = 'startup'").rstrip()
checkpointer_pid = replica.safe_psql(
'postgres',
"select pid from pg_stat_activity where backend_type = 'checkpointer'").rstrip()
bgwriter_pid = replica.safe_psql(
'postgres',
"select pid from pg_stat_activity where backend_type = 'background writer'").rstrip()
# wait for shared buffer on replica to be filled with dirty data
sleep(10)
# break checkpointer on UpdateLastRemovedPtr
gdb_checkpointer = self.gdb_attach(checkpointer_pid)
gdb_checkpointer.set_breakpoint('UpdateLastRemovedPtr')
gdb_checkpointer.continue_execution_until_break()
# break recovery on UpdateControlFile
gdb_recovery = self.gdb_attach(startup_pid)
gdb_recovery.set_breakpoint('UpdateMinRecoveryPoint')
gdb_recovery.continue_execution_until_break()
gdb_recovery.set_breakpoint('UpdateControlFile')
gdb_recovery.continue_execution_until_break()
# stop data generation
pgbench.wait()
pgbench.stdout.close()
# kill someone, we need a crash
os.kill(int(bgwriter_pid), 9)
gdb_recovery._execute('detach')
gdb_checkpointer._execute('detach')
# just to be sure
try:
replica.stop(['-m', 'immediate', '-D', replica.data_dir])
except:
pass
# Promote replica with 'immediate' target action
replica.append_conf(
'recovery.conf', "recovery_target = 'immediate'")
replica.append_conf(
'recovery.conf', "recovery_target_action = 'promote'")
replica.slow_start()
script = '''
DO
$$
relations = plpy.execute("select class.oid from pg_class class WHERE class.relkind IN ('r', 'i', 't', 'm') and class.relpersistence = 'p'")
current_xlog_lsn = plpy.execute("select pg_last_wal_replay_lsn() as lsn")[0]['lsn']
plpy.notice('CURRENT LSN: {0}'.format(current_xlog_lsn))
found_corruption = False
for relation in relations:
pages_from_future = plpy.execute("with number_of_blocks as (select blknum from generate_series(0, pg_relation_size({0}) / 8192 -1) as blknum) select blknum, lsn, checksum, flags, lower, upper, special, pagesize, version, prune_xid from number_of_blocks, page_header(get_raw_page('{0}'::oid::regclass::text, number_of_blocks.blknum::int)) where lsn > '{1}'::pg_lsn".format(relation['oid'], current_xlog_lsn))
if pages_from_future.nrows() == 0:
continue
for page in pages_from_future:
plpy.notice('Found page from future. OID: {0}, BLKNUM: {1}, LSN: {2}'.format(relation['oid'], page['blknum'], page['lsn']))
found_corruption = True
if found_corruption:
plpy.error('Found Corruption')
$$ LANGUAGE plpythonu;
'''
# Find blocks from future
replica.safe_psql(
'postgres',
script)
# error is expected if version < 10.6
# gdb_backup.continue_execution_until_exit()
# do basebackup
# do pg_probackup, expect error
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@ -20,15 +20,16 @@ class CheckSystemID(ProbackupTest, unittest.TestCase):
check that backup failed
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
pg_options={'wal_level': 'replica'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
node.slow_start()
file = os.path.join(node.base_dir,'data', 'global', 'pg_control')
os.remove(file)
@ -55,18 +56,20 @@ class CheckSystemID(ProbackupTest, unittest.TestCase):
check that backup failed
"""
fname = self.id().split('.')[3]
node1 = self.make_simple_node(base_dir="{0}/{1}/node1".format(module_name, fname),
node1 = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node1'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
node1.start()
node2 = self.make_simple_node(base_dir="{0}/{1}/node2".format(module_name, fname),
pg_options={'wal_level': 'replica'})
node1.slow_start()
node2 = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node2'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
node2.start()
pg_options={'wal_level': 'replica'})
node2.slow_start()
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)

View File

@ -19,7 +19,7 @@ class ArchiveCheck(ProbackupTest, unittest.TestCase):
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
@ -30,7 +30,7 @@ class ArchiveCheck(ProbackupTest, unittest.TestCase):
# make erroneous archive_command
node.append_conf("postgresql.auto.conf", "archive_command = 'exit 0'")
node.start()
node.slow_start()
node.pgbench_init(scale=5)
pgbench = node.pgbench(

File diff suppressed because it is too large

View File

@ -1,258 +0,0 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
import time
module_name = 'ptrack_clean'
class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_clean(self):
"""Take backups of every available types and check that PTRACK is clean"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
self.create_tblspace_in_node(node, 'somedata')
# Create table and indexes
node.safe_psql(
"postgres",
"create extension bloom; create sequence t_seq; "
"create table t_heap tablespace somedata "
"as select i as id, nextval('t_seq') as t_seq, "
"md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql(
"postgres",
"create index {0} on {1} using {2}({3}) "
"tablespace somedata".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'],
idx_ptrack[i]['column']))
# Take FULL backup to clean every ptrack
self.backup_node(
backup_dir, 'node', node,
options=['-j10', '--stream'])
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get fork size and calculate it in pages
idx_ptrack[i]['size'] = self.get_fork_size(node, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
# Update everything and vacuum it
node.safe_psql(
'postgres',
"update t_heap set t_seq = nextval('t_seq'), "
"text = md5(text), "
"tsvector = md5(repeat(tsvector::text, 10))::tsvector;")
node.safe_psql('postgres', 'vacuum t_heap')
# Take PTRACK backup to clean every ptrack
backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
options=['-j10'])
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get new size of heap and indexes and calculate it in pages
idx_ptrack[i]['size'] = self.get_fork_size(node, i)
# update path to heap and index files in case they`ve changed
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# # get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
# check that ptrack bits are cleaned
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
# Update everything and vacuum it
node.safe_psql(
'postgres',
"update t_heap set t_seq = nextval('t_seq'), "
"text = md5(text), "
"tsvector = md5(repeat(tsvector::text, 10))::tsvector;")
node.safe_psql('postgres', 'vacuum t_heap')
# Take PAGE backup to clean every ptrack
self.backup_node(
backup_dir, 'node', node,
backup_type='page', options=['-j10'])
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get new size of heap and indexes and calculate it in pages
idx_ptrack[i]['size'] = self.get_fork_size(node, i)
# update path to heap and index files in case they`ve changed
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# # get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
# check that ptrack bits are cleaned
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_clean_replica(self):
"""Take backups of every available types from master and check that PTRACK on replica is clean"""
fname = self.id().split('.')[3]
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2',
'archive_timeout': '30s'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.start()
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start()
# Create table and indexes
master.safe_psql(
"postgres",
"create extension bloom; create sequence t_seq; "
"create table t_heap as select i as id, "
"nextval('t_seq') as t_seq, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql(
"postgres",
"create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'],
idx_ptrack[i]['column']))
# Take FULL backup to clean every ptrack
self.backup_node(
backup_dir,
'replica',
replica,
options=[
'-j10', '--stream',
'--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(master.port)])
master.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get fork size and calculate it in pages
idx_ptrack[i]['size'] = self.get_fork_size(replica, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
# Update everything and vacuum it
master.safe_psql(
'postgres',
"update t_heap set t_seq = nextval('t_seq'), "
"text = md5(text), "
"tsvector = md5(repeat(tsvector::text, 10))::tsvector;")
master.safe_psql('postgres', 'vacuum t_heap')
# Take PTRACK backup to clean every ptrack
backup_id = self.backup_node(
backup_dir,
'replica',
replica,
backup_type='ptrack',
options=[
'-j10', '--stream',
'--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(master.port)])
master.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get new size of heap and indexes and calculate it in pages
idx_ptrack[i]['size'] = self.get_fork_size(replica, i)
# update path to heap and index files in case they`ve changed
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# # get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
# check that ptrack bits are cleaned
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
# Update everything and vacuum it
master.safe_psql(
'postgres',
"update t_heap set t_seq = nextval('t_seq'), text = md5(text), "
"tsvector = md5(repeat(tsvector::text, 10))::tsvector;")
master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')
# Take PAGE backup to clean every ptrack
self.backup_node(
backup_dir,
'replica',
replica,
backup_type='page',
options=[
'-j10', '--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(master.port),
'--stream'])
master.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get new size of heap and indexes and calculate it in pages
idx_ptrack[i]['size'] = self.get_fork_size(replica, i)
# update path to heap and index files in case they`ve changed
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# # get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
# check that ptrack bits are cleaned
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@ -1,375 +0,0 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
from time import sleep
from sys import exit
module_name = 'ptrack_cluster'
class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_cluster_on_btree(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
self.create_tblspace_in_node(node, 'somedata')
# Create table and indexes
node.safe_psql(
"postgres",
"create extension bloom; create sequence t_seq; "
"create table t_heap tablespace somedata "
"as select i as id, nextval('t_seq') as t_seq, "
"md5(i::text) as text, md5(repeat(i::text,10))::tsvector "
"as tsvector from generate_series(0,2560) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql(
"postgres",
"create index {0} on {1} using {2}({3}) "
"tablespace somedata".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'], idx_ptrack[i]['column']))
node.safe_psql('postgres', 'vacuum t_heap')
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.backup_node(
backup_dir, 'node', node, options=['-j10', '--stream'])
node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
node.safe_psql('postgres', 'cluster t_heap using t_btree')
node.safe_psql('postgres', 'checkpoint')
# CHECK PTRACK SANITY
success = True
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
# update path to heap and index files in case they`ve changed
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'],
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity
if not self.check_ptrack_sanity(idx_ptrack[i]):
success = False
self.assertTrue(
success, 'Ptrack has failed to register changes in data files'
)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_ptrack_cluster_on_gist(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
# Create table and indexes
node.safe_psql(
"postgres",
"create extension bloom; create sequence t_seq; "
"create table t_heap as select i as id, "
"nextval('t_seq') as t_seq, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql(
"postgres",
"create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'], idx_ptrack[i]['column']))
node.safe_psql('postgres', 'vacuum t_heap')
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.backup_node(
backup_dir, 'node', node, options=['-j10', '--stream'])
node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
node.safe_psql('postgres', 'cluster t_heap using t_gist')
node.safe_psql('postgres', 'checkpoint')
# CHECK PTRACK SANITY
success = True
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
# update path to heap and index files in case they`ve changed
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'],
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# Compare pages and check ptrack sanity
if not self.check_ptrack_sanity(idx_ptrack[i]):
success = False
self.assertTrue(
success, 'Ptrack has failed to register changes in data files'
)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_ptrack_cluster_on_btree_replica(self):
fname = self.id().split('.')[3]
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.start()
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start()
# Create table and indexes
master.safe_psql(
"postgres",
"create extension bloom; create sequence t_seq; "
"create table t_heap as select i as id, "
"nextval('t_seq') as t_seq, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql(
"postgres",
"create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'],
idx_ptrack[i]['column']))
master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')
self.backup_node(
backup_dir, 'replica', replica, options=[
'-j10', '--stream', '--master-host=localhost',
'--master-db=postgres', '--master-port={0}'.format(
master.port)])
for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
master.safe_psql('postgres', 'cluster t_heap using t_btree')
master.safe_psql('postgres', 'checkpoint')
# Sync master and replica
self.wait_until_replica_catch_with_master(master, replica)
replica.safe_psql('postgres', 'checkpoint')
# CHECK PTRACK SANITY
success = True
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
# update path to heap and index files in case they`ve changed
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'],
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity
if not self.check_ptrack_sanity(idx_ptrack[i]):
success = False
self.assertTrue(
success, 'Ptrack has failed to register changes in data files'
)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_ptrack_cluster_on_gist_replica(self):
fname = self.id().split('.')[3]
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.start()
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, 'replica', synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start()
# Create table and indexes
master.safe_psql(
"postgres",
"create extension bloom; create sequence t_seq; "
"create table t_heap as select i as id, "
"nextval('t_seq') as t_seq, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql(
"postgres",
"create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'],
idx_ptrack[i]['column']))
master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')
# Sync master and replica
self.wait_until_replica_catch_with_master(master, replica)
replica.safe_psql('postgres', 'checkpoint')
self.backup_node(
backup_dir, 'replica', replica, options=[
'-j10', '--stream', '--master-host=localhost',
'--master-db=postgres', '--master-port={0}'.format(
master.port)])
for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
master.safe_psql('postgres', 'cluster t_heap using t_gist')
master.safe_psql('postgres', 'checkpoint')
# Sync master and replica
self.wait_until_replica_catch_with_master(master, replica)
replica.safe_psql('postgres', 'checkpoint')
# CHECK PTRACK SANITY
success = True
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
# update path to heap and index files in case they`ve changed
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'],
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# Compare pages and check ptrack sanity
if not self.check_ptrack_sanity(idx_ptrack[i]):
success = False
self.assertTrue(
success, 'Ptrack has failed to register changes in data files'
)
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@ -1,183 +0,0 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
import time
module_name = 'ptrack_clean'
class SimpleTest(ProbackupTest, unittest.TestCase):
@unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_empty(self):
"""Take backups of every available types and check that PTRACK is clean"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2',
'autovacuum': 'off'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
self.create_tblspace_in_node(node, 'somedata')
# Create table
node.safe_psql(
"postgres",
"create extension bloom; create sequence t_seq; "
"create table t_heap "
"(id int DEFAULT nextval('t_seq'), text text, tsvector tsvector) "
"tablespace somedata")
# Take FULL backup to clean every ptrack
self.backup_node(
backup_dir, 'node', node,
options=['-j10', '--stream'])
# Create indexes
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql(
"postgres",
"create index {0} on {1} using {2}({3}) "
"tablespace somedata".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'],
idx_ptrack[i]['column']))
node.safe_psql('postgres', 'checkpoint')
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname))
node_restored.cleanup()
tblspace1 = self.get_tblspace_path(node, 'somedata')
tblspace2 = self.get_tblspace_path(node_restored, 'somedata')
# Take PTRACK backup
backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
options=['-j10'])
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)
self.restore_node(
backup_dir, 'node', node_restored,
backup_id=backup_id,
options=[
"-j", "4",
"-T{0}={1}".format(tblspace1, tblspace2)]
)
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
self.del_test_dir(module_name, fname)
@unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_empty_replica(self):
"""Take backups of every available types from master and check that PTRACK on replica is clean"""
fname = self.id().split('.')[3]
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.start()
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start()
# Create table
master.safe_psql(
"postgres",
"create extension bloom; create sequence t_seq; "
"create table t_heap "
"(id int DEFAULT nextval('t_seq'), text text, tsvector tsvector)")
self.wait_until_replica_catch_with_master(master, replica)
# Take FULL backup
self.backup_node(
backup_dir,
'replica',
replica,
options=[
'-j10', '--stream',
'--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(master.port)])
# Create indexes
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql(
"postgres",
"create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'],
idx_ptrack[i]['column']))
self.wait_until_replica_catch_with_master(master, replica)
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname))
node_restored.cleanup()
# Take PTRACK backup
backup_id = self.backup_node(
backup_dir,
'replica',
replica,
backup_type='ptrack',
options=[
'-j10', '--stream',
'--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(master.port)])
if self.paranoia:
pgdata = self.pgdata_content(replica.data_dir)
self.restore_node(
backup_dir, 'replica', node_restored,
backup_id=backup_id,
options=["-j", "4"]
)
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@ -1,74 +0,0 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
module_name = 'ptrack_move_to_tablespace'
class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_recovery(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
self.create_tblspace_in_node(node, 'somedata')
# Create table and indexes
node.safe_psql(
"postgres",
"create extension bloom; create sequence t_seq; "
"create table t_heap as select i as id, "
"md5(i::text) as text,md5(repeat(i::text,10))::tsvector as "
"tsvector from generate_series(0,2560) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql(
"postgres",
"create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'], idx_ptrack[i]['column']))
# Move table and indexes and make checkpoint
for i in idx_ptrack:
if idx_ptrack[i]['type'] == 'heap':
node.safe_psql(
'postgres',
'alter table {0} set tablespace somedata;'.format(i))
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql(
'postgres',
'alter index {0} set tablespace somedata'.format(i))
node.safe_psql('postgres', 'checkpoint')
# Check ptrack files
for i in idx_ptrack:
if idx_ptrack[i]['type'] == 'seq':
continue
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['size'] = self.get_fork_size(node, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
# check that ptrack has correct bits after recovery
self.check_ptrack_recovery(idx_ptrack[i])
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@ -1,72 +0,0 @@
import os
import unittest
from sys import exit
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
module_name = 'ptrack_recovery'
class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_recovery(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
self.create_tblspace_in_node(node, 'somedata')
# Create table
node.safe_psql(
"postgres",
"create extension bloom; create sequence t_seq; "
"create table t_heap tablespace somedata "
"as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")
# Create indexes
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql(
"postgres", "create index {0} on {1} using {2}({3}) "
"tablespace somedata".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'], idx_ptrack[i]['column']))
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['size'] = int(self.get_fork_size(node, i))
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
if self.verbose:
print('Killing postmaster. Losing Ptrack changes')
node.stop(['-m', 'immediate', '-D', node.data_dir])
if not node.status():
node.start()
else:
print("Die! Die! Why won't you die?... Why won't you die?")
exit(1)
for i in idx_ptrack:
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
# check that ptrack has correct bits after recovery
self.check_ptrack_recovery(idx_ptrack[i])
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@ -1,179 +0,0 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
module_name = 'ptrack_truncate'
class SimpleTest(ProbackupTest, unittest.TestCase):
@unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_truncate(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
self.create_tblspace_in_node(node, 'somedata')
# Create table and indexes
node.safe_psql(
"postgres",
"create extension bloom; create sequence t_seq; "
"create table t_heap tablespace somedata "
"as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql(
"postgres",
"create index {0} on {1} using {2}({3}) "
"tablespace somedata".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'], idx_ptrack[i]['column']))
node.safe_psql('postgres', 'truncate t_heap')
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get fork size and calculate it in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums for every page of this fork
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
# Make full backup to clean every ptrack
self.backup_node(
backup_dir, 'node', node, options=['-j10', '--stream'])
for i in idx_ptrack:
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])
# Clean after yourself
self.del_test_dir(module_name, fname)
@unittest.skip("skip")
def test_ptrack_truncate_replica(self):
fname = self.id().split('.')[3]
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2',
'checkpoint_timeout': '30'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.slow_start()
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, 'replica', synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.slow_start(replica=True)
# Create table and indexes
self.create_tblspace_in_node(master, 'somedata')
master.safe_psql(
"postgres",
"create extension bloom; create sequence t_seq; "
"create table t_heap tablespace somedata "
"as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql(
"postgres", "create index {0} on {1} using {2}({3}) "
"tablespace somedata".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'], idx_ptrack[i]['column']))
# Sync master and replica
self.wait_until_replica_catch_with_master(master, replica)
replica.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get fork size and calculate it in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate md5sums for every page of this fork
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
# Make full backup to clean every ptrack
self.backup_node(
backup_dir, 'replica', replica,
options=[
'-j10',
'--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(master.port)])
for i in idx_ptrack:
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])
master.safe_psql('postgres', 'truncate t_heap')
master.safe_psql('postgres', 'checkpoint')
# Sync master and replica
self.wait_until_replica_catch_with_master(master, replica)
replica.safe_psql('postgres', 'checkpoint')
# CHECK PTRACK SANITY
success = True
for i in idx_ptrack:
# get new size of heap and indexes and calculate it in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
# update path to heap and index files in case they`ve changed
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'],
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity
if not self.check_ptrack_sanity(idx_ptrack[i]):
success = False
self.assertTrue(
success, 'Ptrack has failed to register changes in data files'
)
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@ -1,205 +0,0 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
module_name = 'ptrack_vacuum'
class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
self.create_tblspace_in_node(node, 'somedata')
# Create table and indexes
node.safe_psql(
"postgres",
"create extension bloom; create sequence t_seq; "
"create table t_heap tablespace somedata "
"as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql(
"postgres",
"create index {0} on {1} using {2}({3}) "
"tablespace somedata".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'],
idx_ptrack[i]['column']))
node.safe_psql('postgres', 'vacuum t_heap')
node.safe_psql('postgres', 'checkpoint')
# Make full backup to clean every ptrack
self.backup_node(
backup_dir, 'node', node, options=['-j10', '--stream'])
for i in idx_ptrack:
# get fork size and calculate it in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums for every page of this fork
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])
# Delete some rows, vacuum it and make checkpoint
node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
node.safe_psql('postgres', 'vacuum t_heap')
node.safe_psql('postgres', 'checkpoint')
# CHECK PTRACK SANITY
success = True
for i in idx_ptrack:
# get new size of heap and indexes and calculate it in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
# update path to heap and index files in case they`ve changed
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'],
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity
if not self.check_ptrack_sanity(idx_ptrack[i]):
success = False
self.assertTrue(
success, 'Ptrack has failed to register changes in data files'
)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_ptrack_vacuum_replica(self):
fname = self.id().split('.')[3]
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2',
'checkpoint_timeout': '30'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.slow_start()
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, 'replica', synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.slow_start(replica=True)
# Create table and indexes
master.safe_psql(
"postgres",
"create extension bloom; create sequence t_seq; "
"create table t_heap as select i as id, "
"md5(i::text) as text, md5(repeat(i::text,10))::tsvector "
"as tsvector from generate_series(0,2560) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql(
"postgres",
"create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'], idx_ptrack[i]['column']))
master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')
# Sync master and replica
self.wait_until_replica_catch_with_master(master, replica)
replica.safe_psql('postgres', 'checkpoint')
# Make FULL backup to clean every ptrack
self.backup_node(
backup_dir, 'replica', replica, options=[
'-j10', '--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(master.port)])
for i in idx_ptrack:
# get fork size and calculate it in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate md5sums for every page of this fork
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])
# Delete some rows, vacuum it and make checkpoint
master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')
# Sync master and replica
self.wait_until_replica_catch_with_master(master, replica)
replica.safe_psql('postgres', 'checkpoint')
# CHECK PTRACK SANITY
success = True
for i in idx_ptrack:
# get new size of heap and indexes and calculate it in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
# update path to heap and index files in case they've changed
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'],
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity
if not self.check_ptrack_sanity(idx_ptrack[i]):
success = False
self.assertTrue(
success, 'Ptrack has failed to register changes in data files'
)
# Clean after yourself
self.del_test_dir(module_name, fname)
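Backups from the standby in this test are taken only after wait_until_replica_catch_with_master returns, so the replica is guaranteed to have replayed the rows inserted on the master. A rough sketch of that waiting pattern is shown below; the queries assume PostgreSQL 10+ function names (pre-10 servers use pg_current_xlog_location and friends), and the actual helper may differ in its details.

```python
from time import sleep

def wait_for_replica_sketch(master, replica, timeout=60):
    """Illustrative only: poll until the replica has replayed the master's
    current WAL position, or give up after `timeout` seconds."""
    target_lsn = master.safe_psql(
        'postgres', 'select pg_current_wal_lsn()').decode().strip()
    for _ in range(timeout):
        lag = replica.safe_psql(
            'postgres',
            "select pg_wal_lsn_diff('{0}', pg_last_wal_replay_lsn())".format(
                target_lsn)).decode().strip()
        # pg_last_wal_replay_lsn() is NULL until the first record is replayed
        if lag and int(lag) <= 0:
            return
        sleep(1)
    raise Exception('replica did not catch up with master within {0}s'.format(timeout))
```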

View File

@ -1,193 +0,0 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
module_name = 'ptrack_vacuum_bits_frozen'
class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum_bits_frozen(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
self.create_tblspace_in_node(node, 'somedata')
# Create table and indexes
res = node.safe_psql(
"postgres",
"create extension bloom; create sequence t_seq; "
"create table t_heap tablespace somedata "
"as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql(
"postgres",
"create index {0} on {1} using {2}({3}) "
"tablespace somedata".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'],
idx_ptrack[i]['column']))
node.safe_psql('postgres', 'checkpoint')
self.backup_node(
backup_dir, 'node', node, options=['-j10', '--stream'])
node.safe_psql('postgres', 'vacuum freeze t_heap')
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
# CHECK PTRACK SANITY
success = True
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
# update path to heap and index files in case they've changed
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'],
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity
if not self.check_ptrack_sanity(idx_ptrack[i]):
success = False
self.assertTrue(
success, 'Ptrack has failed to register changes in data files'
)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_ptrack_vacuum_bits_frozen_replica(self):
fname = self.id().split('.')[3]
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.slow_start()
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.slow_start(replica=True)
# Create table and indexes
master.safe_psql(
"postgres",
"create extension bloom; create sequence t_seq; "
"create table t_heap as select i as id, "
"md5(i::text) as text, md5(repeat(i::text,10))::tsvector "
"as tsvector from generate_series(0,2560) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql(
"postgres",
"create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'],
idx_ptrack[i]['column']))
master.safe_psql('postgres', 'checkpoint')
# Sync master and replica
self.wait_until_replica_catch_with_master(master, replica)
replica.safe_psql('postgres', 'checkpoint')
# Take FULL backup to clean every ptrack
self.backup_node(
backup_dir, 'replica', replica,
options=[
'-j10',
'--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(master.port)])
for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
master.safe_psql('postgres', 'vacuum freeze t_heap')
master.safe_psql('postgres', 'checkpoint')
# Sync master and replica
self.wait_until_replica_catch_with_master(master, replica)
replica.safe_psql('postgres', 'checkpoint')
# CHECK PTRACK SANITY
success = True
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
# update path to heap and index files in case they've changed
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'],
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity
if not self.check_ptrack_sanity(idx_ptrack[i]):
success = False
self.assertTrue(
success, 'Ptrack has failed to register changes in data files'
)
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@ -1,90 +0,0 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
module_name = 'ptrack_vacuum_bits_visibility'
class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum_bits_visibility(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
self.create_tblspace_in_node(node, 'somedata')
# Create table and indexes
res = node.safe_psql(
"postgres",
"create extension bloom; create sequence t_seq; "
"create table t_heap tablespace somedata "
"as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql(
"postgres",
"create index {0} on {1} using {2}({3}) "
"tablespace somedata".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'], idx_ptrack[i]['column']))
node.safe_psql('postgres', 'checkpoint')
self.backup_node(
backup_dir, 'node', node, options=['-j10', '--stream'])
for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
node.safe_psql('postgres', 'vacuum t_heap')
node.safe_psql('postgres', 'checkpoint')
# CHECK PTRACK SANITY
success = True
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
# update path to heap and index files in case they've changed
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'],
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity
if not self.check_ptrack_sanity(idx_ptrack[i]):
success = False
self.assertTrue(
success, 'Ptrack has failed to register changes in data files'
)
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@ -1,201 +0,0 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
import time
module_name = 'ptrack_vacuum_full'
class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum_full(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
self.create_tblspace_in_node(node, 'somedata')
# Create table and indexes
res = node.safe_psql(
"postgres",
"create extension bloom; create sequence t_seq; "
"create table t_heap tablespace somedata "
"as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql(
"postgres", "create index {0} on {1} "
"using {2}({3}) tablespace somedata".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'], idx_ptrack[i]['column']))
node.safe_psql('postgres', 'vacuum t_heap')
node.safe_psql('postgres', 'checkpoint')
self.backup_node(
backup_dir, 'node', node, options=['-j10', '--stream'])
for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
node.safe_psql('postgres', 'vacuum full t_heap')
node.safe_psql('postgres', 'checkpoint')
# CHECK PTRACK SANITY
success = True
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
# update path to heap and index files in case they've changed
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'],
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity, the most important part
if not self.check_ptrack_sanity(idx_ptrack[i]):
success = False
self.assertTrue(
success, 'Ptrack has failed to register changes in data files'
)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum_full_replica(self):
fname = self.id().split('.')[3]
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2',
'autovacuum': 'off',
'archive_timeout': '30s'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.slow_start()
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, 'replica', synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.slow_start(replica=True)
# Create table and indexes
master.safe_psql(
"postgres",
"create extension bloom; create sequence t_seq; "
"create table t_heap as select i as id, "
"md5(i::text) as text, md5(repeat(i::text,10))::tsvector as "
"tsvector from generate_series(0,256000) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql(
"postgres",
"create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'],
idx_ptrack[i]['column']))
master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')
# Sync master and replica
self.wait_until_replica_catch_with_master(master, replica)
replica.safe_psql('postgres', 'checkpoint')
# Take FULL backup to clean every ptrack
self.backup_node(
backup_dir, 'replica', replica,
options=[
'-j10',
'--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(master.port),
'--stream'
]
)
# TODO: check that all ptrack are nullified
for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
master.safe_psql('postgres', 'vacuum full t_heap')
master.safe_psql('postgres', 'checkpoint')
# Sync master and replica
self.wait_until_replica_catch_with_master(master, replica)
replica.safe_psql('postgres', 'checkpoint')
# CHECK PTRACK SANITY
success = True
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
# update path to heap and index files in case they've changed
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'],
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity, the most important part
if not self.check_ptrack_sanity(idx_ptrack[i]):
success = False
self.assertTrue(
success, 'Ptrack has failed to register changes in data files'
)
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@ -1,192 +0,0 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
module_name = 'ptrack_vacuum_truncate'
class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum_truncate(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
self.create_tblspace_in_node(node, 'somedata')
# Create table and indexes
res = node.safe_psql(
"postgres",
"create extension bloom; create sequence t_seq; "
"create table t_heap tablespace somedata "
"as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql(
"postgres", "create index {0} on {1} using {2}({3}) "
"tablespace somedata".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'], idx_ptrack[i]['column']))
node.safe_psql('postgres', 'vacuum t_heap')
node.safe_psql('postgres', 'checkpoint')
self.backup_node(
backup_dir, 'node', node, options=['-j10', '--stream'])
for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
node.safe_psql('postgres', 'delete from t_heap where id > 128;')
node.safe_psql('postgres', 'vacuum t_heap')
node.safe_psql('postgres', 'checkpoint')
# CHECK PTRACK SANITY
success = True
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
# update path to heap and index files in case they've changed
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'],
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity
if not self.check_ptrack_sanity(idx_ptrack[i]):
success = False
self.assertTrue(
success, 'Ptrack has failed to register changes in data files'
)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum_truncate_replica(self):
fname = self.id().split('.')[3]
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2',
'checkpoint_timeout': '30'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.slow_start()
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, 'replica', synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.slow_start(replica=True)
# Create table and indexes
master.safe_psql(
"postgres",
"create extension bloom; create sequence t_seq; "
"create table t_heap as select i as id, "
"md5(i::text) as text, md5(repeat(i::text,10))::tsvector "
"as tsvector from generate_series(0,2560) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql(
"postgres", "create index {0} on {1} "
"using {2}({3})".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'], idx_ptrack[i]['column']))
master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')
# Take FULL backup to clean every ptrack
self.backup_node(
backup_dir, 'replica', replica,
options=[
'-j10',
'--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(master.port),
'--stream'
]
)
for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
master.safe_psql('postgres', 'delete from t_heap where id > 128;')
master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')
# CHECK PTRACK SANITY
success = True
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
# update path to heap and index files in case they've changed
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'],
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity
if not self.check_ptrack_sanity(idx_ptrack[i]):
success = False
self.assertTrue(
success, 'Ptrack has failed to register changes in data files'
)
# Clean after yourself
self.del_test_dir(module_name, fname)

tests/remote.py Normal file
View File

@ -0,0 +1,32 @@
import unittest
import os
from time import sleep
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
from .helpers.cfs_helpers import find_by_name
module_name = 'remote'
class RemoteTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_remote_1(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
self.backup_node(backup_dir, 'node', node)
# Clean after yourself
self.del_test_dir(module_name, fname)
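test_remote_1 exercises nothing remote-specific yet; it only checks that an archive backup works for an instance configured through set_archiving. That helper essentially turns on continuous archiving and points archive_command at pg_probackup, roughly as sketched below; the paths are placeholders and the real helper may write additional options.

```python
import os

def set_archiving_sketch(data_dir, backup_dir, instance='node'):
    """Illustrative approximation of what set_archiving() appends to
    postgresql.auto.conf; not the actual test helper."""
    archive_command = (
        'pg_probackup archive-push -B {0} --instance {1} '
        '--wal-file-path %p --wal-file-name %f'.format(backup_dir, instance))
    with open(os.path.join(data_dir, 'postgresql.auto.conf'), 'a') as conf:
        conf.write("archive_mode = on\n")
        conf.write("archive_command = '{0}'\n".format(archive_command))
```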

View File

@ -3,7 +3,6 @@ import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack
from datetime import datetime, timedelta
import subprocess
from sys import exit
import time
@ -22,7 +21,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'master'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -30,7 +29,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
'max_wal_senders': '2',
'ptrack_enable': 'on'}
)
master.start()
master.slow_start()
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
@ -45,7 +44,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
# take full backup and restore it
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
base_dir=os.path.join(module_name, fname, 'replica'))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
self.set_replica(master, replica)
@ -79,7 +78,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
# RESTORE FULL BACKUP TAKEN FROM PREVIOUS STEP
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname))
base_dir=os.path.join(module_name, fname, 'node'))
node.cleanup()
self.restore_node(backup_dir, 'replica', data_dir=node.data_dir)
@ -138,7 +137,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'master'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -152,7 +151,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
master.slow_start()
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
base_dir=os.path.join(module_name, fname, 'replica'))
replica.cleanup()
self.backup_node(backup_dir, 'master', master)
@ -211,7 +210,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
# RESTORE FULL BACKUP TAKEN FROM replica
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname))
base_dir=os.path.join(module_name, fname, 'node'))
node.cleanup()
self.restore_node(backup_dir, 'replica', data_dir=node.data_dir)
@ -289,7 +288,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'master'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -305,7 +304,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
master.slow_start()
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
base_dir=os.path.join(module_name, fname, 'replica'))
replica.cleanup()
self.backup_node(backup_dir, 'master', master)
@ -350,7 +349,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'master'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -364,7 +363,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
master.slow_start()
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
base_dir=os.path.join(module_name, fname, 'replica'))
replica.cleanup()
self.backup_node(backup_dir, 'master', master)
@ -402,7 +401,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
replica.append_conf(
'recovery.conf', "recovery_min_apply_delay = '300s'")
replica.restart()
replica.stop()
replica.slow_start(replica=True)
master.pgbench_init(scale=10)

View File

@ -4,7 +4,8 @@ from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
import subprocess
from datetime import datetime
import sys
import time
from time import sleep
from datetime import datetime, timedelta
module_name = 'restore'
@ -18,7 +19,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
"""recovery to latest from full backup"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
@ -27,7 +28,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
node.pgbench_init(scale=2)
pgbench = node.pgbench(
@ -66,7 +67,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
"""recovery to latest from full + page backups"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
@ -74,7 +75,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
node.pgbench_init(scale=2)
@ -114,7 +115,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
"""recovery to target timeline"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
@ -122,7 +123,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
node.pgbench_init(scale=2)
@ -183,7 +184,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
"""recovery to target time"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
@ -192,7 +193,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.append_conf("postgresql.auto.conf", "TimeZone = Europe/Moscow")
node.start()
node.slow_start()
node.pgbench_init(scale=2)
before = node.execute("postgres", "SELECT * FROM pgbench_branches")
@ -232,7 +233,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
"""recovery to target xid"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
@ -240,7 +241,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
node.pgbench_init(scale=2)
with node.connect("postgres") as con:
@ -293,7 +294,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
"""recovery with target inclusive false"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
@ -304,7 +305,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
node.pgbench_init(scale=2)
with node.connect("postgres") as con:
@ -358,7 +359,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
"""recovery to target lsn"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
@ -371,7 +372,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
node.pgbench_init(scale=2)
with node.connect("postgres") as con:
@ -431,7 +432,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
"""recovery to target lsn"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
@ -444,7 +445,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
node.pgbench_init(scale=2)
with node.connect("postgres") as con:
@ -505,7 +506,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
"""recovery to latest from archive full+ptrack backups"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'}
)
@ -513,7 +514,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
node.pgbench_init(scale=2)
@ -553,7 +554,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
"""recovery to latest from archive full+ptrack+ptrack backups"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'}
)
@ -561,7 +562,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
node.pgbench_init(scale=2)
@ -608,7 +609,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
"""recovery in stream mode to latest from full + ptrack backups"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -620,7 +621,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
node.pgbench_init(scale=2)
@ -663,7 +664,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -675,7 +676,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
node.pgbench_init(scale=2)
@ -729,7 +730,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
@ -741,7 +742,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
# wal_segment_size = self.guc_wal_segment_size(node)
node.pgbench_init(scale=2)
@ -794,7 +795,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
"""recovery using tablespace-mapping option"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
@ -805,7 +806,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
# Create tablespace
tblspc_path = os.path.join(node.base_dir, "tblspc")
@ -832,15 +833,17 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
"Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertEqual(
self.assertIn(
'ERROR: restore destination is not empty: "{0}"'.format(node.data_dir),
e.message,
'ERROR: restore destination is not empty: "{0}"\n'.format(
node.data_dir),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
# 2 - Try to restore to existing tablespace directory
tblspc_path_tmp = os.path.join(node.base_dir, "tblspc_tmp")
os.rename(tblspc_path, tblspc_path_tmp)
node.cleanup()
os.rename(tblspc_path_tmp, tblspc_path)
try:
self.restore_node(backup_dir, 'node', node)
# we should die here because exception is what we expect to happen
@ -850,10 +853,9 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
"not empty.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertEqual(
self.assertIn(
'ERROR: restore tablespace destination is not empty:',
e.message,
'ERROR: restore tablespace destination '
'is not empty: "{0}"\n'.format(tblspc_path),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
@ -913,7 +915,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
"""recovery using tablespace-mapping option and page backup"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
@ -921,7 +923,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
# Full backup
self.backup_node(backup_dir, 'node', node)
@ -995,7 +997,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
@ -1004,7 +1006,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
backup_id = self.backup_node(
backup_dir, 'node', node, options=["--stream"])
@ -1045,7 +1047,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
@ -1054,7 +1056,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
backup_id = self.backup_node(
backup_dir, 'node', node, options=["--stream"])
@ -1094,7 +1096,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
@ -1103,7 +1105,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
backup_id = self.backup_node(
backup_dir, 'node', node, options=["--stream"])
@ -1143,7 +1145,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
@ -1151,7 +1153,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
backup_id = self.backup_node(backup_dir, 'node', node)
if self.paranoia:
@ -1159,7 +1161,10 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
node.safe_psql("postgres", "create table t_heap(a int)")
node.stop()
node.cleanup()
node_restored = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node_restored'))
node_restored.cleanup()
recovery_time = self.show_pb(
backup_dir, 'node', backup_id)['recovery-time']
@ -1167,22 +1172,24 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.assertIn(
"INFO: Restore of backup {0} completed.".format(backup_id),
self.restore_node(
backup_dir, 'node', node,
backup_dir, 'node', node_restored,
options=[
"-j", "4", '--time={0}'.format(recovery_time),
"--recovery-target-action=promote"
]
"--recovery-target-action=promote"]
),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
if self.paranoia:
pgdata_restored = self.pgdata_content(node.data_dir)
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
node.slow_start()
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
result = node.psql("postgres", 'select * from t_heap')
node_restored.slow_start()
result = node_restored.psql("postgres", 'select * from t_heap')
self.assertTrue('does not exist' in result[2].decode("utf-8"))
# Clean after yourself
@ -1198,7 +1205,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
@ -1206,7 +1213,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
self.backup_node(backup_dir, 'node', node)
@ -1247,7 +1254,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
def test_zags_block_corrupt(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
@ -1255,7 +1262,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
self.backup_node(backup_dir, 'node', node)
@ -1303,7 +1310,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node_restored'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
@ -1325,7 +1332,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
def test_zags_block_corrupt_1(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
@ -1336,7 +1343,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
self.backup_node(backup_dir, 'node', node)
@ -1395,7 +1402,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
self.switch_wal_segment(node)
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node_restored'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
@ -1440,3 +1447,338 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
True,
'Failed to start pg_wal_dump: {0}'.format(
pg_receivexlog.communicate()[1]))
# @unittest.skip("skip")
def test_restore_chain(self):
"""
make node, take full backup, take several
ERROR delta backups, take valid delta backup,
restore must be successful
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# Take FULL
self.backup_node(
backup_dir, 'node', node)
# Take DELTA
self.backup_node(
backup_dir, 'node', node, backup_type='delta')
# Take ERROR DELTA
try:
self.backup_node(
backup_dir, 'node', node,
backup_type='delta', options=['--archive-timeout=0s'])
except ProbackupException as e:
pass
# Take ERROR DELTA
try:
self.backup_node(
backup_dir, 'node', node,
backup_type='delta', options=['--archive-timeout=0s'])
except ProbackupException as e:
pass
# Take DELTA
self.backup_node(
backup_dir, 'node', node, backup_type='delta')
# Take ERROR DELTA
try:
self.backup_node(
backup_dir, 'node', node,
backup_type='delta', options=['--archive-timeout=0s'])
except ProbackupException as e:
pass
self.assertEqual(
'OK',
self.show_pb(backup_dir, 'node')[0]['status'],
'Backup STATUS should be "OK"')
self.assertEqual(
'OK',
self.show_pb(backup_dir, 'node')[1]['status'],
'Backup STATUS should be "OK"')
self.assertEqual(
'ERROR',
self.show_pb(backup_dir, 'node')[2]['status'],
'Backup STATUS should be "ERROR"')
self.assertEqual(
'ERROR',
self.show_pb(backup_dir, 'node')[3]['status'],
'Backup STATUS should be "ERROR"')
self.assertEqual(
'OK',
self.show_pb(backup_dir, 'node')[4]['status'],
'Backup STATUS should be "OK"')
self.assertEqual(
'ERROR',
self.show_pb(backup_dir, 'node')[5]['status'],
'Backup STATUS should be "ERROR"')
node.cleanup()
self.restore_node(backup_dir, 'node', node)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_restore_chain_with_corrupted_backup(self):
"""more complex test_restore_chain()"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# Take FULL
self.backup_node(
backup_dir, 'node', node)
# Take PAGE
self.backup_node(
backup_dir, 'node', node, backup_type='page')
# Take ERROR PAGE
try:
self.backup_node(
backup_dir, 'node', node,
backup_type='page', options=['--archive-timeout=0s'])
except ProbackupException as e:
pass
# Take 1 DELTA
self.backup_node(
backup_dir, 'node', node, backup_type='delta')
# Take ERROR DELTA
try:
self.backup_node(
backup_dir, 'node', node,
backup_type='delta', options=['--archive-timeout=0s'])
except ProbackupException as e:
pass
# Take 2 DELTA
self.backup_node(
backup_dir, 'node', node, backup_type='delta')
# Take ERROR DELTA
try:
self.backup_node(
backup_dir, 'node', node,
backup_type='delta', options=['--archive-timeout=0s'])
except ProbackupException as e:
pass
# Take 3 DELTA
self.backup_node(
backup_dir, 'node', node, backup_type='delta')
# Corrupted 4 DELTA
corrupt_id = self.backup_node(
backup_dir, 'node', node, backup_type='delta')
# ORPHAN 5 DELTA
restore_target_id = self.backup_node(
backup_dir, 'node', node, backup_type='delta')
# ORPHAN 6 DELTA
self.backup_node(
backup_dir, 'node', node, backup_type='delta')
# NEXT FULL BACKUP
self.backup_node(
backup_dir, 'node', node, backup_type='full')
# Next Delta
self.backup_node(
backup_dir, 'node', node, backup_type='delta')
# corrupt the 4th DELTA backup (corrupt_id) by moving its pg_control away
file = os.path.join(
backup_dir, 'backups', 'node',
corrupt_id, 'database', 'global', 'pg_control')
file_new = os.path.join(backup_dir, 'pg_control')
os.rename(file, file_new)
# RESTORE BACKUP
node.cleanup()
try:
self.restore_node(
backup_dir, 'node', node, backup_id=restore_target_id)
self.assertEqual(
1, 0,
"Expecting Error because restore backup is corrupted.\n "
"Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertIn(
'ERROR: Backup {0} is orphan'.format(restore_target_id),
e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
self.assertEqual(
'OK',
self.show_pb(backup_dir, 'node')[0]['status'],
'Backup STATUS should be "OK"')
self.assertEqual(
'OK',
self.show_pb(backup_dir, 'node')[1]['status'],
'Backup STATUS should be "OK"')
self.assertEqual(
'ERROR',
self.show_pb(backup_dir, 'node')[2]['status'],
'Backup STATUS should be "ERROR"')
self.assertEqual(
'OK',
self.show_pb(backup_dir, 'node')[3]['status'],
'Backup STATUS should be "OK"')
self.assertEqual(
'ERROR',
self.show_pb(backup_dir, 'node')[4]['status'],
'Backup STATUS should be "ERROR"')
self.assertEqual(
'OK',
self.show_pb(backup_dir, 'node')[5]['status'],
'Backup STATUS should be "OK"')
self.assertEqual(
'ERROR',
self.show_pb(backup_dir, 'node')[6]['status'],
'Backup STATUS should be "ERROR"')
self.assertEqual(
'OK',
self.show_pb(backup_dir, 'node')[7]['status'],
'Backup STATUS should be "OK"')
# corruption victim
self.assertEqual(
'CORRUPT',
self.show_pb(backup_dir, 'node')[8]['status'],
'Backup STATUS should be "CORRUPT"')
# orphaned child
self.assertEqual(
'ORPHAN',
self.show_pb(backup_dir, 'node')[9]['status'],
'Backup STATUS should be "ORPHAN"')
# orphaned child
self.assertEqual(
'ORPHAN',
self.show_pb(backup_dir, 'node')[10]['status'],
'Backup STATUS should be "ORPHAN"')
# next FULL
self.assertEqual(
'OK',
self.show_pb(backup_dir, 'node')[11]['status'],
'Backup STATUS should be "OK"')
# next DELTA
self.assertEqual(
'OK',
self.show_pb(backup_dir, 'node')[12]['status'],
'Backup STATUS should be "OK"')
node.cleanup()
# Clean after yourself
self.del_test_dir(module_name, fname)
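Both chain tests above assert the status of each backup by its index in the show_pb output. The same expectation can be written as a single comparison of the whole chain, using only methods already present in these tests; the helper below is a sketch, not part of the suite.

```python
def assert_backup_statuses(self, backup_dir, instance, expected):
    """Hypothetical helper: compare the full status chain in one assert."""
    actual = [backup['status'] for backup in self.show_pb(backup_dir, instance)]
    self.assertEqual(
        expected, actual,
        'Unexpected backup status chain: {0}'.format(actual))

# Usage mirroring test_restore_chain_with_corrupted_backup:
# self.assert_backup_statuses(
#     backup_dir, 'node',
#     ['OK', 'OK', 'ERROR', 'OK', 'ERROR', 'OK', 'ERROR',
#      'OK', 'CORRUPT', 'ORPHAN', 'ORPHAN', 'OK', 'OK'])
```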
# @unittest.skip("skip")
def test_restore_backup_from_future(self):
"""more complex test_restore_chain()"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# Take FULL
self.backup_node(backup_dir, 'node', node)
node.pgbench_init(scale=3)
#pgbench = node.pgbench(options=['-T', '20', '-c', '2'])
#pgbench.wait()
# Take PAGE from future
backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
with open(
os.path.join(
backup_dir, 'backups', 'node',
backup_id, "backup.control"), "a") as conf:
conf.write("start-time='{:%Y-%m-%d %H:%M:%S}'\n".format(
datetime.now() + timedelta(days=3)))
# rename directory
new_id = self.show_pb(backup_dir, 'node')[1]['id']
os.rename(
os.path.join(backup_dir, 'backups', 'node', backup_id),
os.path.join(backup_dir, 'backups', 'node', new_id))
pgbench = node.pgbench(options=['-T', '3', '-c', '2', '--no-vacuum'])
pgbench.wait()
backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
pgdata = self.pgdata_content(node.data_dir)
node.cleanup()
self.restore_node(backup_dir, 'node', node, backup_id=backup_id)
pgdata_restored = self.pgdata_content(node.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
self.del_test_dir(module_name, fname)
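Like many tests in this suite, test_restore_backup_from_future finishes by comparing the original and restored data directories via pgdata_content() and compare_pgdata(). A stripped-down illustration of that style of comparison is given below; the real helpers record far more (file modes, page-level diffs, a longer exclusion list), so treat this only as a sketch.

```python
import hashlib
import os

EXCLUDED = {'pg_wal', 'pg_xlog', 'pg_replslot', 'postmaster.pid', 'postmaster.opts'}

def pgdata_digest_sketch(data_dir):
    """Hypothetical, simplified stand-in for pgdata_content(): map each
    relative file path to the md5 of its contents."""
    digests = {}
    for root, dirs, files in os.walk(data_dir):
        dirs[:] = [d for d in dirs if d not in EXCLUDED]
        for name in files:
            if name in EXCLUDED:
                continue
            path = os.path.join(root, name)
            with open(path, 'rb') as f:
                digests[os.path.relpath(path, data_dir)] = hashlib.md5(f.read()).hexdigest()
    return digests

# compare_pgdata() then amounts to asserting two such maps are equal:
# assert pgdata_digest_sketch(original_dir) == pgdata_digest_sketch(restored_dir)
```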

tests/retention.py Normal file

File diff suppressed because it is too large

View File

@ -1,178 +0,0 @@
import os
import unittest
from datetime import datetime, timedelta
from .helpers.ptrack_helpers import ProbackupTest
module_name = 'retention'
class RetentionTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_retention_redundancy_1(self):
"""purge backups using redundancy-based retention policy"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
with open(os.path.join(
backup_dir, 'backups', 'node',
"pg_probackup.conf"), "a") as conf:
conf.write("retention-redundancy = 1\n")
# Make backups to be purged
self.backup_node(backup_dir, 'node', node)
self.backup_node(backup_dir, 'node', node, backup_type="page")
# Make backups to be kept
self.backup_node(backup_dir, 'node', node)
self.backup_node(backup_dir, 'node', node, backup_type="page")
self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4)
# Purge backups
log = self.delete_expired(backup_dir, 'node')
self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2)
# Check that WAL segments were deleted
min_wal = None
max_wal = None
for line in log.splitlines():
if line.startswith("INFO: removed min WAL segment"):
min_wal = line[31:-1]
elif line.startswith("INFO: removed max WAL segment"):
max_wal = line[31:-1]
if not min_wal:
self.assertTrue(False, "min_wal is empty")
if not max_wal:
self.assertTrue(False, "max_wal is not set")
for wal_name in os.listdir(os.path.join(backup_dir, 'wal', 'node')):
if not wal_name.endswith(".backup"):
# wal_name_b = wal_name.encode('ascii')
self.assertEqual(wal_name[8:] > min_wal[8:], True)
self.assertEqual(wal_name[8:] > max_wal[8:], True)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("123")
def test_retention_window_2(self):
"""purge backups using window-based retention policy"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
with open(
os.path.join(
backup_dir,
'backups',
'node',
"pg_probackup.conf"), "a") as conf:
conf.write("retention-redundancy = 1\n")
conf.write("retention-window = 1\n")
# Make backups to be purged
self.backup_node(backup_dir, 'node', node)
self.backup_node(backup_dir, 'node', node, backup_type="page")
# Make backup to be kept
self.backup_node(backup_dir, 'node', node)
backups = os.path.join(backup_dir, 'backups', 'node')
days_delta = 5
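# Backdate recovery_time of the existing backups so they fall outside
# the 1-day retention window and become candidates for purging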
for backup in os.listdir(backups):
if backup == 'pg_probackup.conf':
continue
with open(
os.path.join(
backups, backup, "backup.control"), "a") as conf:
conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format(
datetime.now() - timedelta(days=days_delta)))
days_delta -= 1
# Make backup to be kept
self.backup_node(backup_dir, 'node', node, backup_type="page")
self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4)
# Purge backups
self.delete_expired(backup_dir, 'node')
self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("123")
def test_retention_wal(self):
"""purge backups using window-based retention policy"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.safe_psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,100500) i")
# Take FULL BACKUP
self.backup_node(backup_dir, 'node', node)
node.safe_psql(
"postgres",
"insert into t_heap select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,100500) i")
self.backup_node(backup_dir, 'node', node)
backups = os.path.join(backup_dir, 'backups', 'node')
days_delta = 5
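# Backdate recovery_time of both FULL backups; the later delete_expired call
# with --retention-window=2 should purge the one older than two days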
for backup in os.listdir(backups):
if backup == 'pg_probackup.conf':
continue
with open(
os.path.join(
backups, backup, "backup.control"), "a") as conf:
conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format(
datetime.now() - timedelta(days=days_delta)))
days_delta -= 1
# Make backup to be kept
self.backup_node(backup_dir, 'node', node, backup_type="page")
self.assertEqual(len(self.show_pb(backup_dir, 'node')), 3)
# Purge backups
self.delete_expired(
backup_dir, 'node', options=['--retention-window=2'])
self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2)
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@@ -15,15 +15,14 @@ class OptionTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
pg_options={'wal_level': 'replica'})
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
self.assertEqual(
self.backup_node(
@@ -43,15 +42,14 @@ class OptionTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
pg_options={'wal_level': 'replica'})
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
self.assertEqual(
self.backup_node(
@@ -71,15 +69,14 @@ class OptionTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
pg_options={'wal_level': 'replica'})
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
backup_id = self.backup_node(backup_dir, 'node', node)
@@ -117,15 +114,14 @@ class OptionTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
pg_options={'wal_level': 'replica'})
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
backup_id = self.backup_node(backup_dir, 'node', node)
@@ -135,7 +131,9 @@ class OptionTest(ProbackupTest, unittest.TestCase):
backup_id, "backup.control")
os.remove(file)
self.assertIn('Control file "{0}" doesn\'t exist'.format(file), self.show_pb(backup_dir, 'node', as_text=True))
self.assertIn(
'Control file "{0}" doesn\'t exist'.format(file),
self.show_pb(backup_dir, 'node', as_text=True, as_json=False))
# Clean after yourself
self.del_test_dir(module_name, fname)
@@ -146,15 +144,14 @@ class OptionTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
pg_options={'wal_level': 'replica'})
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
backup_id = self.backup_node(backup_dir, 'node', node)
@@ -165,7 +162,9 @@ class OptionTest(ProbackupTest, unittest.TestCase):
fd = open(file, 'w')
fd.close()
self.assertIn('Control file "{0}" is empty'.format(file), self.show_pb(backup_dir, 'node', as_text=True))
self.assertIn(
'Control file "{0}" is empty'.format(file),
self.show_pb(backup_dir, 'node', as_text=True, as_json=False))
# Clean after yourself
self.del_test_dir(module_name, fname)
@@ -177,15 +176,14 @@ class OptionTest(ProbackupTest, unittest.TestCase):
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
pg_options={'wal_level': 'replica'})
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
backup_id = self.backup_node(backup_dir, 'node', node)
@@ -199,7 +197,7 @@ class OptionTest(ProbackupTest, unittest.TestCase):
self.assertIn(
'WARNING: Invalid option "statuss" in file'.format(file),
self.show_pb(backup_dir, 'node', as_text=True))
self.show_pb(backup_dir, 'node', as_json=False, as_text=True))
# Clean after yourself
# self.del_test_dir(module_name, fname)
self.del_test_dir(module_name, fname)

62
tests/snapfs.py Normal file
View File

@@ -0,0 +1,62 @@
import unittest
import os
from time import sleep
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
module_name = 'snapfs'
class SnapFSTest(ProbackupTest, unittest.TestCase):
# @unittest.expectedFailure
@unittest.skipUnless(ProbackupTest.enterprise, 'skip')
def test_snapfs_simple(self):
"""standart backup modes with ARCHIVE WAL method"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
self.backup_node(backup_dir, 'node', node)
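# pg_make_snapshot()/pg_remove_snapshot() are assumed to come from the
# enterprise snapshot-FS feature this test targets (see the skipUnless guard above)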
node.safe_psql(
'postgres',
'select pg_make_snapshot()')
node.pgbench_init(scale=10)
pgbench = node.pgbench(options=['-T', '50', '-c', '2', '--no-vacuum'])
pgbench.wait()
self.backup_node(
backup_dir, 'node', node, backup_type='page')
node.safe_psql(
'postgres',
'select pg_remove_snapshot(1)')
self.backup_node(
backup_dir, 'node', node, backup_type='page')
pgdata = self.pgdata_content(node.data_dir)
node.cleanup()
self.restore_node(
backup_dir, 'node',
node, options=["-j", "4"])
pgdata_restored = self.pgdata_content(node.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
self.del_test_dir(module_name, fname)

52
tests/time_stamp.py Normal file
View File

@@ -0,0 +1,52 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
module_name = 'time_stamp'
class CheckTimeStamp(ProbackupTest, unittest.TestCase):
def test_start_time_format(self):
"""Test backup ID changing after start-time editing in backup.control.
We should convert local time in UTC format"""
# Create simple node
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
backup_id = self.backup_node(backup_dir, 'node', node, options=['--stream', '-j 2'])
show_backup = self.show_pb(backup_dir, 'node')
i = 0
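# Two passes over backup.control: first rewrite start-time with an explicit
# '+00' UTC offset, then with no offset at all; show_pb must report the same
# backup ID both times if start-time is interpreted as UTC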
while i < 2:
with open(os.path.join(backup_dir, "backups", "node", backup_id, "backup.control"), "r+") as f:
output = ""
for line in f:
if line.startswith('start-time'):
if i == 0:
output = output + str(line[:-5])+'+00\''+'\n'
else:
output = output + str(line[:-5]) + '\'' + '\n'
else:
output = output + str(line)
f.close()
with open(os.path.join(backup_dir, "backups", "node", backup_id, "backup.control"), "w") as fw:
fw.write(output)
fw.flush()
show_backup = show_backup + self.show_pb(backup_dir, 'node')
i += 1
self.assertTrue(show_backup[1]['id'] == show_backup[2]['id'], "ERROR: Localtime format used instead of UTC")
node.stop()
# Clean after yourself
self.del_test_dir(module_name, fname)

File diff suppressed because it is too large