1
0
mirror of https://github.com/postgrespro/pg_probackup.git synced 2025-09-16 09:26:30 +02:00

[Issue #280] Expand "--force" flag for incremental restore, now in case of system ID mismatch the destination PGDATA will be deleted; the content of the directory, used as destination for tablespace remapping, is now also deleted. Tablespace map is now validated before reading.

This commit is contained in:
Grigory Smolkin
2021-01-22 15:56:14 +03:00
parent b16555acd6
commit 4e445024f2
12 changed files with 965 additions and 127 deletions

View File

@@ -3976,6 +3976,10 @@ pg_probackup restore -B <replaceable>backup_dir</replaceable> --instance <replac
this flag if you need to restore the
<productname>PostgreSQL</productname> cluster from a corrupt or an invalid backup.
Use with caution.
When used with <link linkend="pbk-incremental-restore">incremental restore</link>, this flag
allows replacing an already existing PGDATA that has a different system ID. If tablespaces are
remapped via the <literal>--tablespace-mapping</literal> option into non-empty directories,
the old content of such directories will be deleted.
</para>
</listitem>
</varlistentry>

View File

@@ -286,12 +286,8 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool
join_path_components(external_prefix, current.root_dir, EXTERNAL_DIR);
/* list files with the logical path. omit $PGDATA */
if (fio_is_remote(FIO_DB_HOST))
fio_list_dir(backup_files_list, instance_config.pgdata,
true, true, false, backup_logs, true, 0);
else
dir_list_file(backup_files_list, instance_config.pgdata,
true, true, false, backup_logs, true, 0, FIO_LOCAL_HOST);
fio_list_dir(backup_files_list, instance_config.pgdata,
true, true, false, backup_logs, true, 0);
/*
* Get database_map (name to oid) for use in partial restore feature.

View File

@@ -720,6 +720,7 @@ do_retention_wal(bool dry_run)
/*
* Delete backup files of the backup and update the status of the backup to
* BACKUP_STATUS_DELETED.
* TODO: delete files on multiple threads
*/
void
delete_backup_files(pgBackup *backup)

181
src/dir.c
View File

@@ -130,6 +130,7 @@ static void dir_list_file_internal(parray *files, pgFile *parent, const char *pa
bool skip_hidden, int external_dir_num, fio_location location);
static void opt_path_map(ConfigOption *opt, const char *arg,
TablespaceList *list, const char *type);
static void cleanup_tablespace(const char *path);
/* Tablespace mapping */
static TablespaceList tablespace_dirs = {NULL, NULL};
@@ -518,6 +519,8 @@ db_map_entry_free(void *entry)
*
* When follow_symlink is true, symbolic link is ignored and only file or
* directory linked to will be listed.
*
* TODO: make it strictly local
*/
void
dir_list_file(parray *files, const char *root, bool exclude, bool follow_symlink,
@@ -1088,7 +1091,7 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba
const char *linked_path = get_tablespace_mapping((*link)->linked);
if (!is_absolute_path(linked_path))
elog(ERROR, "Tablespace directory is not an absolute path: %s\n",
elog(ERROR, "Tablespace directory path must be an absolute path: %s\n",
linked_path);
join_path_components(to_path, data_dir, dir->rel_path);
@@ -1128,7 +1131,7 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba
* tablespace_map or tablespace_map.txt.
*/
void
read_tablespace_map(parray *files, const char *backup_dir)
read_tablespace_map(parray *links, const char *backup_dir)
{
FILE *fp;
char db_path[MAXPGPATH],
@@ -1138,16 +1141,9 @@ read_tablespace_map(parray *files, const char *backup_dir)
join_path_components(db_path, backup_dir, DATABASE_DIR);
join_path_components(map_path, db_path, PG_TABLESPACE_MAP_FILE);
/* Exit if database/tablespace_map doesn't exist */
if (!fileExists(map_path, FIO_BACKUP_HOST))
{
elog(LOG, "there is no file tablespace_map");
return;
}
fp = fio_open_stream(map_path, FIO_BACKUP_HOST);
if (fp == NULL)
elog(ERROR, "cannot open \"%s\": %s", map_path, strerror(errno));
elog(ERROR, "Cannot open tablespace map file \"%s\": %s", map_path, strerror(errno));
while (fgets(buf, lengthof(buf), fp))
{
@@ -1166,7 +1162,7 @@ read_tablespace_map(parray *files, const char *backup_dir)
file->linked = pgut_strdup(path);
canonicalize_path(file->linked);
parray_append(files, file);
parray_append(links, file);
}
if (ferror(fp))
@@ -1183,30 +1179,49 @@ read_tablespace_map(parray *files, const char *backup_dir)
* If tablespace-mapping option is supplied, all OLDDIR entries must have
* entries in tablespace_map file.
*
*
* TODO: maybe when running incremental restore with tablespace remapping, then
* new tablespace directory MUST be empty? because there is no way
* When running incremental restore with tablespace remapping, then
* new tablespace directory MUST be empty, because there is no way
* we can be sure, that files laying there belong to our instance.
* But "force" flag allows to ignore this condition, by wiping out
* the current content on the directory.
*
* Exit codes:
* 1. backup has no tablespaces
* 2. backup has tablespaces and they are empty
* 3. backup has tablespaces and some of them are not empty
*/
void
check_tablespace_mapping(pgBackup *backup, bool incremental, bool *tblspaces_are_empty)
int
check_tablespace_mapping(pgBackup *backup, bool incremental, bool force, bool pgdata_is_empty)
{
// char this_backup_path[MAXPGPATH];
parray *links;
parray *links = parray_new();
size_t i;
TablespaceListCell *cell;
pgFile *tmp_file = pgut_new(pgFile);
bool tblspaces_are_empty = true;
links = parray_new();
elog(LOG, "Checking tablespace directories of backup %s",
base36enc(backup->start_time));
/* validate tablespace map,
* if there are no tablespaces, then there is nothing left to do
*/
if (!validate_tablespace_map(backup))
{
/*
* Sanity check
* If there is no tablespaces in backup,
* then using the '--tablespace-mapping' option is a mistake.
*/
if (tablespace_dirs.head != NULL)
elog(ERROR, "Backup %s has no tablespaceses, nothing to remap "
"via \"--tablespace-mapping\" option", base36enc(backup->backup_id));
return NoTblspc;
}
// pgBackupGetPath(backup, this_backup_path, lengthof(this_backup_path), NULL);
read_tablespace_map(links, backup->root_dir);
/* Sort links by the path of a linked file*/
parray_qsort(links, pgFileCompareLinked);
elog(LOG, "check tablespace directories of backup %s",
base36enc(backup->start_time));
/* 1 - each OLDDIR must have an entry in tablespace_map file (links) */
for (cell = tablespace_dirs.head; cell; cell = cell->next)
{
@@ -1216,52 +1231,109 @@ check_tablespace_mapping(pgBackup *backup, bool incremental, bool *tblspaces_are
elog(ERROR, "--tablespace-mapping option's old directory "
"doesn't have an entry in tablespace_map file: \"%s\"",
cell->old_dir);
/* For incremental restore, check that new directory is empty */
// if (incremental)
// {
// if (!is_absolute_path(cell->new_dir))
// elog(ERROR, "tablespace directory is not an absolute path: %s\n",
// cell->new_dir);
//
// if (!dir_is_empty(cell->new_dir, FIO_DB_HOST))
// elog(ERROR, "restore tablespace destination is not empty: \"%s\"",
// cell->new_dir);
// }
}
/*
* There is difference between incremental restore of already existing
* tablespaceses and remapped tablespaceses.
* Former are allowed to be not empty, because we treat them like an
* extension of PGDATA.
* The latter are not, unless "--force" flag is used.
* in which case the remapped directory is nuked - just to be safe,
* because it is hard to be sure that there are no some tricky corner
* cases of pages from different systems having the same crc.
* This is a strict approach.
*
* Why can`t we not nuke it and just let it roll ?
* What if user just wants to rerun failed restore with the same
* parameters? Nuking is bad for this case.
*
* Consider the example of existing PGDATA:
* ....
* pg_tablespace
* 100500-> /somedirectory
* ....
*
* We want to remap it during restore like that:
* ....
* pg_tablespace
* 100500-> /somedirectory1
* ....
*
* Usually it is required for "/somedirectory1" to be empty, but
* in case of incremental restore with 'force' flag, which required
* of us to drop already existing content of "/somedirectory1".
*
* TODO: Ideally in case of incremental restore we must also
* drop the "/somedirectory" directory first, but currently
* we don`t do that.
*/
/* 2 - all linked directories must be empty */
for (i = 0; i < parray_num(links); i++)
{
pgFile *link = (pgFile *) parray_get(links, i);
const char *linked_path = link->linked;
TablespaceListCell *cell;
bool remapped = false;
for (cell = tablespace_dirs.head; cell; cell = cell->next)
if (strcmp(link->linked, cell->old_dir) == 0)
{
linked_path = cell->new_dir;
remapped = true;
break;
}
if (!is_absolute_path(linked_path))
elog(ERROR, "tablespace directory is not an absolute path: %s\n",
elog(ERROR, "Tablespace directory path must be an absolute path: %s\n",
linked_path);
if (!dir_is_empty(linked_path, FIO_DB_HOST))
{
if (!incremental)
elog(ERROR, "restore tablespace destination is not empty: \"%s\"",
linked_path);
*tblspaces_are_empty = false;
elog(ERROR, "Restore tablespace destination is not empty: \"%s\"", linked_path);
else if (remapped && !force)
elog(ERROR, "Remapped tablespace destination is not empty: \"%s\". "
"Use \"--force\" flag if you want to automatically clean up the "
"content of new tablespace destination",
linked_path);
else if (pgdata_is_empty && !force)
elog(ERROR, "PGDATA is empty, but tablespace destination is not: \"%s\". "
"Use \"--force\" flag is you want to automatically clean up the "
"content of tablespace destination",
linked_path);
/*
* TODO: compile the list of tblspc Oids to delete later,
* similar to what we do with database_map.
*/
else if (force && (pgdata_is_empty || remapped))
{
elog(WARNING, "Cleaning up the content of %s directory: \"%s\"",
remapped ? "remapped tablespace" : "tablespace", linked_path);
cleanup_tablespace(linked_path);
continue;
}
tblspaces_are_empty = false;
}
}
free(tmp_file);
parray_walk(links, pgFileFree);
parray_free(links);
if (tblspaces_are_empty)
return EmptyTblspc;
return NotEmptyTblspc;
}
/* TODO: Make it consistent with check_tablespace_mapping */
void
check_external_dir_mapping(pgBackup *backup, bool incremental)
{
@@ -1854,3 +1926,34 @@ read_database_map(pgBackup *backup)
return database_map;
}
/*
 * Remove the whole content of a tablespace directory (but not the
 * directory itself), used when "--force" incremental restore must wipe
 * a non-empty remapped tablespace destination.
 * TODO: Current algorithm is not very efficient in remote mode,
 * due to round-trip to delete every file.
 */
void
cleanup_tablespace(const char *path)
{
	int		i;
	char	fullpath[MAXPGPATH];
	parray *files = parray_new();

	/* collect everything under 'path'; no exclusions, do not add the root itself */
	fio_list_dir(files, path, false, false, false, false, false, 0);

	/* delete leaf node first: descending rel_path order guarantees that
	 * files are removed before their parent directories */
	parray_qsort(files, pgFileCompareRelPathWithExternalDesc);
	for (i = 0; i < parray_num(files); i++)
	{
		pgFile	   *file = (pgFile *) parray_get(files, i);

		join_path_components(fullpath, path, file->rel_path);

		fio_delete(file->mode, fullpath, FIO_DB_HOST);
		elog(VERBOSE, "Deleted file \"%s\"", fullpath);
	}

	parray_walk(files, pgFileFree);
	parray_free(files);
}

View File

@@ -132,6 +132,16 @@ typedef struct db_map_entry
char *datname;
} db_map_entry;
/* State of pgdata in the context of its compatibility for incremental restore */
typedef enum DestDirIncrCompatibility
{
POSTMASTER_IS_RUNNING,
SYSTEM_ID_MISMATCH,
BACKUP_LABEL_EXISTS,
DEST_IS_NOT_OK,
DEST_OK
} DestDirIncrCompatibility;
typedef enum IncrRestoreMode
{
INCR_NONE,
@@ -250,6 +260,11 @@ typedef struct page_map_entry
/* Special values of datapagemap_t bitmapsize */
#define PageBitmapIsEmpty 0 /* Used to mark unchanged datafiles */
/* Return codes for check_tablespace_mapping */
#define NoTblspc 0
#define EmptyTblspc 1
#define NotEmptyTblspc 2
/* Current state of backup */
typedef enum BackupStatus
{
@@ -868,6 +883,7 @@ extern int do_validate_all(void);
extern int validate_one_page(Page page, BlockNumber absolute_blkno,
XLogRecPtr stop_lsn, PageState *page_st,
uint32 checksum_version);
extern bool validate_tablespace_map(pgBackup *backup);
/* return codes for validate_one_page */
/* TODO: use enum */
@@ -957,10 +973,10 @@ extern void create_data_directories(parray *dest_files,
bool incremental,
fio_location location);
extern void read_tablespace_map(parray *files, const char *backup_dir);
extern void read_tablespace_map(parray *links, const char *backup_dir);
extern void opt_tablespace_map(ConfigOption *opt, const char *arg);
extern void opt_externaldir_map(ConfigOption *opt, const char *arg);
extern void check_tablespace_mapping(pgBackup *backup, bool incremental, bool *tblspaces_are_empty);
extern int check_tablespace_mapping(pgBackup *backup, bool incremental, bool force, bool pgdata_is_empty);
extern void check_external_dir_mapping(pgBackup *backup, bool incremental);
extern char *get_external_remap(char *current_dir);

View File

@@ -65,9 +65,10 @@ static void set_orphan_status(parray *backups, pgBackup *parent_backup);
static void restore_chain(pgBackup *dest_backup, parray *parent_chain,
parray *dbOid_exclude_list, pgRestoreParams *params,
const char *pgdata_path, bool no_sync);
static void check_incremental_compatibility(const char *pgdata, uint64 system_identifier,
IncrRestoreMode incremental_mode);
const char *pgdata_path, bool no_sync, bool cleanup_pgdata,
bool backup_has_tblspc);
static DestDirIncrCompatibility check_incremental_compatibility(const char *pgdata, uint64 system_identifier,
IncrRestoreMode incremental_mode);
/*
* Iterate over backup list to find all ancestors of the broken parent_backup
@@ -131,39 +132,87 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
parray *parent_chain = NULL;
parray *dbOid_exclude_list = NULL;
bool pgdata_is_empty = true;
bool tblspaces_are_empty = true;
bool cleanup_pgdata = false;
bool backup_has_tblspc = true; /* backup contain tablespace */
XLogRecPtr shift_lsn = InvalidXLogRecPtr;
if (instance_name == NULL)
elog(ERROR, "required parameter not specified: --instance");
if (params->is_restore)
{
if (instance_config.pgdata == NULL)
elog(ERROR,
"required parameter not specified: PGDATA (-D, --pgdata)");
/* Check if restore destination empty */
if (!dir_is_empty(instance_config.pgdata, FIO_DB_HOST))
{
/* if destination directory is empty, then incremental restore may be disabled */
pgdata_is_empty = false;
/* Check that remote system is NOT running and systemd id is the same as ours */
if (params->incremental_mode != INCR_NONE)
{
DestDirIncrCompatibility rc;
bool ok_to_go = true;
elog(INFO, "Running incremental restore into nonempty directory: \"%s\"",
instance_config.pgdata);
check_incremental_compatibility(instance_config.pgdata,
instance_config.system_identifier,
params->incremental_mode);
rc = check_incremental_compatibility(instance_config.pgdata,
instance_config.system_identifier,
params->incremental_mode);
if (rc == POSTMASTER_IS_RUNNING)
{
/* Even with force flag it is unwise to run
* incremental restore over running instance
*/
ok_to_go = false;
}
else if (rc == SYSTEM_ID_MISMATCH)
{
/*
* In force mode it is possible to ignore system id mismatch
* by just wiping clean the destination directory.
*/
if (params->incremental_mode != INCR_NONE && params->force)
cleanup_pgdata = true;
else
ok_to_go = false;
}
else if (rc == BACKUP_LABEL_EXISTS)
{
/*
* A big no-no for lsn-based incremental restore
* If there is backup label in PGDATA, then this cluster was probably
* restored from backup, but not started yet. Which means that values
* in pg_control are not synchronized with PGDATA and so we cannot use
* incremental restore in LSN mode, because it is relying on pg_control
* to calculate switchpoint.
*/
if (params->incremental_mode == INCR_LSN)
ok_to_go = false;
}
else if (rc == DEST_IS_NOT_OK)
{
/*
* Something else is wrong. For example, postmaster.pid is mangled,
* so we cannot be sure that postmaster is running or not.
* It is better to just error out.
*/
ok_to_go = false;
}
if (!ok_to_go)
elog(ERROR, "Incremental restore is not allowed");
}
else
elog(ERROR, "Restore destination is not empty: \"%s\"",
instance_config.pgdata);
/* if destination directory is empty, then incremental restore may be disabled */
pgdata_is_empty = false;
}
}
if (instance_name == NULL)
elog(ERROR, "required parameter not specified: --instance");
elog(LOG, "%s begin.", action);
/* Get list of all backups sorted in order of descending start time */
@@ -356,9 +405,15 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
*/
if (params->is_restore)
{
check_tablespace_mapping(dest_backup, params->incremental_mode != INCR_NONE, &tblspaces_are_empty);
int rc = check_tablespace_mapping(dest_backup,
params->incremental_mode != INCR_NONE, params->force,
pgdata_is_empty);
if (params->incremental_mode != INCR_NONE && pgdata_is_empty && tblspaces_are_empty)
/* backup contain no tablespaces */
if (rc == NoTblspc)
backup_has_tblspc = false;
if (params->incremental_mode != INCR_NONE && !cleanup_pgdata && pgdata_is_empty && (rc != NotEmptyTblspc))
{
elog(INFO, "Destination directory and tablespace directories are empty, "
"disable incremental restore");
@@ -366,6 +421,9 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
}
/* no point in checking external directories if their restore is not requested */
//TODO:
// - make check_external_dir_mapping more like check_tablespace_mapping
// - honor force flag in case of incremental restore just like check_tablespace_mapping
if (!params->skip_external_dirs)
check_external_dir_mapping(dest_backup, params->incremental_mode != INCR_NONE);
}
@@ -610,8 +668,8 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
base36enc(dest_backup->start_time),
dest_backup->server_version);
restore_chain(dest_backup, parent_chain, dbOid_exclude_list,
params, instance_config.pgdata, no_sync);
restore_chain(dest_backup, parent_chain, dbOid_exclude_list, params,
instance_config.pgdata, no_sync, cleanup_pgdata, backup_has_tblspc);
//TODO rename and update comment
/* Create recovery.conf with given recovery target parameters */
@@ -634,11 +692,13 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
/*
* Restore backup chain.
* Flag 'cleanup_pgdata' demands the removing of already existing content in PGDATA.
*/
void
restore_chain(pgBackup *dest_backup, parray *parent_chain,
parray *dbOid_exclude_list, pgRestoreParams *params,
const char *pgdata_path, bool no_sync)
const char *pgdata_path, bool no_sync, bool cleanup_pgdata,
bool backup_has_tblspc)
{
int i;
char timestamp[100];
@@ -736,7 +796,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain,
* Restore dest_backup internal directories.
*/
create_data_directories(dest_files, instance_config.pgdata,
dest_backup->root_dir, true,
dest_backup->root_dir, backup_has_tblspc,
params->incremental_mode != INCR_NONE,
FIO_DB_HOST);
@@ -789,18 +849,24 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain,
}
/* Get list of files in destination directory and remove redundant files */
if (params->incremental_mode != INCR_NONE)
if (params->incremental_mode != INCR_NONE || cleanup_pgdata)
{
pgdata_files = parray_new();
elog(INFO, "Extracting the content of destination directory for incremental restore");
time(&start_time);
if (fio_is_remote(FIO_DB_HOST))
fio_list_dir(pgdata_files, pgdata_path, false, true, false, false, true, 0);
else
dir_list_file(pgdata_files, pgdata_path,
false, true, false, false, true, 0, FIO_LOCAL_HOST);
fio_list_dir(pgdata_files, pgdata_path, false, true, false, false, true, 0);
/*
* TODO:
* 1. Currently we are cleaning the tablespaces in check_tablespace_mapping and PGDATA here.
* It would be great to do all this work in one place.
*
* 2. In case of tablespace remapping we do not cleanup the old tablespace path,
* it is just left as it is.
* Lookup tests.incr_restore.IncrRestoreTest.test_incr_restore_with_tablespace_5
*/
/* get external dirs content */
if (external_dirs)
@@ -810,13 +876,8 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain,
char *external_path = parray_get(external_dirs, i);
parray *external_files = parray_new();
if (fio_is_remote(FIO_DB_HOST))
fio_list_dir(external_files, external_path,
false, true, false, false, true, i+1);
else
dir_list_file(external_files, external_path,
false, true, false, false, true, i+1,
FIO_LOCAL_HOST);
fio_list_dir(external_files, external_path,
false, true, false, false, true, i+1);
parray_concat(pgdata_files, external_files);
parray_free(external_files);
@@ -836,25 +897,41 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain,
time(&start_time);
for (i = 0; i < parray_num(pgdata_files); i++)
{
pgFile *file = (pgFile *) parray_get(pgdata_files, i);
bool redundant = true;
pgFile *file = (pgFile *) parray_get(pgdata_files, i);
if (parray_bsearch(dest_backup->files, file, pgFileCompareRelPathWithExternal))
redundant = false;
/* do not delete the useful internal directories */
if (S_ISDIR(file->mode) && !redundant)
continue;
/* if file does not exists in destination list, then we can safely unlink it */
if (parray_bsearch(dest_backup->files, file, pgFileCompareRelPathWithExternal) == NULL)
if (cleanup_pgdata || redundant)
{
char fullpath[MAXPGPATH];
join_path_components(fullpath, pgdata_path, file->rel_path);
// fio_pgFileDelete(file, full_file_path);
fio_delete(file->mode, fullpath, FIO_DB_HOST);
elog(VERBOSE, "Deleted file \"%s\"", fullpath);
/* shrink pgdata list */
pgFileFree(file);
parray_remove(pgdata_files, i);
i--;
}
}
if (cleanup_pgdata)
{
/* Destination PGDATA and tablespaces were cleaned up, so it's the regular restore from this point */
params->incremental_mode = INCR_NONE;
parray_free(pgdata_files);
pgdata_files = NULL;
}
time(&end_time);
pretty_time_interval(difftime(end_time, start_time),
pretty_time, lengthof(pretty_time));
@@ -2033,36 +2110,21 @@ get_dbOid_exclude_list(pgBackup *backup, parray *datname_list,
/* Check that instance is suitable for incremental restore
* Depending on type of incremental restore requirements are differs.
*
* TODO: add PG_CONTROL_IS_MISSING
*/
void
DestDirIncrCompatibility
check_incremental_compatibility(const char *pgdata, uint64 system_identifier,
IncrRestoreMode incremental_mode)
{
uint64 system_id_pgdata;
bool system_id_match = false;
bool success = true;
bool postmaster_is_up = false;
bool backup_label_exists = false;
pid_t pid;
char backup_label[MAXPGPATH];
/* slurp pg_control and check that system ID is the same */
/* check that instance is not running */
/* if lsn_based, check that there is no backup_label files is around AND
* get redo point lsn from destination pg_control.
* It is really important to be sure that pg_control is in cohesion with
* data files content, because based on pg_control information we will
* choose a backup suitable for lsn based incremental restore.
*/
system_id_pgdata = get_system_identifier(pgdata);
if (system_id_pgdata != instance_config.system_identifier)
{
elog(WARNING, "Backup catalog was initialized for system id %lu, "
"but destination directory system id is %lu",
system_identifier, system_id_pgdata);
success = false;
}
/* check postmaster pid */
pid = fio_check_postmaster(pgdata, FIO_DB_HOST);
@@ -2080,8 +2142,28 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier,
elog(WARNING, "Postmaster with pid %u is running in destination directory \"%s\"",
pid, pgdata);
success = false;
postmaster_is_up = true;
}
/* slurp pg_control and check that system ID is the same
* check that instance is not running
* if lsn_based, check that there is no backup_label files is around AND
* get redo point lsn from destination pg_control.
* It is really important to be sure that pg_control is in cohesion with
* data files content, because based on pg_control information we will
* choose a backup suitable for lsn based incremental restore.
*/
system_id_pgdata = get_system_identifier(pgdata);
if (system_id_pgdata == instance_config.system_identifier)
system_id_match = true;
else
elog(WARNING, "Backup catalog was initialized for system id %lu, "
"but destination directory system id is %lu",
system_identifier, system_id_pgdata);
/*
* TODO: maybe there should be some other signs, pointing to pg_control
* desynchronization with cluster state.
@@ -2097,9 +2179,22 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier,
"to cluster with pg_control not synchronized with cluster state."
"Consider to use incremental restore in 'checksum' mode");
success = false;
backup_label_exists = true;
}
}
if (postmaster_is_up)
return POSTMASTER_IS_RUNNING;
if (!system_id_match)
return SYSTEM_ID_MISMATCH;
if (backup_label_exists)
return BACKUP_LABEL_EXISTS;
/* some other error condition */
if (!success)
elog(ERROR, "Incremental restore is impossible");
return DEST_IS_NOT_OK;
return DEST_OK;
}

View File

@@ -2163,9 +2163,9 @@ cleanup:
}
/* Compile the array of files located on remote machine in directory root */
void fio_list_dir(parray *files, const char *root, bool exclude,
bool follow_symlink, bool add_root, bool backup_logs,
bool skip_hidden, int external_dir_num)
static void fio_list_dir_internal(parray *files, const char *root, bool exclude,
bool follow_symlink, bool add_root, bool backup_logs,
bool skip_hidden, int external_dir_num)
{
fio_header hdr;
fio_list_dir_request req;
@@ -2321,6 +2321,19 @@ static void fio_list_dir_impl(int out, char* buf)
IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr));
}
/*
 * List the content of a directory on the DB host.
 * Dispatches to the remote fio protocol when the DB host is remote,
 * otherwise falls back to a plain local listing.
 */
void fio_list_dir(parray *files, const char *root, bool exclude,
				  bool follow_symlink, bool add_root, bool backup_logs,
				  bool skip_hidden, int external_dir_num)
{
	if (!fio_is_remote(FIO_DB_HOST))
		dir_list_file(files, root, exclude, follow_symlink, add_root,
					  backup_logs, skip_hidden, external_dir_num, FIO_LOCAL_HOST);
	else
		fio_list_dir_internal(files, root, exclude, follow_symlink, add_root,
							  backup_logs, skip_hidden, external_dir_num);
}
PageState *
fio_get_checksum_map(const char *fullpath, uint32 checksum_version, int n_blocks,
XLogRecPtr dest_stop_lsn, BlockNumber segmentno, fio_location location)

View File

@@ -698,3 +698,55 @@ do_validate_instance(void)
parray_walk(backups, pgBackupFree);
parray_free(backups);
}
/*
 * Validate tablespace_map checksum.
 * Error out in case of checksum mismatch.
 * Return 'false' if there are no tablespaces in backup.
 *
 * TODO: it is bad that we read the whole filelist just for
 * the sake of tablespace_map. Probably pgBackup should come with
 * already filled pgBackup.files
 */
bool
validate_tablespace_map(pgBackup *backup)
{
	char        map_path[MAXPGPATH];
	pgFile     *dummy = NULL;
	pgFile    **tablespace_map = NULL;
	pg_crc32    crc;
	parray     *files = get_backup_filelist(backup, true);

	parray_qsort(files, pgFileCompareRelPathWithExternal);
	join_path_components(map_path, backup->database_dir, PG_TABLESPACE_MAP_FILE);

	/* search the filelist for the tablespace_map entry via a dummy key */
	dummy = pgFileInit(PG_TABLESPACE_MAP_FILE);
	tablespace_map = (pgFile **) parray_bsearch(files, dummy, pgFileCompareRelPathWithExternal);

	if (!tablespace_map)
	{
		/* backup has no tablespaces, nothing to validate */
		elog(LOG, "there is no file tablespace_map");
		pgFileFree(dummy);		/* was leaked on this early-return path */
		parray_walk(files, pgFileFree);
		parray_free(files);
		return false;
	}

	/* Exit if database/tablespace_map doesn't exist */
	if (!fileExists(map_path, FIO_BACKUP_HOST))
		elog(ERROR, "Tablespace map is missing: \"%s\", "
			 "probably backup %s is corrupt, validate it",
			 map_path, base36enc(backup->backup_id));

	/* check tablespace map checksum against the value recorded in the filelist */
	crc = pgFileGetCRC(map_path, true, false);

	if ((*tablespace_map)->crc != crc)
		elog(ERROR, "Invalid CRC of tablespace map file \"%s\" : %X. Expected %X, "
			 "probably backup %s is corrupt, validate it",
			 map_path, crc, (*tablespace_map)->crc, base36enc(backup->backup_id));

	pgFileFree(dummy);
	parray_walk(files, pgFileFree);
	parray_free(files);
	return true;
}

View File

@@ -434,6 +434,35 @@ class ProbackupTest(object):
# res[0], 0,
# 'Failed to create tablespace with cmd: {0}'.format(cmd))
def drop_tblspace(self, node, tblspc_name):
    """Drop tablespace 'tblspc_name' on the given node, first dropping
    every ordinary table stored in it (a tablespace cannot be dropped
    while it still contains relations)."""
    res = node.execute(
        'postgres',
        'select exists'
        " (select 1 from pg_tablespace where spcname = '{0}')".format(
            tblspc_name)
    )
    # The tablespace must already exist, otherwise the test setup is broken
    self.assertTrue(
        res[0][0],
        'Tablespace "{0}" does not exist'.format(tblspc_name)
    )

    # Drop all ordinary tables located in the tablespace
    rels = node.execute(
        "postgres",
        "SELECT relname FROM pg_class c "
        "LEFT JOIN pg_tablespace t ON c.reltablespace = t.oid "
        "where c.relkind = 'r' and t.spcname = '{0}'".format(tblspc_name))

    for rel in rels:
        node.safe_psql(
            'postgres',
            "DROP TABLE {0}".format(rel[0]))

    node.safe_psql(
        'postgres',
        'DROP TABLESPACE {0}'.format(tblspc_name))
def get_tblspace_path(self, node, tblspc_name):
    # Tablespace directories are created as siblings of the node's
    # data dir, directly under node.base_dir
    return os.path.join(node.base_dir, tblspc_name)

View File

@@ -80,9 +80,59 @@ class IncrRestoreTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_basic_incr_restore_into_missing_directory(self):
    """Incremental (checksum mode) restore into a removed PGDATA:
    take a FULL backup and two PAGE backups, delete the data
    directory, run incremental restore and check that the restored
    content matches the pre-restore snapshot."""
    fname = self.id().split('.')[3]
    node = self.make_simple_node(
        base_dir=os.path.join(module_name, fname, 'node'),
        initdb_params=['--data-checksums'],
        pg_options={'autovacuum': 'off'})

    backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)
    self.set_archiving(backup_dir, 'node', node)
    node.slow_start()

    node.pgbench_init(scale=10)
    self.backup_node(backup_dir, 'node', node)

    # generate changes between FULL and first PAGE backup
    pgbench = node.pgbench(
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
        options=['-T', '10', '-c', '1', '--no-vacuum'])
    pgbench.wait()
    pgbench.stdout.close()

    self.backup_node(backup_dir, 'node', node, backup_type='page')

    # generate changes between the two PAGE backups
    pgbench = node.pgbench(
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
        options=['-T', '10', '-c', '1'])
    pgbench.wait()
    pgbench.stdout.close()

    self.backup_node(backup_dir, 'node', node, backup_type='page')

    pgdata = self.pgdata_content(node.data_dir)

    # remove PGDATA entirely: incremental restore must still work
    node.cleanup()

    print(self.restore_node(
        backup_dir, 'node', node,
        options=["-j", "4", "--incremental-mode=checksum"]))

    pgdata_restored = self.pgdata_content(node.data_dir)
    self.compare_pgdata(pgdata, pgdata_restored)

    # Clean after yourself
    self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_checksum_corruption_detection(self):
"""recovery to target timeline"""
"""
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@@ -127,7 +177,8 @@ class IncrRestoreTest(ProbackupTest, unittest.TestCase):
node.stop()
self.restore_node(
backup_dir, 'node', node, options=["-j", "4", "--incremental-mode=lsn"])
backup_dir, 'node', node,
options=["-j", "4", "--incremental-mode=lsn"])
pgdata_restored = self.pgdata_content(node.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
@@ -169,7 +220,7 @@ class IncrRestoreTest(ProbackupTest, unittest.TestCase):
self.restore_node(
backup_dir, 'node', node,
options=[
"-j", "4", "--incremental-mode=checksum",
"-j", "4", "--incremental-mode=checksum", "--force",
"-T{0}={1}".format(tblspace, some_directory)])
pgdata_restored = self.pgdata_content(node.data_dir)
@@ -255,28 +306,55 @@ class IncrRestoreTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'node', node)
node.slow_start()
tblspace = self.get_tblspace_path(node, 'tblspace')
self.create_tblspace_in_node(node, 'tblspace')
node.pgbench_init(scale=10, tablespace='tblspace')
self.backup_node(backup_dir, 'node', node, options=['--stream'])
pgdata = self.pgdata_content(node.data_dir)
node_1 = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node_1'))
node_1.cleanup()
self.restore_node(
# fill node1 with data
out = self.restore_node(
backup_dir, 'node', node,
data_dir=node_1.data_dir,
options=['--incremental-mode=checksum'])
options=['--incremental-mode=checksum', '--force'])
self.restore_node(
self.assertIn("WARNING: Backup catalog was initialized for system id", out)
tblspace = self.get_tblspace_path(node, 'tblspace')
self.create_tblspace_in_node(node, 'tblspace')
node.pgbench_init(scale=5, tablespace='tblspace')
node.safe_psql(
'postgres',
'vacuum')
self.backup_node(backup_dir, 'node', node, backup_type='delta', options=['--stream'])
pgdata = self.pgdata_content(node.data_dir)
try:
self.restore_node(
backup_dir, 'node', node,
data_dir=node_1.data_dir,
options=['--incremental-mode=checksum', '-T{0}={1}'.format(tblspace, tblspace)])
# we should die here because exception is what we expect to happen
self.assertEqual(
1, 0,
"Expecting Error because remapped directory is not empty.\n "
"Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertIn(
'ERROR: Remapped tablespace destination is not empty',
e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
out = self.restore_node(
backup_dir, 'node', node,
data_dir=node_1.data_dir,
options=['--incremental-mode=checksum', '-T{0}={1}'.format(tblspace, tblspace)])
options=[
'--force', '--incremental-mode=checksum',
'-T{0}={1}'.format(tblspace, tblspace)])
pgdata_restored = self.pgdata_content(node_1.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
@@ -284,6 +362,301 @@ class IncrRestoreTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_incr_restore_with_tablespace_3(self):
    """
    Take a full backup while 'tblspace1' exists, replace it with
    'tblspace2', then check that incremental checksum restore brings
    the instance back to the backed-up state.
    """
    test_name = self.id().split('.')[3]
    backup_dir = os.path.join(self.tmp_path, module_name, test_name, 'backup')

    node = self.make_simple_node(
        base_dir=os.path.join(module_name, test_name, 'node'),
        set_replication=True,
        initdb_params=['--data-checksums'])

    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)
    node.slow_start()

    # populate the instance through a tablespace
    self.create_tblspace_in_node(node, 'tblspace1')
    node.pgbench_init(scale=10, tablespace='tblspace1')

    # take backup with tblspace1
    self.backup_node(backup_dir, 'node', node, options=['--stream'])
    content_before = self.pgdata_content(node.data_dir)

    # diverge from the backup: swap tblspace1 for tblspace2
    self.drop_tblspace(node, 'tblspace1')

    self.create_tblspace_in_node(node, 'tblspace2')
    node.pgbench_init(scale=10, tablespace='tblspace2')
    node.stop()

    self.restore_node(
        backup_dir, 'node', node,
        options=[
            "-j", "4",
            "--incremental-mode=checksum"])

    content_after = self.pgdata_content(node.data_dir)
    self.compare_pgdata(content_before, content_after)

    # Clean after yourself
    self.del_test_dir(module_name, test_name)
# @unittest.skip("skip")
def test_incr_restore_with_tablespace_4(self):
    """
    Check that system ID mismatch is detected.
    """
    fname = self.id().split('.')[3]
    node = self.make_simple_node(
        base_dir=os.path.join(module_name, fname, 'node'),
        set_replication=True,
        initdb_params=['--data-checksums'])

    backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)
    node.slow_start()

    self.create_tblspace_in_node(node, 'tblspace1')
    node.pgbench_init(scale=10, tablespace='tblspace1')

    # take backup of node1 with tblspace1
    self.backup_node(backup_dir, 'node', node, options=['--stream'])
    pgdata = self.pgdata_content(node.data_dir)

    self.drop_tblspace(node, 'tblspace1')
    node.cleanup()

    # recreate node: fresh initdb means a new system identifier,
    # so the existing backup catalog no longer matches this PGDATA
    node = self.make_simple_node(
        base_dir=os.path.join(module_name, fname, 'node'),
        set_replication=True,
        initdb_params=['--data-checksums'])
    node.slow_start()

    self.create_tblspace_in_node(node, 'tblspace1')
    node.pgbench_init(scale=10, tablespace='tblspace1')
    node.stop()

    try:
        self.restore_node(
            backup_dir, 'node', node,
            options=[
                "-j", "4",
                "--incremental-mode=checksum"])
        # we should die here because exception is what we expect to happen
        self.assertEqual(
            1, 0,
            "Expecting Error because destination directory has wrong system id.\n "
            "Output: {0} \n CMD: {1}".format(
                repr(self.output), self.cmd))
    except ProbackupException as e:
        self.assertIn(
            'WARNING: Backup catalog was initialized for system id',
            e.message,
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(e.message), self.cmd))

        self.assertIn(
            'ERROR: Incremental restore is impossible',
            e.message,
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(e.message), self.cmd))

    # with --force the system id check is overridden and
    # the mismatching PGDATA is replaced
    self.restore_node(
        backup_dir, 'node', node,
        options=[
            "-j", "4", "--force",
            "--incremental-mode=checksum"])

    pgdata_restored = self.pgdata_content(node.data_dir)
    self.compare_pgdata(pgdata, pgdata_restored)

    # Clean after yourself
    self.del_test_dir(module_name, fname)
# @unittest.expectedFailure
@unittest.skip("skip")
def test_incr_restore_with_tablespace_5(self):
    """
    More complicated case, we restore backup
    with tablespace, which we remap into directory
    with some old content, that belongs to an instance
    with different system id.
    """
    test_name = self.id().split('.')[3]
    backup_dir = os.path.join(self.tmp_path, module_name, test_name, 'backup')

    # source instance with a tablespace
    node1 = self.make_simple_node(
        base_dir=os.path.join(module_name, test_name, 'node1'),
        set_replication=True,
        initdb_params=['--data-checksums'])

    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node1)
    node1.slow_start()

    self.create_tblspace_in_node(node1, 'tblspace')
    node1.pgbench_init(scale=10, tablespace='tblspace')

    # take backup of node1 with tblspace
    self.backup_node(backup_dir, 'node', node1, options=['--stream'])
    content_before = self.pgdata_content(node1.data_dir)
    node1.stop()

    # an unrelated instance whose tablespace directory will serve
    # as the "dirty" remap destination (different system id)
    node2 = self.make_simple_node(
        base_dir=os.path.join(module_name, test_name, 'node2'),
        set_replication=True,
        initdb_params=['--data-checksums'])
    node2.slow_start()

    self.create_tblspace_in_node(node2, 'tblspace')
    node2.pgbench_init(scale=10, tablespace='tblspace')
    node2.stop()

    src_tblspc = self.get_tblspace_path(node1, 'tblspace')
    dst_tblspc = self.get_tblspace_path(node2, 'tblspace')

    out = self.restore_node(
        backup_dir, 'node', node1,
        options=[
            "-j", "4", "--force",
            "--incremental-mode=checksum",
            "-T{0}={1}".format(src_tblspc, dst_tblspc)])

    # the original tablespace location must be left empty
    self.assertFalse(
        os.listdir(src_tblspc),
        "Dir is not empty: '{0}'".format(src_tblspc))

    content_after = self.pgdata_content(node1.data_dir)
    self.compare_pgdata(content_before, content_after)

    # Clean after yourself
    self.del_test_dir(module_name, test_name)
# @unittest.skip("skip")
def test_incr_restore_with_tablespace_6(self):
    """
    Empty pgdata, not empty tablespace
    """
    fname = self.id().split('.')[3]
    node = self.make_simple_node(
        base_dir=os.path.join(module_name, fname, 'node'),
        set_replication=True,
        initdb_params=['--data-checksums'])

    backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)
    node.slow_start()

    self.create_tblspace_in_node(node, 'tblspace')
    node.pgbench_init(scale=10, tablespace='tblspace')

    # take backup of node with tblspace
    self.backup_node(backup_dir, 'node', node, options=['--stream'])
    pgdata = self.pgdata_content(node.data_dir)

    # wipe PGDATA, but keep the tablespace directory populated
    node.cleanup()

    try:
        self.restore_node(
            backup_dir, 'node', node,
            options=[
                "-j", "4",
                "--incremental-mode=checksum"])
        # we should die here because exception is what we expect to happen
        self.assertEqual(
            1, 0,
            "Expecting Error because PGDATA is empty, "
            "but tablespace destination is not.\n "
            "Output: {0} \n CMD: {1}".format(
                repr(self.output), self.cmd))
    except ProbackupException as e:
        self.assertIn(
            'ERROR: PGDATA is empty, but tablespace destination is not',
            e.message,
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(e.message), self.cmd))

    # with --force the restore proceeds; incremental mode is expected
    # to be disabled, since the destination is treated as empty
    out = self.restore_node(
        backup_dir, 'node', node,
        options=[
            "-j", "4", "--force",
            "--incremental-mode=checksum"])

    self.assertIn(
        "INFO: Destination directory and tablespace directories are empty, "
        "disable incremental restore", out)

    pgdata_restored = self.pgdata_content(node.data_dir)
    self.compare_pgdata(pgdata, pgdata_restored)

    # Clean after yourself
    self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_incr_restore_with_tablespace_7(self):
    """
    Restore backup without tablespace into
    PGDATA with tablespace.
    """
    fname = self.id().split('.')[3]
    node = self.make_simple_node(
        base_dir=os.path.join(module_name, fname, 'node'),
        set_replication=True,
        initdb_params=['--data-checksums'])

    backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)
    node.slow_start()

    # take backup of node without tablespace
    self.backup_node(backup_dir, 'node', node, options=['--stream'])
    pgdata = self.pgdata_content(node.data_dir)

    # only now add a tablespace, so the running PGDATA has one
    # while the backup does not
    self.create_tblspace_in_node(node, 'tblspace')
    node.pgbench_init(scale=5, tablespace='tblspace')
    node.stop()

    self.restore_node(
        backup_dir, 'node', node,
        options=[
            "-j", "4", "--incremental-mode=checksum"])

    pgdata_restored = self.pgdata_content(node.data_dir)
    self.compare_pgdata(pgdata, pgdata_restored)

    # Clean after yourself
    self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_basic_incr_restore_sanity(self):
"""recovery to target timeline"""
@@ -1943,24 +2316,44 @@ class IncrRestoreTest(ProbackupTest, unittest.TestCase):
"-T", "{0}={1}".format(
node_tablespace, node1_tablespace)])
# with open(os.path.join(node1_tablespace, "hello"), "w") as f:
# f.close()
pgdata1 = self.pgdata_content(node1.data_dir)
# partial incremental restore into node2
try:
self.restore_node(
backup_dir, 'node',
node2, options=[
"-I", "checksum",
"--db-exclude=db1",
"--db-exclude=db5",
"-T", "{0}={1}".format(
node_tablespace, node2_tablespace)])
# we should die here because exception is what we expect to happen
self.assertEqual(
1, 0,
"Expecting Error because remapped tablespace contain old data .\n "
"Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertIn(
'ERROR: Remapped tablespace destination is not empty:',
e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
self.restore_node(
backup_dir, 'node',
node2, options=[
"-I", "checksum",
"-I", "checksum", "--force",
"--db-exclude=db1",
"--db-exclude=db5",
"-T", "{0}={1}".format(
node_tablespace, node2_tablespace)])
pgdata2 = self.pgdata_content(node2.data_dir)
self.compare_pgdata(pgdata1, pgdata2)
self.set_auto_conf(node2, {'port': node2.port})
node2.slow_start()

View File

@@ -8,6 +8,7 @@ from datetime import datetime, timedelta
import hashlib
import shutil
import json
from shutil import copyfile
from testgres import QueryException
@@ -1021,6 +1022,141 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_restore_with_missing_or_corrupted_tablespace_map(self):
    """restore backup with missing or corrupted tablespace_map"""
    fname = self.id().split('.')[3]
    node = self.make_simple_node(
        base_dir=os.path.join(module_name, fname, 'node'),
        initdb_params=['--data-checksums'],
        pg_options={'autovacuum': 'off'})

    backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)
    self.set_archiving(backup_dir, 'node', node)
    node.slow_start()

    # Create tablespace
    self.create_tblspace_in_node(node, 'tblspace')
    node.pgbench_init(scale=1, tablespace='tblspace')

    # Full backup
    self.backup_node(backup_dir, 'node', node)

    # Change some data
    pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum'])
    pgbench.wait()

    # Page backup
    page_id = self.backup_node(backup_dir, 'node', node, backup_type="page")

    pgdata = self.pgdata_content(node.data_dir)

    node2 = self.make_simple_node(
        base_dir=os.path.join(module_name, fname, 'node2'))
    node2.cleanup()

    olddir = self.get_tblspace_path(node, 'tblspace')
    newdir = self.get_tblspace_path(node2, 'tblspace')

    # drop tablespace_map
    tablespace_map = os.path.join(
        backup_dir, 'backups', 'node',
        page_id, 'database', 'tablespace_map')

    tablespace_map_tmp = os.path.join(
        backup_dir, 'backups', 'node',
        page_id, 'database', 'tablespace_map_tmp')

    os.rename(tablespace_map, tablespace_map_tmp)

    # restore with a missing map must fail, with or without remapping
    try:
        self.restore_node(
            backup_dir, 'node', node2,
            options=["-T", "{0}={1}".format(olddir, newdir)])
        self.assertEqual(
            1, 0,
            "Expecting Error because tablespace_map is missing.\n "
            "Output: {0} \n CMD: {1}".format(
                repr(self.output), self.cmd))
    except ProbackupException as e:
        self.assertIn(
            'ERROR: Tablespace map is missing: "{0}", '
            'probably backup {1} is corrupt, validate it'.format(
                tablespace_map, page_id),
            e.message,
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(e.message), self.cmd))

    try:
        self.restore_node(backup_dir, 'node', node2)
        self.assertEqual(
            1, 0,
            "Expecting Error because tablespace_map is missing.\n "
            "Output: {0} \n CMD: {1}".format(
                repr(self.output), self.cmd))
    except ProbackupException as e:
        self.assertIn(
            'ERROR: Tablespace map is missing: "{0}", '
            'probably backup {1} is corrupt, validate it'.format(
                tablespace_map, page_id),
            e.message,
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(e.message), self.cmd))

    # bring the map back and corrupt its content,
    # so that the stored CRC no longer matches
    copyfile(tablespace_map_tmp, tablespace_map)

    with open(tablespace_map, "a") as f:
        f.write("HELLO\n")

    # restore with a corrupted map must fail, with or without remapping
    try:
        self.restore_node(
            backup_dir, 'node', node2,
            options=["-T", "{0}={1}".format(olddir, newdir)])
        self.assertEqual(
            1, 0,
            "Expecting Error because tablespace_map is corupted.\n "
            "Output: {0} \n CMD: {1}".format(
                repr(self.output), self.cmd))
    except ProbackupException as e:
        self.assertIn(
            'ERROR: Invalid CRC of tablespace map file "{0}"'.format(tablespace_map),
            e.message,
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(e.message), self.cmd))

    try:
        self.restore_node(backup_dir, 'node', node2)
        self.assertEqual(
            1, 0,
            "Expecting Error because tablespace_map is corupted.\n "
            "Output: {0} \n CMD: {1}".format(
                repr(self.output), self.cmd))
    except ProbackupException as e:
        self.assertIn(
            'ERROR: Invalid CRC of tablespace map file "{0}"'.format(tablespace_map),
            e.message,
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(e.message), self.cmd))

    # rename it back: with the pristine map the restore must succeed
    os.rename(tablespace_map_tmp, tablespace_map)

    self.restore_node(
        backup_dir, 'node', node2,
        options=["-T", "{0}={1}".format(olddir, newdir)])

    pgdata_restored = self.pgdata_content(node2.data_dir)
    self.compare_pgdata(pgdata, pgdata_restored)

    # Clean after yourself
    self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_archive_node_backup_stream_restore_to_recovery_time(self):
"""

View File

@@ -82,7 +82,7 @@ class TimeStamp(ProbackupTest, unittest.TestCase):
self.del_test_dir(module_name, fname)
def test_handling_of_TZ_env_variable(self):
"""Issue #112"""
"""Issue #284"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),