
commit 366d36b9a9
Author: Grigory Smolkin
Date:   2019-03-28 18:45:40 +03:00

    Merge branch 'master' into pgpro-2573

16 changed files with 586 additions and 194 deletions


@@ -386,25 +386,18 @@ catalog_get_backup_list(time_t requested_backup_id)
/* Link incremental backups with their ancestors.*/
for (i = 0; i < parray_num(backups); i++)
{
pgBackup *curr = parray_get(backups, i);
int j;
pgBackup *curr = parray_get(backups, i);
pgBackup **ancestor;
pgBackup key;
if (curr->backup_mode == BACKUP_MODE_FULL)
continue;
for (j = i+1; j < parray_num(backups); j++)
{
pgBackup *ancestor = parray_get(backups, j);
if (ancestor->start_time == curr->parent_backup)
{
curr->parent_backup_link = ancestor;
/* elog(INFO, "curr %s, ancestor %s j=%d", base36enc_dup(curr->start_time),
base36enc_dup(ancestor->start_time), j); */
break;
}
}
key.start_time = curr->parent_backup;
ancestor = (pgBackup **) parray_bsearch(backups, &key,
pgBackupCompareIdDesc);
if (ancestor)
curr->parent_backup_link = *ancestor;
}
return backups;
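
The hunk above replaces the inner linear scan with a binary search: catalog_get_backup_list returns the list sorted by backup id in descending order, so each incremental backup's parent can be looked up in O(log n) via parray_bsearch with pgBackupCompareIdDesc. A minimal self-contained sketch of the same pattern, using stand-in types and libc bsearch() rather than pg_probackup's parray API:

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    typedef struct Backup
    {
        time_t         start_time;   /* the backup id */
        struct Backup *parent_link;  /* filled in by the linking pass */
    } Backup;

    /* descending by id, like pgBackupCompareIdDesc */
    static int
    compare_id_desc(const void *a, const void *b)
    {
        const Backup *ba = *(Backup *const *) a;
        const Backup *bb = *(Backup *const *) b;

        if (ba->start_time > bb->start_time)
            return -1;
        if (ba->start_time < bb->start_time)
            return 1;
        return 0;
    }

    int
    main(void)
    {
        Backup   full = {100, NULL};
        Backup   page = {200, NULL};        /* page's parent id is 100 */
        Backup  *list[] = {&page, &full};   /* sorted by id, descending */

        Backup   key = {100, NULL};         /* key.start_time = parent_backup */
        Backup  *key_ptr = &key;
        Backup **found = bsearch(&key_ptr, list, 2, sizeof(Backup *),
                                 compare_id_desc);

        if (found)
            page.parent_link = *found;      /* link child to its ancestor */

        printf("parent id: %ld\n", (long) page.parent_link->start_time);
        return 0;
    }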
@@ -603,7 +596,7 @@ write_backup(pgBackup *backup)
int errno_temp;
pgBackupGetPath(backup, path, lengthof(path), BACKUP_CONTROL_FILE);
snprintf(path_temp, sizeof(path_temp), "%s.partial", path);
snprintf(path_temp, sizeof(path_temp), "%s.tmp", path);
fp = fopen(path_temp, "wt");
if (fp == NULL)
@@ -644,7 +637,7 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root,
int errno_temp;
pgBackupGetPath(backup, path, lengthof(path), DATABASE_FILE_LIST);
snprintf(path_temp, sizeof(path_temp), "%s.partial", path);
snprintf(path_temp, sizeof(path_temp), "%s.tmp", path);
fp = fopen(path_temp, "wt");
if (fp == NULL)
@@ -728,7 +721,7 @@ readBackupControlFile(const char *path)
return NULL;
}
parsed_options = config_read_opt(path, options, WARNING, true);
parsed_options = config_read_opt(path, options, WARNING, true, true);
if (parsed_options == 0)
{
@@ -1083,8 +1076,12 @@ find_parent_full_backup(pgBackup *current_backup)
if (base_full_backup->backup_mode != BACKUP_MODE_FULL)
{
elog(WARNING, "Failed to find FULL backup parent for %s",
base36enc(current_backup->start_time));
if (base_full_backup->parent_backup)
elog(WARNING, "Backup %s is missing",
base36enc(base_full_backup->parent_backup));
else
elog(WARNING, "Failed to find parent FULL backup for %s",
base36enc(current_backup->start_time));
return NULL;
}


@@ -9,6 +9,8 @@
#include "pg_probackup.h"
#include <unistd.h>
#include "utils/configuration.h"
#include "utils/json.h"
@@ -213,16 +215,22 @@ do_show_config(void)
* values into the file.
*/
void
do_set_config(void)
do_set_config(bool missing_ok)
{
char path[MAXPGPATH];
char path_temp[MAXPGPATH];
FILE *fp;
int i;
join_path_components(path, backup_instance_path, BACKUP_CATALOG_CONF_FILE);
fp = fopen(path, "wt");
snprintf(path_temp, sizeof(path_temp), "%s.tmp", path);
if (!missing_ok && !fileExists(path))
elog(ERROR, "Configuration file \"%s\" doesn't exist", path);
fp = fopen(path_temp, "wt");
if (fp == NULL)
elog(ERROR, "cannot create %s: %s",
elog(ERROR, "Cannot create configuration file \"%s\": %s",
BACKUP_CATALOG_CONF_FILE, strerror(errno));
current_group = NULL;
@@ -253,6 +261,14 @@ do_set_config(void)
}
fclose(fp);
if (rename(path_temp, path) < 0)
{
int errno_temp = errno;
unlink(path_temp);
elog(ERROR, "Cannot rename configuration file \"%s\" to \"%s\": %s",
path_temp, path, strerror(errno_temp));
}
}
void

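The important change in this file is that do_set_config now writes the options to a temporary file and atomically rename()s it over the real one, so an interrupted write cannot leave a truncated pg_probackup.conf behind. A sketch of that pattern in plain POSIX C (file name, contents, and messages are illustrative, not the real helpers):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    static void
    write_config_atomically(const char *path)
    {
        char  path_temp[4096];
        FILE *fp;

        snprintf(path_temp, sizeof(path_temp), "%s.tmp", path);

        fp = fopen(path_temp, "wt");
        if (fp == NULL)
        {
            fprintf(stderr, "Cannot create \"%s\": %s\n",
                    path_temp, strerror(errno));
            exit(1);
        }

        fprintf(fp, "# options would be written here\n");
        fclose(fp);

        /* rename() is atomic on POSIX: readers see either the old file
         * or the complete new one, never a half-written config */
        if (rename(path_temp, path) < 0)
        {
            int errno_temp = errno;

            unlink(path_temp);   /* don't leave the temp file behind */
            fprintf(stderr, "Cannot rename \"%s\" to \"%s\": %s\n",
                    path_temp, path, strerror(errno_temp));
            exit(1);
        }
    }

    int
    main(void)
    {
        write_config_atomically("pg_probackup.conf");
        return 0;
    }
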

@@ -29,75 +29,57 @@ do_delete(time_t backup_id)
parray *backup_list,
*delete_list;
pgBackup *target_backup = NULL;
time_t parent_id = 0;
XLogRecPtr oldest_lsn = InvalidXLogRecPtr;
TimeLineID oldest_tli = 0;
/* Get complete list of backups */
backup_list = catalog_get_backup_list(INVALID_BACKUP_ID);
if (backup_id != 0)
delete_list = parray_new();
/* Find the backup to be deleted and build the array of incremental backups to delete with it */
for (i = 0; i < parray_num(backup_list); i++)
{
delete_list = parray_new();
pgBackup *backup = (pgBackup *) parray_get(backup_list, i);
/* Find the backup to be deleted and build the array of incremental backups to delete with it */
for (i = (int) parray_num(backup_list) - 1; i >= 0; i--)
if (backup->start_time == backup_id)
{
pgBackup *backup = (pgBackup *) parray_get(backup_list, (size_t) i);
if (backup->start_time == backup_id)
{
parray_append(delete_list, backup);
/*
* Do not remove next backups, if target backup was finished
* incorrectly.
*/
if (backup->status == BACKUP_STATUS_ERROR)
break;
/* Save backup id to retrieve increment backups */
parent_id = backup->start_time;
target_backup = backup;
}
else if (target_backup)
{
/* TODO: Current algorithm is imperfect, it assumes that a backup
* can sire only one incremental chain
*/
if (backup->backup_mode != BACKUP_MODE_FULL &&
backup->parent_backup == parent_id)
{
/* Append to delete list increment backup */
parray_append(delete_list, backup);
/* Save backup id to retrieve increment backups */
parent_id = backup->start_time;
}
else
break;
}
target_backup = backup;
break;
}
if (parray_num(delete_list) == 0)
elog(ERROR, "no backup found, cannot delete");
catalog_lock_backup_list(delete_list, parray_num(delete_list) - 1, 0);
/* Delete backups from the end of list */
for (i = (int) parray_num(delete_list) - 1; i >= 0; i--)
{
pgBackup *backup = (pgBackup *) parray_get(delete_list, (size_t) i);
if (interrupted)
elog(ERROR, "interrupted during delete backup");
delete_backup_files(backup);
}
parray_free(delete_list);
}
/* sanity */
if (!target_backup)
elog(ERROR, "Failed to find backup %s, cannot delete", base36enc(backup_id));
/* form delete list */
for (i = 0; i < parray_num(backup_list); i++)
{
pgBackup *backup = (pgBackup *) parray_get(backup_list, i);
/* check if backup is descendant of delete target */
if (is_parent(target_backup->start_time, backup, false))
parray_append(delete_list, backup);
}
parray_append(delete_list, target_backup);
/* Lock marked for delete backups */
catalog_lock_backup_list(delete_list, parray_num(delete_list) - 1, 0);
/* Delete backups from the end of list */
for (i = (int) parray_num(delete_list) - 1; i >= 0; i--)
{
pgBackup *backup = (pgBackup *) parray_get(delete_list, (size_t) i);
if (interrupted)
elog(ERROR, "interrupted during delete backup");
delete_backup_files(backup);
}
parray_free(delete_list);
/* Clean WAL segments */
if (delete_wal)
{
@@ -670,6 +652,9 @@ delete_backup_files(pgBackup *backup)
elog(INFO, "Progress: (%zd/%zd). Process file \"%s\"",
i + 1, num_files, file->path);
if (interrupted)
elog(ERROR, "interrupted during delete backup");
pgFileDelete(file);
}

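do_delete no longer walks forward assuming a single increment chain; it appends the target plus every backup for which is_parent(target, backup, false) holds, then deletes from the end of the list so children are removed before their parents. The descendant test itself amounts to walking the parent links; a stand-in sketch under that assumption (is_descendant is illustrative, not the real is_parent()):

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    typedef struct Backup
    {
        time_t         start_time;
        struct Backup *parent_link;
    } Backup;

    /* true iff target_id appears on backup's parent-link chain */
    static bool
    is_descendant(time_t target_id, const Backup *backup)
    {
        const Backup *cur;

        for (cur = backup->parent_link; cur != NULL; cur = cur->parent_link)
            if (cur->start_time == target_id)
                return true;
        return false;
    }

    int
    main(void)
    {
        Backup full  = {100, NULL};
        Backup page1 = {200, &full};
        Backup page2 = {300, &page1};

        /* deleting FULL (id 100) must also take PAGE1 and PAGE2 */
        printf("%d %d\n", is_descendant(100, &page1),
                          is_descendant(100, &page2));   /* prints: 1 1 */
        return 0;
    }
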

@@ -104,7 +104,7 @@ do_add_instance(void)
config_set_opt(instance_options, &instance_config.xlog_seg_size,
SOURCE_FILE);
/* pgdata was set through command line */
do_set_config();
do_set_config(true);
elog(INFO, "Instance '%s' successfully inited", instance_name);
return 0;


@@ -52,12 +52,10 @@ void
do_merge(time_t backup_id)
{
parray *backups;
parray *merge_list = parray_new();
pgBackup *dest_backup = NULL;
pgBackup *full_backup = NULL;
time_t prev_parent = INVALID_BACKUP_ID;
int i;
int dest_backup_idx = 0;
int full_backup_idx = 0;
if (backup_id == INVALID_BACKUP_ID)
elog(ERROR, "required parameter is not specified: --backup-id");
@@ -70,73 +68,79 @@ do_merge(time_t backup_id)
/* Get list of all backups sorted in order of descending start time */
backups = catalog_get_backup_list(INVALID_BACKUP_ID);
/* Find destination and parent backups */
/* Find destination backup first */
for (i = 0; i < parray_num(backups); i++)
{
pgBackup *backup = (pgBackup *) parray_get(backups, i);
if (backup->start_time > backup_id)
continue;
else if (backup->start_time == backup_id && !dest_backup)
/* found target */
if (backup->start_time == backup_id)
{
/* sanity */
if (backup->status != BACKUP_STATUS_OK &&
/* It is possible that previous merging was interrupted */
backup->status != BACKUP_STATUS_MERGING &&
backup->status != BACKUP_STATUS_DELETING)
elog(ERROR, "Backup %s has status: %s",
base36enc(backup->start_time), status2str(backup->status));
elog(ERROR, "Backup %s has status: %s",
base36enc(backup->start_time), status2str(backup->status));
if (backup->backup_mode == BACKUP_MODE_FULL)
elog(ERROR, "Backup %s is full backup",
base36enc(backup->start_time));
dest_backup = backup;
dest_backup_idx = i;
break;
}
else
{
if (dest_backup == NULL)
elog(ERROR, "Target backup %s was not found", base36enc(backup_id));
if (backup->start_time != prev_parent)
continue;
if (backup->status != BACKUP_STATUS_OK &&
/* It is possible that previous merging was interrupted */
backup->status != BACKUP_STATUS_MERGING)
elog(ERROR, "Backup %s has status: %s",
base36enc(backup->start_time), status2str(backup->status));
/* If we already found dest_backup, look for full backup */
if (dest_backup && backup->backup_mode == BACKUP_MODE_FULL)
{
full_backup = backup;
full_backup_idx = i;
/* Found target and full backups, so break the loop */
break;
}
}
prev_parent = backup->parent_backup;
}
/* sanity */
if (dest_backup == NULL)
elog(ERROR, "Target backup %s was not found", base36enc(backup_id));
/* get full backup */
full_backup = find_parent_full_backup(dest_backup);
/* sanity */
if (full_backup == NULL)
elog(ERROR, "Parent full backup for the given backup %s was not found",
base36enc(backup_id));
Assert(full_backup_idx != dest_backup_idx);
/* sanity */
if (full_backup->status != BACKUP_STATUS_OK &&
/* It is possible that previous merging was interrupted */
full_backup->status != BACKUP_STATUS_MERGING)
elog(ERROR, "Backup %s has status: %s",
base36enc(full_backup->start_time), status2str(full_backup->status));
catalog_lock_backup_list(backups, full_backup_idx, dest_backup_idx);
//Assert(full_backup_idx != dest_backup_idx);
/* form merge list */
while(dest_backup->parent_backup_link)
{
/* sanity */
if (dest_backup->status != BACKUP_STATUS_OK &&
/* It is possible that previous merging was interrupted */
dest_backup->status != BACKUP_STATUS_MERGING &&
dest_backup->status != BACKUP_STATUS_DELETING)
elog(ERROR, "Backup %s has status: %s",
base36enc(dest_backup->start_time), status2str(dest_backup->status));
parray_append(merge_list, dest_backup);
dest_backup = dest_backup->parent_backup_link;
}
/* Add FULL backup for easy locking */
parray_append(merge_list, full_backup);
/* Lock merge chain */
catalog_lock_backup_list(merge_list, parray_num(merge_list) - 1, 0);
/*
* Found target and full backups, merge them and intermediate backups
*/
for (i = full_backup_idx; i > dest_backup_idx; i--)
for (i = parray_num(merge_list) - 2; i >= 0; i--)
{
pgBackup *from_backup = (pgBackup *) parray_get(backups, i - 1);
pgBackup *from_backup = (pgBackup *) parray_get(merge_list, i);
merge_backups(full_backup, from_backup);
}
@@ -148,6 +152,7 @@ do_merge(time_t backup_id)
/* cleanup */
parray_walk(backups, pgBackupFree);
parray_free(backups);
parray_free(merge_list);
elog(INFO, "Merge of backup %s completed", base36enc(backup_id));
}

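Rather than pairing full_backup_idx/dest_backup_idx indexes into the sorted list, do_merge now builds the merge chain by following parent_backup_link from the destination up to the FULL backup, locks that chain, and merges from the tail of the list so the oldest increment is folded in first. A self-contained sketch with illustrative types, not the real parray API:

    #include <stdio.h>

    typedef struct Backup
    {
        const char    *id;
        struct Backup *parent_link;
    } Backup;

    int
    main(void)
    {
        Backup  full  = {"FULL",  NULL};
        Backup  page1 = {"PAGE1", &full};
        Backup  page2 = {"PAGE2", &page1};   /* merge target (dest) */

        Backup *merge_list[8];
        int     n = 0;
        Backup *cur;

        /* walk up from dest: dest first, FULL's direct child last */
        for (cur = &page2; cur->parent_link != NULL; cur = cur->parent_link)
            merge_list[n++] = cur;
        merge_list[n++] = &full;             /* FULL appended for easy locking */

        /* skip the FULL itself (last element), merge oldest-first */
        for (int i = n - 2; i >= 0; i--)
            printf("merge %s into %s\n", merge_list[i]->id, full.id);

        return 0;
    }
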

@@ -386,8 +386,12 @@ main(int argc, char *argv[])
config_get_opt_env(instance_options);
/* Read options from configuration file */
join_path_components(path, backup_instance_path, BACKUP_CATALOG_CONF_FILE);
config_read_opt(path, instance_options, ERROR, true);
if (backup_subcmd != ADD_INSTANCE_CMD)
{
join_path_components(path, backup_instance_path,
BACKUP_CATALOG_CONF_FILE);
config_read_opt(path, instance_options, ERROR, true, false);
}
}
/* Initialize logger */
@@ -530,7 +534,7 @@ main(int argc, char *argv[])
do_show_config();
break;
case SET_CONFIG_CMD:
do_set_config();
do_set_config(false);
break;
case NO_CMD:
/* Should not happen */


@@ -445,7 +445,7 @@ extern int do_archive_get(char *wal_file_path, char *wal_file_name);
/* in configure.c */
extern void do_show_config(void);
extern void do_set_config(void);
extern void do_set_config(bool missing_ok);
extern void init_config(InstanceConfig *config);
/* in show.c */


@@ -55,9 +55,6 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
pgBackup *dest_backup = NULL;
pgBackup *base_full_backup = NULL;
pgBackup *corrupted_backup = NULL;
int dest_backup_index = 0;
int base_full_backup_index = 0;
int corrupted_backup_index = 0;
char *action = is_restore ? "Restore":"Validate";
parray *parent_chain = NULL;
@@ -179,8 +176,6 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
if (dest_backup == NULL)
elog(ERROR, "Backup satisfying target options is not found.");
dest_backup_index = get_backup_index_number(backups, dest_backup);
/* If we already found dest_backup, look for full backup. */
if (dest_backup->backup_mode == BACKUP_MODE_FULL)
base_full_backup = dest_backup;
@@ -201,7 +196,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
missing_backup_start_time = tmp_backup->parent_backup;
missing_backup_id = base36enc_dup(tmp_backup->parent_backup);
for (j = get_backup_index_number(backups, tmp_backup); j >= 0; j--)
for (j = 0; j < parray_num(backups); j++)
{
pgBackup *backup = (pgBackup *) parray_get(backups, j);
@@ -235,7 +230,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
/* parent_backup_id contains the human-readable backup ID of the oldest invalid backup */
parent_backup_id = base36enc_dup(tmp_backup->start_time);
for (j = get_backup_index_number(backups, tmp_backup) - 1; j >= 0; j--)
for (j = 0; j < parray_num(backups); j++)
{
pgBackup *backup = (pgBackup *) parray_get(backups, j);
@@ -261,6 +256,11 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
}
}
tmp_backup = find_parent_full_backup(dest_backup);
/* sanity */
if (!tmp_backup)
elog(ERROR, "Parent full backup for the given backup %s was not found",
base36enc(dest_backup->start_time));
}
/*
@@ -276,8 +276,6 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
if (base_full_backup == NULL)
elog(ERROR, "Full backup satisfying target options is not found.");
base_full_backup_index = get_backup_index_number(backups, base_full_backup);
/*
* Ensure that directories provided in tablespace mapping are valid
* i.e. empty or not exist.
@@ -297,17 +295,16 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
/* Take every backup that is a child of base_backup AND parent of dest_backup
* including base_backup and dest_backup
*/
for (i = base_full_backup_index; i >= dest_backup_index; i--)
{
tmp_backup = (pgBackup *) parray_get(backups, i);
if (is_parent(base_full_backup->start_time, tmp_backup, true) &&
is_parent(tmp_backup->start_time, dest_backup, true))
{
parray_append(parent_chain, tmp_backup);
}
tmp_backup = dest_backup;
while(tmp_backup->parent_backup_link)
{
parray_append(parent_chain, tmp_backup);
tmp_backup = tmp_backup->parent_backup_link;
}
parray_append(parent_chain, base_full_backup);
/* for validation or restore with enabled validation */
if (!is_restore || !rt->restore_no_validate)
{
@@ -317,7 +314,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
/*
* Validate backups from base_full_backup to dest_backup.
*/
for (i = 0; i < parray_num(parent_chain); i++)
for (i = parray_num(parent_chain) - 1; i >= 0; i--)
{
tmp_backup = (pgBackup *) parray_get(parent_chain, i);
@@ -344,10 +341,6 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
if (tmp_backup->status != BACKUP_STATUS_OK)
{
corrupted_backup = tmp_backup;
/* we need corrupted backup index from 'backups' not parent_chain
* so we can properly orphanize all its descendants
*/
corrupted_backup_index = get_backup_index_number(backups, corrupted_backup);
break;
}
/* We do not validate WAL files of intermediate backups
@@ -373,7 +366,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
char *corrupted_backup_id;
corrupted_backup_id = base36enc_dup(corrupted_backup->start_time);
for (j = corrupted_backup_index - 1; j >= 0; j--)
for (j = 0; j < parray_num(backups); j++)
{
pgBackup *backup = (pgBackup *) parray_get(backups, j);
@@ -418,7 +411,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
*/
if (is_restore)
{
for (i = 0; i < parray_num(parent_chain); i++)
for (i = parray_num(parent_chain) - 1; i >= 0; i--)
{
pgBackup *backup = (pgBackup *) parray_get(parent_chain, i);

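Because parent_chain is now assembled by walking up from dest_backup, the array holds the destination first and the FULL backup last, which is why the validation and restore loops flipped direction: iterating from the end of the array processes the FULL backup before its increments. A trivial illustration of that ordering:

    #include <stdio.h>

    int
    main(void)
    {
        /* built dest-first, like the loop above */
        const char *parent_chain[] = {"PAGE2", "PAGE1", "FULL"};
        int         i;

        /* validate/restore from FULL down to the destination */
        for (i = 2; i >= 0; i--)
            printf("processing %s\n", parent_chain[i]);

        return 0;
    }
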

@@ -521,7 +521,7 @@ config_get_opt(int argc, char **argv, ConfigOption cmd_options[],
*/
int
config_read_opt(const char *path, ConfigOption options[], int elevel,
bool strict)
bool strict, bool missing_ok)
{
FILE *fp;
char buf[1024];
@@ -532,7 +532,7 @@ config_read_opt(const char *path, ConfigOption options[], int elevel,
if (!options)
return parsed_options;
if ((fp = pgut_fopen(path, "rt", true)) == NULL)
if ((fp = pgut_fopen(path, "rt", missing_ok)) == NULL)
return parsed_options;
while (fgets(buf, lengthof(buf), fp))

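The new missing_ok flag is passed through to pgut_fopen, which by the look of this call treats an absent file as a soft miss (returning NULL) rather than an error when the flag is set; that lets callers of config_read_opt decide whether a missing configuration file is fatal. A hedged stand-in showing the shape of such a wrapper (fopen_missing_ok is hypothetical, not pg_probackup's function):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static FILE *
    fopen_missing_ok(const char *path, const char *mode, bool missing_ok)
    {
        FILE *fp = fopen(path, mode);

        if (fp == NULL)
        {
            if (missing_ok && errno == ENOENT)
                return NULL;    /* caller treats absence as "no options" */

            fprintf(stderr, "could not open \"%s\": %s\n",
                    path, strerror(errno));
            exit(1);
        }
        return fp;
    }

    int
    main(void)
    {
        FILE *fp = fopen_missing_ok("/nonexistent.conf", "rt", true);

        if (fp == NULL)
            puts("config absent, using defaults");
        else
            fclose(fp);
        return 0;
    }
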

@@ -78,7 +78,7 @@ struct ConfigOption
extern int config_get_opt(int argc, char **argv, ConfigOption cmd_options[],
ConfigOption options[]);
extern int config_read_opt(const char *path, ConfigOption options[], int elevel,
bool strict);
bool strict, bool missing_ok);
extern void config_get_opt_env(ConfigOption options[]);
extern void config_set_opt(ConfigOption options[], void *var,
OptionSource source);


@@ -337,7 +337,13 @@ do_validate_all(void)
sprintf(arclog_path, "%s/%s/%s", backup_path, "wal", instance_name);
join_path_components(conf_path, backup_instance_path,
BACKUP_CATALOG_CONF_FILE);
config_read_opt(conf_path, instance_options, ERROR, false);
if (config_read_opt(conf_path, instance_options, ERROR, false,
true) == 0)
{
elog(WARNING, "Configuration file \"%s\" is empty", conf_path);
corrupted_backup_found = true;
continue;
}
do_validate_instance();
}
@@ -452,6 +458,11 @@ do_validate_instance(void)
continue;
}
base_full_backup = find_parent_full_backup(current_backup);
/* sanity */
if (!base_full_backup)
elog(ERROR, "Parent full backup for the given backup %s was not found",
base36enc(current_backup->start_time));
}
/* chain is whole, all parents are valid at first glance,
* current backup validation can proceed
@@ -568,7 +579,7 @@ do_validate_instance(void)
if (backup->status == BACKUP_STATUS_OK)
{
//tmp_backup = find_parent_full_backup(dest_backup);
/* Revalidation successful, validate corresponding WAL files */
validate_wal(backup, arclog_path, 0,
0, 0, current_backup->tli,


@@ -1,7 +1,7 @@
import unittest
from . import init_test, merge, option_test, show_test, compatibility, \
backup_test, delete_test, delta, restore, validate_test, \
backup_test, delete, delta, restore, validate, \
retention, pgpro560, pgpro589, pgpro2068, false_positive, replica, \
compression, page, ptrack, archive, exclude, cfs_backup, cfs_restore, \
cfs_validate_backup, auth_test, time_stamp, snapfs, logging, \
@@ -19,7 +19,7 @@ def load_tests(loader, tests, pattern):
# suite.addTests(loader.loadTestsFromModule(cfs_validate_backup))
# suite.addTests(loader.loadTestsFromModule(logging))
suite.addTests(loader.loadTestsFromModule(compression))
suite.addTests(loader.loadTestsFromModule(delete_test))
suite.addTests(loader.loadTestsFromModule(delete))
suite.addTests(loader.loadTestsFromModule(delta))
suite.addTests(loader.loadTestsFromModule(exclude))
suite.addTests(loader.loadTestsFromModule(false_positive))
@@ -36,7 +36,7 @@ def load_tests(loader, tests, pattern):
suite.addTests(loader.loadTestsFromModule(retention))
suite.addTests(loader.loadTestsFromModule(show_test))
suite.addTests(loader.loadTestsFromModule(snapfs))
suite.addTests(loader.loadTestsFromModule(validate_test))
suite.addTests(loader.loadTestsFromModule(validate))
suite.addTests(loader.loadTestsFromModule(pgpro560))
suite.addTests(loader.loadTestsFromModule(pgpro589))
suite.addTests(loader.loadTestsFromModule(pgpro2068))


@@ -250,3 +250,282 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_delete_interleaved_incremental_chains(self):
"""complicated case of interleaved backup chains"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'])
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# Take FULL BACKUPs
backup_id_a = self.backup_node(backup_dir, 'node', node)
backup_id_b = self.backup_node(backup_dir, 'node', node)
# Change FULL B backup status to ERROR
self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
# FULLb ERROR
# FULLa OK
# Take PAGEa1 backup
page_id_a1 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# PAGEa1 OK
# FULLb ERROR
# FULLa OK
# Change FULL B backup status to OK
self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
# Change PAGEa1 backup status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR')
# PAGEa1 ERROR
# FULLb OK
# FULLa OK
page_id_b1 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# PAGEb1 OK
# PAGEa1 ERROR
# FULLb OK
# FULLa OK
# Now we start to play with first generation of PAGE backups
# Change PAGEb1 status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
# Change PAGEa1 status to OK
self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK')
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLa OK
page_id_a2 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# PAGEa2 OK
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLa OK
# Change PAGEa2 status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR')
# Change PAGEb1 status to OK
self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
# PAGEa2 ERROR
# PAGEb1 OK
# PAGEa1 OK
# FULLb OK
# FULLa OK
page_id_b2 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# Change PAGEa2 status to OK
self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK')
# PAGEb2 OK
# PAGEa2 OK
# PAGEb1 OK
# PAGEa1 OK
# FULLb OK
# FULLa OK
self.backup_node(backup_dir, 'node', node)
self.backup_node(backup_dir, 'node', node, backup_type='page')
# PAGEc1 OK
# FULLc OK
# PAGEb2 OK
# PAGEa2 OK
# PAGEb1 OK
# PAGEa1 OK
# FULLb OK
# FULLa OK
# Delete FULLb
self.delete_pb(
backup_dir, 'node', backup_id_b)
self.assertEqual(len(self.show_pb(backup_dir, 'node')), 5)
print(self.show_pb(
backup_dir, 'node', as_json=False, as_text=True))
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_delete_multiple_descendants(self):
"""
PAGEb3
| PAGEa3
PAGEb2 /
| PAGEa2 /
PAGEb1 \ /
| PAGEa1
FULLb |
FULLa should be deleted
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'])
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# Take FULL BACKUPs
backup_id_a = self.backup_node(backup_dir, 'node', node)
backup_id_b = self.backup_node(backup_dir, 'node', node)
# Change FULLb backup status to ERROR
self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
page_id_a1 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# Change FULLb backup status to OK
self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
# Change PAGEa1 backup status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR')
# PAGEa1 ERROR
# FULLb OK
# FULLa OK
page_id_b1 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# PAGEb1 OK
# PAGEa1 ERROR
# FULLb OK
# FULLa OK
# Change PAGEa1 backup status to OK
self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK')
# Change PAGEb1 backup status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLa OK
page_id_a2 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# PAGEa2 OK
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLa OK
# Change PAGEb1 backup status to OK
self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
# Change PAGEa2 backup status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR')
# PAGEa2 ERROR
# PAGEb1 OK
# PAGEa1 OK
# FULLb OK
# FULLa OK
page_id_b2 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# PAGEb2 OK
# PAGEa2 ERROR
# PAGEb1 OK
# PAGEa1 OK
# FULLb OK
# FULLa OK
# Change PAGEb2 and PAGEb1 status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_b2, 'ERROR')
self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
# PAGEb2 ERROR
# PAGEa2 ERROR
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLa OK
page_id_a3 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# PAGEa3 OK
# PAGEb2 ERROR
# PAGEa2 ERROR
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLa OK
# Change PAGEa3 status to ERROR
self.change_backup_status(backup_dir, 'node', page_id_a3, 'ERROR')
# Change PAGEb2 status to OK
self.change_backup_status(backup_dir, 'node', page_id_b2, 'OK')
page_id_b3 = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# PAGEb3 OK
# PAGEa3 ERROR
# PAGEb2 OK
# PAGEa2 ERROR
# PAGEb1 ERROR
# PAGEa1 OK
# FULLb OK
# FULLa OK
# Change PAGEa3, PAGEa2 and PAGEb1 status to OK
self.change_backup_status(backup_dir, 'node', page_id_a3, 'OK')
self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK')
self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
# PAGEb3 OK
# PAGEa3 OK
# PAGEb2 OK
# PAGEa2 OK
# PAGEb1 OK
# PAGEa1 OK
# FULLb OK
# FULLa OK
# Check that page_id_a3 and page_id_a2 are both direct descendants of page_id_a1
self.assertEqual(
self.show_pb(backup_dir, 'node', backup_id=page_id_a3)['parent-backup-id'],
page_id_a1)
self.assertEqual(
self.show_pb(backup_dir, 'node', backup_id=page_id_a2)['parent-backup-id'],
page_id_a1)
# Delete FULLa
self.delete_pb(backup_dir, 'node', backup_id_a)
self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4)
# Clean after yourself
self.del_test_dir(module_name, fname)


@@ -4,6 +4,7 @@ import unittest
import os
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
import shutil
from datetime import datetime, timedelta
module_name = "merge"
@@ -1202,11 +1203,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True, initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica'
}
)
set_replication=True, initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
@@ -1257,20 +1254,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
backup_id_deleted = self.show_pb(backup_dir, "node")[1]["id"]
# Try to continue failed MERGE
try:
self.merge_backup(backup_dir, "node", backup_id)
self.assertEqual(
1, 0,
"Expecting Error because of backup corruption.\n "
"Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
"ERROR: Backup {0} has status: DELETING".format(
backup_id_deleted) in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
self.merge_backup(backup_dir, "node", backup_id)
# Clean after yourself
self.del_test_dir(module_name, fname)
@@ -1616,7 +1600,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'])
initdb_params=['--data-checksums'],
pg_options={'autovacuum': 'off'})
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
@@ -1638,6 +1623,10 @@ class MergeTest(ProbackupTest, unittest.TestCase):
'postgres',
"select pg_relation_filepath('pgbench_accounts')").rstrip()
node.safe_psql(
'postgres',
"VACUUM pgbench_accounts")
vm_path = path + '_vm'
# DELTA backup
@@ -1673,29 +1662,83 @@ class MergeTest(ProbackupTest, unittest.TestCase):
backup_dir, 'backups',
'node', full_id, 'database', vm_path)
print(file_to_remove)
os.remove(file_to_remove)
# Try to continue failed MERGE
try:
self.merge_backup(backup_dir, "node", backup_id)
self.assertEqual(
1, 0,
"Expecting Error because of backup corruption.\n "
"Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
"ERROR: Merging of backup {0} failed".format(
backup_id) in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
self.merge_backup(backup_dir, "node", backup_id)
self.assertEqual(
'CORRUPT', self.show_pb(backup_dir, 'node')[0]['status'])
'OK', self.show_pb(backup_dir, 'node')[0]['status'])
# self.del_test_dir(module_name, fname)
node.cleanup()
self.restore_node(backup_dir, 'node', node)
pgdata_restored = self.pgdata_content(node.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_merge_backup_from_future(self):
"""
take FULL backup, take PAGE backup from the future,
try to merge page with FULL
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2',
'autovacuum': 'off'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# Take FULL
self.backup_node(backup_dir, 'node', node)
node.pgbench_init(scale=3)
# Take PAGE from future
backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
with open(
os.path.join(
backup_dir, 'backups', 'node',
backup_id, "backup.control"), "a") as conf:
conf.write("start-time='{:%Y-%m-%d %H:%M:%S}'\n".format(
datetime.now() + timedelta(days=3)))
# rename directory
new_id = self.show_pb(backup_dir, 'node')[1]['id']
os.rename(
os.path.join(backup_dir, 'backups', 'node', backup_id),
os.path.join(backup_dir, 'backups', 'node', new_id))
pgbench = node.pgbench(options=['-T', '3', '-c', '2', '--no-vacuum'])
pgbench.wait()
backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
pgdata = self.pgdata_content(node.data_dir)
node.cleanup()
self.merge_backup(backup_dir, 'node', backup_id=backup_id)
self.restore_node(backup_dir, 'node', node, backup_id=backup_id)
pgdata_restored = self.pgdata_content(node.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
self.del_test_dir(module_name, fname)
# 1. always use parent link when merging (intermediates may be from different chain)
# 2. page backup we are merging with may disappear after failed merge,


@@ -5,6 +5,7 @@ import subprocess
from datetime import datetime
import sys
from time import sleep
from datetime import datetime, timedelta
module_name = 'restore'
@@ -1724,3 +1725,60 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_restore_backup_from_future(self):
"""more complex test_restore_chain()"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# Take FULL
self.backup_node(backup_dir, 'node', node)
node.pgbench_init(scale=3)
#pgbench = node.pgbench(options=['-T', '20', '-c', '2'])
#pgbench.wait()
# Take PAGE from future
backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
with open(
os.path.join(
backup_dir, 'backups', 'node',
backup_id, "backup.control"), "a") as conf:
conf.write("start-time='{:%Y-%m-%d %H:%M:%S}'\n".format(
datetime.now() + timedelta(days=3)))
# rename directory
new_id = self.show_pb(backup_dir, 'node')[1]['id']
os.rename(
os.path.join(backup_dir, 'backups', 'node', backup_id),
os.path.join(backup_dir, 'backups', 'node', new_id))
pgbench = node.pgbench(options=['-T', '3', '-c', '2', '--no-vacuum'])
pgbench.wait()
backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
pgdata = self.pgdata_content(node.data_dir)
node.cleanup()
self.restore_node(backup_dir, 'node', node, backup_id=backup_id)
pgdata_restored = self.pgdata_content(node.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
self.del_test_dir(module_name, fname)


@@ -146,6 +146,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
con.commit()
target_xid = res[0][0]
self.switch_wal_segment(node)
time.sleep(5)
self.assertIn(
"INFO: Backup validation completed successfully",