
[Issue #143] review feedback implementation

Grigory Smolkin 2020-04-03 20:03:01 +03:00
parent 4a94fdaa33
commit 4d51c7f549
7 changed files with 32 additions and 26 deletions


@@ -615,7 +615,7 @@ catalog_get_last_data_backup(parray *backup_list, TimeLineID tli, time_t current
switch (scan_parent_chain(backup, &tmp_backup))
{
/* broken chain */
- case 0:
+ case ChainIsBroken:
invalid_backup_id = base36enc_dup(tmp_backup->parent_backup);
elog(WARNING, "Backup %s has missing parent: %s. Cannot be a parent",
@@ -624,7 +624,7 @@ catalog_get_last_data_backup(parray *backup_list, TimeLineID tli, time_t current
continue;
/* chain is intact, but at least one parent is invalid */
- case 1:
+ case ChainIsInvalid:
invalid_backup_id = base36enc_dup(tmp_backup->start_time);
elog(WARNING, "Backup %s has invalid parent: %s. Cannot be a parent",
@@ -633,7 +633,7 @@ catalog_get_last_data_backup(parray *backup_list, TimeLineID tli, time_t current
continue;
/* chain is ok */
- case 2:
+ case ChainIsOk:
/* Yes, we could call is_parent() earlier - after choosing the ancestor,
* but this way we have an opportunity to detect and report all possible
* anomalies.
@@ -755,13 +755,14 @@ get_multi_timeline_parent(parray *backup_list, parray *tli_list,
/* Optimistically, look on current timeline for valid incremental backup, child of ancestor */
if (my_tlinfo->backups)
{
+ /* backups are sorted in descending order and we need latest valid */
for (i = 0; i < parray_num(my_tlinfo->backups); i++)
{
pgBackup *tmp_backup = NULL;
pgBackup *backup = (pgBackup *) parray_get(my_tlinfo->backups, i);
/* found suitable parent */
- if (scan_parent_chain(backup, &tmp_backup) == 2 &&
+ if (scan_parent_chain(backup, &tmp_backup) == ChainIsOk &&
is_parent(ancestor_backup->start_time, backup, false))
return backup;
}
@@ -786,7 +787,7 @@ get_multi_timeline_parent(parray *backup_list, parray *tli_list,
if (backup->stop_lsn > tmp_tlinfo->switchpoint)
continue;
- if (scan_parent_chain(backup, &tmp_backup) == 2 &&
+ if (scan_parent_chain(backup, &tmp_backup) == ChainIsOk &&
is_parent(ancestor_backup->start_time, backup, true))
return backup;
}
@@ -2382,18 +2383,18 @@ scan_parent_chain(pgBackup *current_backup, pgBackup **result_backup)
{
/* Set oldest child backup in chain */
*result_backup = target_backup;
- return 0;
+ return ChainIsBroken;
}
/* chain is ok, but some backups are invalid */
if (invalid_backup)
{
*result_backup = invalid_backup;
- return 1;
+ return ChainIsInvalid;
}
*result_backup = target_backup;
- return 2;
+ return ChainIsOk;
}
/*


@@ -859,23 +859,23 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
char xlogfname[MAXFNAMELEN];
char partial_file[MAXPGPATH];
- GetXLogFileName(xlogfname, reader_data->tli, reader_data->xlogsegno,
- wal_seg_size);
- snprintf(reader_data->xlogpath, MAXPGPATH, "%s/%s", wal_archivedir,
- xlogfname);
- snprintf(partial_file, MAXPGPATH, "%s/%s.partial", wal_archivedir,
- xlogfname);
- snprintf(reader_data->gz_xlogpath, sizeof(reader_data->gz_xlogpath),
- "%s.gz", reader_data->xlogpath);
+ GetXLogFileName(xlogfname, reader_data->tli, reader_data->xlogsegno, wal_seg_size);
+ snprintf(reader_data->xlogpath, MAXPGPATH, "%s/%s", wal_archivedir, xlogfname);
+ snprintf(reader_data->gz_xlogpath, MAXPGPATH, "%s.gz", reader_data->xlogpath);
+ /* We fall back to using .partial segment in case if we are running
+ * multi-timeline incremental backup right after standby promotion.
+ * TODO: it should be explicitly enabled.
+ */
+ snprintf(partial_file, MAXPGPATH, "%s.partial", reader_data->xlogpath);
/* If segment do not exists, but the same
* segment with '.partial' suffix does, use it instead */
if (!fileExists(reader_data->xlogpath, FIO_BACKUP_HOST) &&
fileExists(partial_file, FIO_BACKUP_HOST))
{
snprintf(reader_data->xlogpath, MAXPGPATH, "%s/%s.partial", wal_archivedir,
xlogfname);
strncpy(reader_data->xlogpath, partial_file, MAXPGPATH);
snprintf(reader_data->xlogpath, MAXPGPATH, "%s", partial_file);
}
if (fileExists(reader_data->xlogpath, FIO_BACKUP_HOST))
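The change above makes the lookup order explicit: use the finished segment from the archive if it exists, otherwise fall back to the same segment with a '.partial' suffix. A minimal sketch of that fallback as a hypothetical helper (not part of pg_probackup; it assumes the fileExists() function and fio_location type from the project's own headers, as used in the hunk above):

    /* Hypothetical helper: prefer the finished WAL segment, otherwise fall
     * back to its '.partial' counterpart (e.g. right after standby promotion).
     * Relies on pg_probackup's fileExists() and fio_location. */
    static bool
    choose_wal_segment(char *xlogpath, size_t pathlen, const char *partial_file,
                       fio_location location)
    {
        if (fileExists(xlogpath, location))
            return true;                /* finished segment is present */

        if (fileExists(partial_file, location))
        {
            /* only the .partial segment exists, so read that one instead */
            snprintf(xlogpath, pathlen, "%s", partial_file);
            return true;
        }

        return false;                   /* neither file is in the archive */
    }

With such a helper the call site would reduce to something like choose_wal_segment(reader_data->xlogpath, MAXPGPATH, partial_file, FIO_BACKUP_HOST).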


@@ -800,6 +800,11 @@ extern int pgBackupCompareIdEqual(const void *l, const void *r);
extern pgBackup* find_parent_full_backup(pgBackup *current_backup);
extern int scan_parent_chain(pgBackup *current_backup, pgBackup **result_backup);
+ /* return codes for scan_parent_chain */
+ #define ChainIsBroken 0
+ #define ChainIsInvalid 1
+ #define ChainIsOk 2
extern bool is_parent(time_t parent_backup_time, pgBackup *child_backup, bool inclusive);
extern bool is_prolific(parray *backup_list, pgBackup *target_backup);
extern bool in_backup_list(parray *backup_list, pgBackup *target_backup);
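These names replace the bare 0/1/2 return values of scan_parent_chain(). For reference, a minimal sketch of the resulting caller pattern, mirroring the switch in catalog_get_last_data_backup() above (the case bodies here are placeholders, not code from the commit):

    pgBackup   *tmp_backup = NULL;

    switch (scan_parent_chain(backup, &tmp_backup))
    {
        case ChainIsBroken:
            /* a parent is missing; tmp_backup is the oldest child that was found */
            break;
        case ChainIsInvalid:
            /* chain is intact, but tmp_backup points to an invalid parent */
            break;
        case ChainIsOk:
            /* the whole chain is present and valid */
            break;
    }

Naming the codes keeps the meaning of each branch visible at the call sites and leaves the numeric values defined in one place.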


@@ -251,7 +251,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
result = scan_parent_chain(dest_backup, &tmp_backup);
if (result == 0)
if (result == ChainIsBroken)
{
/* chain is broken, determine missing backup ID
* and orphinize all his descendants
@@ -290,7 +290,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
/* No point in doing futher */
elog(ERROR, "%s of backup %s failed.", action, base36enc(dest_backup->start_time));
}
- else if (result == 1)
+ else if (result == ChainIsInvalid)
{
/* chain is intact, but at least one parent is invalid */
set_orphan_status(backups, tmp_backup);


@@ -479,7 +479,7 @@ do_validate_instance(void)
result = scan_parent_chain(current_backup, &tmp_backup);
/* chain is broken */
- if (result == 0)
+ if (result == ChainIsBroken)
{
char *parent_backup_id;
/* determine missing backup ID */
@@ -505,7 +505,7 @@ do_validate_instance(void)
continue;
}
/* chain is whole, but at least one parent is invalid */
- else if (result == 1)
+ else if (result == ChainIsInvalid)
{
/* Oldest corrupt backup has a chance for revalidation */
if (current_backup->start_time != tmp_backup->start_time)
@@ -630,7 +630,7 @@ do_validate_instance(void)
*/
result = scan_parent_chain(backup, &tmp_backup);
- if (result == 1)
+ if (result == ChainIsInvalid)
{
/* revalidation make sense only if oldest invalid backup is current_backup
*/


@@ -819,7 +819,7 @@ class PageTest(ProbackupTest, unittest.TestCase):
self.backup_node(backup_dir, 'node', node)
# make some wals
- node.pgbench_init(scale=4)
+ node.pgbench_init(scale=10)
# delete last wal segment
wals_dir = os.path.join(backup_dir, 'wal', 'node')


@@ -1786,7 +1786,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
self.assertTrue(
'LOG: archive command failed with exit code 1' in log_content and
'DETAIL: The failed archive command was:' in log_content and
- 'INFO: pg_probackup archive-push from' in log_content,
+ 'WAL file already exists in archive with different checksum' in log_content,
'Expecting error messages about failed archive_command'
)
self.assertFalse(