Mirror of https://github.com/pgbackrest/pgbackrest.git (synced 2024-12-14 10:13:05 +02:00)
Recopy during backup when resumed file is missing or corrupt.
Previously, a recopy would occur if the size or checksum was invalid, but any other error would terminate the backup. Instead, recopy the resumed file on any error. If the error is systemic (e.g. a network failure), it should show up again during the recopy.
This commit is contained in:
parent d3f717c892
commit 1b3770e248
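The two hunks below change backupFile() and add a unit test for the new behavior. As a reading aid, here is a minimal, self-contained sketch in plain C of the decision the commit changes; the names (CopyResult, resumeDecision, verifyAlwaysFails) are hypothetical illustrations, not pgBackRest code. Before the change, an error while re-reading the resumed file propagated and terminated the backup; after it, any error is treated like a checksum/size mismatch and the file is simply marked for recopy.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for pgBackRest's backup copy results -- hypothetical names. */
typedef enum
{
    copyResultChecksum,                             /* resumed copy verified: reuse it */
    copyResultReCopy                                /* resumed copy unusable: copy it again */
} CopyResult;

/* Hypothetical verification hook: re-read the stored copy and report its size and
   checksum, returning false on any error (missing file, failed decrypt/decompress, I/O). */
typedef bool (*VerifyFn)(const char *path, uint64_t *size, uint64_t *checksum);

/* Behavior after this commit: any verification error downgrades the result to recopy
   instead of terminating the backup; a systemic error (e.g. network failure) will
   simply show up again during the recopy. */
static CopyResult
resumeDecision(VerifyFn verify, const char *path, uint64_t expectedSize, uint64_t expectedChecksum)
{
    uint64_t size = 0;
    uint64_t checksum = 0;

    if (!verify(path, &size, &checksum))
        return copyResultReCopy;                    /* recopy on any kind of error */

    if (size == expectedSize && checksum == expectedChecksum)
        return copyResultChecksum;                  /* no need to recopy */

    return copyResultReCopy;                        /* stored copy not as expected */
}

/* Toy verifier that always fails, standing in for a missing or corrupt repo file. */
static bool
verifyAlwaysFails(const char *path, uint64_t *size, uint64_t *checksum)
{
    (void)path; (void)size; (void)checksum;
    return false;
}

int
main(void)
{
    /* Mirrors the intent of the new unit test: the file is listed in the resumed
       manifest but cannot be read back from the repo, so the answer is recopy. */
    puts(resumeDecision(verifyAlwaysFails, "resumed/file", 9, 0) == copyResultReCopy ? "recopy" : "reuse");
    return 0;
}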
@@ -133,40 +133,51 @@ backupFile(
         }
         else if (!delta || pgFileMatch)
         {
-            // Generate checksum/size for the repo file
-            IoRead *read = storageReadIo(storageNewReadP(storageRepo(), repoPathFile));
+            // Check the repo file in a try block because on error (e.g. missing or corrupt file that can't be decrypted or
+            // decompressed) we should recopy rather than ending the backup.
+            TRY_BEGIN()
+            {
+                // Generate checksum/size for the repo file
+                IoRead *read = storageReadIo(storageNewReadP(storageRepo(), repoPathFile));
 
-            if (cipherType != cipherTypeNone)
-            {
-                ioFilterGroupAdd(
-                    ioReadFilterGroup(read), cipherBlockNew(cipherModeDecrypt, cipherType, BUFSTR(cipherPass), NULL));
-            }
+                if (cipherType != cipherTypeNone)
+                {
+                    ioFilterGroupAdd(
+                        ioReadFilterGroup(read), cipherBlockNew(cipherModeDecrypt, cipherType, BUFSTR(cipherPass), NULL));
+                }
 
-            if (repoFileCompress)
-                ioFilterGroupAdd(ioReadFilterGroup(read), gzipDecompressNew(false));
+                if (repoFileCompress)
+                    ioFilterGroupAdd(ioReadFilterGroup(read), gzipDecompressNew(false));
 
-            ioFilterGroupAdd(ioReadFilterGroup(read), cryptoHashNew(HASH_TYPE_SHA1_STR));
-            ioFilterGroupAdd(ioReadFilterGroup(read), ioSizeNew());
+                ioFilterGroupAdd(ioReadFilterGroup(read), cryptoHashNew(HASH_TYPE_SHA1_STR));
+                ioFilterGroupAdd(ioReadFilterGroup(read), ioSizeNew());
 
-            ioReadDrain(read);
+                ioReadDrain(read);
 
-            // Test checksum/size
-            const String *pgTestChecksum = varStr(
-                ioFilterGroupResult(ioReadFilterGroup(read), CRYPTO_HASH_FILTER_TYPE_STR));
-            uint64_t pgTestSize = varUInt64Force(ioFilterGroupResult(ioReadFilterGroup(read), SIZE_FILTER_TYPE_STR));
+                // Test checksum/size
+                const String *pgTestChecksum = varStr(
+                    ioFilterGroupResult(ioReadFilterGroup(read), CRYPTO_HASH_FILTER_TYPE_STR));
+                uint64_t pgTestSize = varUInt64Force(ioFilterGroupResult(ioReadFilterGroup(read), SIZE_FILTER_TYPE_STR));
 
-            // No need to recopy if checksum/size match
-            if (pgFileSize == pgTestSize && strEq(pgFileChecksum, pgTestChecksum))
-            {
-                memContextSwitch(MEM_CONTEXT_OLD());
-                result.backupCopyResult = backupCopyResultChecksum;
-                result.copySize = pgTestSize;
-                result.copyChecksum = strDup(pgTestChecksum);
-                memContextSwitch(MEM_CONTEXT_TEMP());
-            }
-            // Else recopy when repo file is not as expected
-            else
-                result.backupCopyResult = backupCopyResultReCopy;
+                // No need to recopy if checksum/size match
+                if (pgFileSize == pgTestSize && strEq(pgFileChecksum, pgTestChecksum))
+                {
+                    memContextSwitch(MEM_CONTEXT_OLD());
+                    result.backupCopyResult = backupCopyResultChecksum;
+                    result.copySize = pgTestSize;
+                    result.copyChecksum = strDup(pgTestChecksum);
+                    memContextSwitch(MEM_CONTEXT_TEMP());
+                }
+                // Else recopy when repo file is not as expected
+                else
+                    result.backupCopyResult = backupCopyResultReCopy;
+            }
+            // Recopy on any kind of error
+            CATCH_ANY()
+            {
+                result.backupCopyResult = backupCopyResultReCopy;
+            }
+            TRY_END();
         }
     }
 }
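The verification in the hunk above streams the stored copy through an I/O filter group (optional decryption, optional gzip decompression, then a SHA-1 filter and a size filter), drains the read, and pulls the accumulated checksum and size back out with ioFilterGroupResult(). For readers unfamiliar with that abstraction, here is a stripped-down, self-contained analogue in plain C; the Filter struct, drain(), and the rolling hash are hypothetical illustrations, not pgBackRest's API, and the decrypt/decompress stages are omitted.

#include <stdint.h>
#include <stdio.h>

/* A filter observes every chunk flowing through the read and accumulates one result. */
typedef struct Filter
{
    void (*process)(struct Filter *this, const uint8_t *buffer, size_t size);
    uint64_t result;
} Filter;

/* Size filter: counts bytes (the role ioSizeNew() plays above). */
static void
sizeProcess(struct Filter *this, const uint8_t *buffer, size_t size)
{
    (void)buffer;
    this->result += size;
}

/* Toy rolling hash standing in for the SHA-1 filter (cryptoHashNew()). */
static void
hashProcess(struct Filter *this, const uint8_t *buffer, size_t size)
{
    for (size_t i = 0; i < size; i++)
        this->result = this->result * 31 + buffer[i];
}

/* Drain the stream through every filter in the group, discarding the data itself --
   the analogue of ioReadDrain() after the ioFilterGroupAdd() calls. Returns -1 on error. */
static int
drain(FILE *file, Filter **group, size_t groupSize)
{
    uint8_t buffer[4096];
    size_t bytes;

    while ((bytes = fread(buffer, 1, sizeof(buffer), file)) > 0)
        for (size_t filterIdx = 0; filterIdx < groupSize; filterIdx++)
            group[filterIdx]->process(group[filterIdx], buffer, bytes);

    return ferror(file) ? -1 : 0;
}

int
main(void)
{
    Filter sizeFilter = {sizeProcess, 0};
    Filter hashFilter = {hashProcess, 0};
    Filter *group[] = {&hashFilter, &sizeFilter};

    FILE *file = fopen("some-repo-file", "rb");

    /* On any error the caller would mark the file for recopy rather than aborting --
       the behavior this commit introduces. */
    if (file == NULL || drain(file, group, 2) != 0)
    {
        if (file != NULL)
            fclose(file);

        puts("recopy");
        return 0;
    }

    fclose(file);

    /* Pull the accumulated results back out, like ioFilterGroupResult(). */
    printf("size=%llu hash=%llu\n", (unsigned long long)sizeFilter.result, (unsigned long long)hashFilter.result);
    return 0;
}

Streaming the data through filters computes the checksum and size in a single pass without holding the whole file in memory, which is what makes the recopy decision cheap even for large resumed files.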
@@ -234,6 +234,22 @@ testRun(void)
                 storageExistsP(storageRepo(), backupPathFile) && result.pageChecksumResult == NULL),
             true, " copy");
 
+        // -------------------------------------------------------------------------------------------------------------------------
+        TEST_TITLE("resumed file is missing in repo but present in resumed manifest, recopy");
+
+        TEST_ASSIGN(
+            result,
+            backupFile(
+                pgFile, false, 9, strNew("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 0, STRDEF(BOGUS_STR), false, false, 1,
+                backupLabel, true, cipherTypeNone, NULL),
+            "backup file");
+        TEST_RESULT_UINT(result.copySize + result.repoSize, 18, " copy=repo=pgFile size");
+        TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultReCopy, " check copy result");
+        TEST_RESULT_BOOL(
+            (strEqZ(result.copyChecksum, "9bc8ab2dda60ef4beed07d1e19ce0676d5edde67") &&
+                storageExistsP(storageRepo(), backupPathFile) && result.pageChecksumResult == NULL),
+            true, " recopy");
+
         // -------------------------------------------------------------------------------------------------------------------------
         // File exists in repo and db, checksum not same in repo, delta set, ignoreMissing false, no hasReference - RECOPY
         TEST_RESULT_VOID(