
Add repository checksum to make verify and resume more efficient.

Calculate a checksum of the data stored in the repository when a file is transformed (e.g. compressed). This allows resume and verify to operate without needing to decompress/decrypt the data.

This can also be used to verify more complex formats, such as block incremental, and to allow backups from the repository without needing to decompress the data to verify the checksum.
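
The mechanism can be sketched outside pgBackRest's filter framework: hash the stream once before and once after the transform, in a single pass. The standalone example below uses zlib and OpenSSL SHA1 as stand-ins for the compress and cryptoHash filters; the names and the one-shot deflate call are illustrative only, not pgBackRest APIs.

/* Minimal sketch (not pgBackRest code): compute the original and repo
   checksums in one pass by hashing the stream before and after the
   transform. Build with -lz -lcrypto. */
#include <stdio.h>
#include <openssl/sha.h>
#include <zlib.h>

int main(void)
{
    const unsigned char in[] = "example file content";
    unsigned char out[128];
    SHA_CTX rawHash, repoHash;

    SHA1_Init(&rawHash);
    SHA1_Init(&repoHash);

    // Checksum of the original data (the manifest "checksum" field)
    SHA1_Update(&rawHash, in, sizeof(in) - 1);

    // Transform (compress) the data, then checksum the transformed bytes
    // (the repo checksum), so verify/resume can skip decompression
    z_stream zs = {0};
    deflateInit(&zs, Z_DEFAULT_COMPRESSION);
    zs.next_in = (unsigned char *)in;
    zs.avail_in = sizeof(in) - 1;
    zs.next_out = out;
    zs.avail_out = sizeof(out);
    deflate(&zs, Z_FINISH);
    SHA1_Update(&repoHash, out, sizeof(out) - zs.avail_out);
    deflateEnd(&zs);

    unsigned char raw[SHA_DIGEST_LENGTH], repo[SHA_DIGEST_LENGTH];
    SHA1_Final(raw, &rawHash);
    SHA1_Final(repo, &repoHash);

    for (int i = 0; i < SHA_DIGEST_LENGTH; i++)
        printf("%02x", raw[i]);
    printf("  original\n");

    for (int i = 0; i < SHA_DIGEST_LENGTH; i++)
        printf("%02x", repo[i]);
    printf("  repo\n");

    return 0;
}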

Add some basic encrypted tests to maintain coverage. These will be expanded in a future commit.
David Steele
2022-12-22 09:26:26 +07:00
committed by GitHub
parent 2ab845e263
commit 56b55f81e8
12 changed files with 281 additions and 290 deletions

View File

@ -37,13 +37,16 @@
<commit subject="Store manifest checksums in memory more efficiently.">
<github-pull-request id="1953"/>
</commit>
<commit subject="Add repository checksum to make verify and resume more efficient.">
<github-pull-request id="1956"/>
</commit>
<release-item-contributor-list>
<release-item-contributor id="david.steele"/>
<release-item-reviewer id="stefan.fercot"/>
</release-item-contributor-list>
<p>Store manifest checksums in memory more efficiently.</p>
<p>Add repository checksum to make verify and resume more efficient.</p>
</release-item>
</release-development-list>
</release-core-list>

View File

@ -582,6 +582,7 @@ backupResumeClean(
file.sizeRepo = fileResume.sizeRepo;
file.checksumSha1 = fileResume.checksumSha1;
file.checksumRepoSha1 = fileResume.checksumRepoSha1;
file.checksumPage = fileResume.checksumPage;
file.checksumPageError = fileResume.checksumPageError;
file.checksumPageErrorList = fileResume.checksumPageErrorList;
@ -919,6 +920,7 @@ backupFilePut(BackupData *backupData, Manifest *manifest, const String *name, ti
MEM_CONTEXT_TEMP_BEGIN()
{
// Create file
bool repoChecksum = false;
const String *manifestName = strNewFmt(MANIFEST_TARGET_PGDATA "/%s", strZ(name));
CompressType compressType = compressTypeEnum(cfgOptionStrId(cfgOptCompressType));
@ -939,11 +941,24 @@ backupFilePut(BackupData *backupData, Manifest *manifest, const String *name, ti
{
ioFilterGroupAdd(
ioWriteFilterGroup(storageWriteIo(write)), compressFilter(compressType, cfgOptionInt(cfgOptCompressLevel)));
repoChecksum = true;
}
// Add encryption filter if required
cipherBlockFilterGroupAdd(
filterGroup, cfgOptionStrId(cfgOptRepoCipherType), cipherModeEncrypt, manifestCipherSubPass(manifest));
if (manifestCipherSubPass(manifest) != NULL)
{
ioFilterGroupAdd(
ioWriteFilterGroup(storageWriteIo(write)),
cipherBlockNewP(
cipherModeEncrypt, cfgOptionStrId(cfgOptRepoCipherType), BUFSTR(manifestCipherSubPass(manifest))));
repoChecksum = true;
}
// Capture checksum of file stored in the repo if filters that modify the output have been applied
if (repoChecksum)
ioFilterGroupAdd(filterGroup, cryptoHashNew(hashTypeSha1));
// Add size filter last to calculate repo size
ioFilterGroupAdd(filterGroup, ioSizeNew());
@ -964,9 +979,12 @@ backupFilePut(BackupData *backupData, Manifest *manifest, const String *name, ti
.size = strSize(content),
.sizeRepo = pckReadU64P(ioFilterGroupResultP(filterGroup, SIZE_FILTER_TYPE)),
.timestamp = timestamp,
.checksumSha1 = bufPtr(pckReadBinP(ioFilterGroupResultP(filterGroup, CRYPTO_HASH_FILTER_TYPE))),
.checksumSha1 = bufPtr(pckReadBinP(ioFilterGroupResultP(filterGroup, CRYPTO_HASH_FILTER_TYPE, .idx = 0))),
};
if (repoChecksum)
file.checksumRepoSha1 = bufPtr(pckReadBinP(ioFilterGroupResultP(filterGroup, CRYPTO_HASH_FILTER_TYPE, .idx = 1)));
manifestFileAdd(manifest, &file);
LOG_DETAIL_FMT("wrote '%s' file returned from backup stop function", strZ(name));
@ -1165,6 +1183,7 @@ backupJobResult(
const uint64_t bundleOffset = pckReadU64P(jobResult);
const uint64_t repoSize = pckReadU64P(jobResult);
const Buffer *const copyChecksum = pckReadBinP(jobResult);
const Buffer *const repoChecksum = pckReadBinP(jobResult);
PackRead *const checksumPageResult = pckReadPackReadP(jobResult);
// Increment backup copy progress
@ -1305,6 +1324,7 @@ backupJobResult(
file.size = copySize;
file.sizeRepo = repoSize;
file.checksumSha1 = bufPtrConst(copyChecksum);
file.checksumRepoSha1 = repoChecksum != NULL ? bufPtrConst(repoChecksum) : NULL;
file.reference = NULL;
file.checksumPageError = checksumPageError;
file.checksumPageErrorList = checksumPageErrorList != NULL ?
@ -1718,6 +1738,8 @@ static ProtocolParallelJob *backupJobCallback(void *data, unsigned int clientIdx
pckWriteBinP(param, file.checksumSha1 != NULL ? BUF(file.checksumSha1, HASH_TYPE_SHA1_SIZE) : NULL);
pckWriteBoolP(param, file.checksumPage);
pckWriteStrP(param, file.name);
pckWriteBinP(param, file.checksumRepoSha1 != NULL ? BUF(file.checksumRepoSha1, HASH_TYPE_SHA1_SIZE) : NULL);
pckWriteU64P(param, file.sizeRepo);
pckWriteBoolP(param, file.resume);
pckWriteBoolP(param, file.reference != NULL);
@ -2091,10 +2113,9 @@ backupArchiveCheckCopy(const BackupData *const backupData, Manifest *const manif
.size = backupData->walSegmentSize,
.sizeRepo = pckReadU64P(ioFilterGroupResultP(filterGroup, SIZE_FILTER_TYPE)),
.timestamp = manifestData(manifest)->backupTimestampStop,
.checksumSha1 = bufPtr(bufNewDecode(encodingHex, strSubN(archiveFile, 25, 40))),
};
file.checksumSha1 = bufPtr(bufNewDecode(encodingHex, strSubN(archiveFile, 25, 40)));
manifestFileAdd(manifest, &file);
}
}

View File

@ -135,28 +135,11 @@ backupFile(
}
// Else if the pg file matches or is unknown because delta was not performed then check the repo file
else if (!file->pgFileDelta || pgFileMatch)
{
// Check the repo file in a try block because on error (e.g. missing or corrupt file that can't be decrypted
// or decompressed) we should recopy rather than ending the backup.
TRY_BEGIN()
{
// Generate checksum/size for the repo file
IoRead *read = storageReadIo(storageNewReadP(storageRepo(), repoFile));
if (cipherType != cipherTypeNone)
{
ioFilterGroupAdd(
ioReadFilterGroup(read),
cipherBlockNewP(cipherModeDecrypt, cipherType, BUFSTR(cipherPass)));
}
// Decompress the file if compressed
if (repoFileCompressType != compressTypeNone)
ioFilterGroupAdd(ioReadFilterGroup(read), decompressFilter(repoFileCompressType));
ioFilterGroupAdd(ioReadFilterGroup(read), cryptoHashNew(hashTypeSha1));
ioFilterGroupAdd(ioReadFilterGroup(read), ioSizeNew());
ioReadDrain(read);
// Test checksum/size
@ -164,14 +147,18 @@ backupFile(
ioFilterGroupResultP(ioReadFilterGroup(read), CRYPTO_HASH_FILTER_TYPE));
uint64_t pgTestSize = pckReadU64P(ioFilterGroupResultP(ioReadFilterGroup(read), SIZE_FILTER_TYPE));
// No need to recopy if checksum/size match
if (file->pgFileSize == pgTestSize && bufEq(file->pgFileChecksum, pgTestChecksum))
// No need to recopy if checksum/size match. When the repo checksum is missing still compare to repo size
// since the repo checksum should only be missing when the repo file was not compressed/encrypted, i.e. the
// repo size should match the original size. There is no need to worry about old manifests here since resume
// does not work across versions.
if (file->repoFileSize == pgTestSize &&
bufEq(file->repoFileChecksum != NULL ? file->repoFileChecksum : file->pgFileChecksum, pgTestChecksum))
{
MEM_CONTEXT_BEGIN(lstMemContext(result))
{
fileResult->backupCopyResult = backupCopyResultChecksum;
fileResult->copySize = pgTestSize;
fileResult->copyChecksum = bufDup(pgTestChecksum);
fileResult->copySize = file->pgFileSize;
fileResult->copyChecksum = bufDup(file->pgFileChecksum);
}
MEM_CONTEXT_END();
}
@ -179,13 +166,6 @@ backupFile(
else
fileResult->backupCopyResult = backupCopyResultReCopy;
}
// Recopy on any kind of error
CATCH_ANY()
{
fileResult->backupCopyResult = backupCopyResultReCopy;
}
TRY_END();
}
}
}
MEM_CONTEXT_TEMP_END();
@ -211,6 +191,8 @@ backupFile(
// Setup pg file for read. Only read as many bytes as passed in pgFileSize. If the file is growing it does no
// good to copy data past the end of the size recorded in the manifest since those blocks will need to be
// replayed from WAL during recovery.
bool repoChecksum = false;
StorageRead *read = storageNewReadP(
storagePg(), file->pgFile, .ignoreMissing = file->pgFileIgnoreMissing, .compressible = compressible,
.limit = file->pgFileCopyExactSize ? VARUINT64(file->pgFileSize) : NULL);
@ -231,6 +213,8 @@ backupFile(
{
ioFilterGroupAdd(
ioReadFilterGroup(storageReadIo(read)), compressFilter(repoFileCompressType, repoFileCompressLevel));
repoChecksum = true;
}
// If there is a cipher then add the encrypt filter
@ -239,8 +223,14 @@ backupFile(
ioFilterGroupAdd(
ioReadFilterGroup(storageReadIo(read)),
cipherBlockNewP(cipherModeEncrypt, cipherType, BUFSTR(cipherPass)));
repoChecksum = true;
}
// Capture checksum of file stored in the repo if filters that modify the output have been applied
if (repoChecksum)
ioFilterGroupAdd(ioReadFilterGroup(storageReadIo(read)), cryptoHashNew(hashTypeSha1));
// Add size filter last to calculate repo size
ioFilterGroupAdd(ioReadFilterGroup(storageReadIo(read)), ioSizeNew());
@ -276,7 +266,7 @@ backupFile(
ioFilterGroupResultP(ioReadFilterGroup(storageReadIo(read)), SIZE_FILTER_TYPE, .idx = 0));
fileResult->bundleOffset = bundleOffset;
fileResult->copyChecksum = pckReadBinP(
ioFilterGroupResultP(ioReadFilterGroup(storageReadIo(read)), CRYPTO_HASH_FILTER_TYPE));
ioFilterGroupResultP(ioReadFilterGroup(storageReadIo(read)), CRYPTO_HASH_FILTER_TYPE, .idx = 0));
fileResult->repoSize = pckReadU64P(
ioFilterGroupResultP(ioReadFilterGroup(storageReadIo(read)), SIZE_FILTER_TYPE, .idx = 1));
@ -286,6 +276,14 @@ backupFile(
fileResult->pageChecksumResult = pckDup(
ioFilterGroupResultPackP(ioReadFilterGroup(storageReadIo(read)), PAGE_CHECKSUM_FILTER_TYPE));
}
// Get repo checksum
if (repoChecksum)
{
fileResult->repoChecksum = pckReadBinP(
ioFilterGroupResultP(
ioReadFilterGroup(storageReadIo(read)), CRYPTO_HASH_FILTER_TYPE, .idx = 1));
}
}
MEM_CONTEXT_END();
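
The resume comparison above is compact; a hypothetical helper makes the fallback explicit. Names and types here are invented for illustration: when the repo checksum is absent the file must have been stored raw, so the original checksum still describes the repo bytes.

/* Sketch of the resume decision (hypothetical names, not pgBackRest's
   BackupFile). A file already in the repo is kept only when its hashed
   bytes match what the resumed manifest expects. */
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#define CHECKSUM_SIZE 20                            // SHA1 digest size

typedef struct
{
    const unsigned char *pgChecksum;                // Checksum of original data
    const unsigned char *repoChecksum;              // Checksum as stored, NULL if stored raw
    size_t repoSize;                                // Expected size as stored
} ResumeFile;

// Return true when the copied file can be kept without recopying
bool
resumeKeep(const ResumeFile *file, const unsigned char *actualChecksum, size_t actualSize)
{
    // Compare against the checksum that describes the bytes actually in the
    // repo: the repo checksum when a transform was applied, else the
    // original checksum (a raw file is byte-identical to its source)
    const unsigned char *expected =
        file->repoChecksum != NULL ? file->repoChecksum : file->pgChecksum;

    return actualSize == file->repoSize && memcmp(expected, actualChecksum, CHECKSUM_SIZE) == 0;
}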

View File

@ -34,6 +34,8 @@ typedef struct BackupFile
const Buffer *pgFileChecksum; // Expected pg file checksum
bool pgFileChecksumPage; // Validate page checksums?
const String *manifestFile; // Repo file
const Buffer *repoFileChecksum; // Expected repo file checksum
uint64_t repoFileSize; // Expected repo file size
bool manifestFileResume; // Checksum repo file before copying
bool manifestFileHasReference; // Reference to prior backup, if any
} BackupFile;
@ -44,6 +46,7 @@ typedef struct BackupFileResult
BackupCopyResult backupCopyResult;
uint64_t copySize;
Buffer *copyChecksum;
Buffer *repoChecksum; // Checksum repo file (including compression, etc.)
uint64_t bundleOffset; // Offset in bundle if any
uint64_t repoSize;
Pack *pageChecksumResult;

View File

@ -47,6 +47,8 @@ backupFileProtocol(PackRead *const param, ProtocolServer *const server)
file.pgFileChecksum = pckReadBinP(param);
file.pgFileChecksumPage = pckReadBoolP(param);
file.manifestFile = pckReadStrP(param);
file.repoFileChecksum = pckReadBinP(param);
file.repoFileSize = pckReadU64P(param);
file.manifestFileResume = pckReadBoolP(param);
file.manifestFileHasReference = pckReadBoolP(param);
@ -70,6 +72,7 @@ backupFileProtocol(PackRead *const param, ProtocolServer *const server)
pckWriteU64P(resultPack, fileResult->bundleOffset);
pckWriteU64P(resultPack, fileResult->repoSize);
pckWriteBinP(resultPack, fileResult->copyChecksum);
pckWriteBinP(resultPack, fileResult->repoChecksum);
pckWritePackP(resultPack, fileResult->pageChecksumResult);
}
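
One detail worth noting: the reader above consumes parameters in exactly the order backupJobCallback wrote them, with the new repoFileChecksum/repoFileSize pair slotted between manifestFile and manifestFileResume on both ends. A toy stand-in (pgBackRest's Pack format is richer, with typed fields, but the same order discipline applies) shows why the two ends must agree:

/* Toy positional pack (illustration only -- not pgBackRest's Pack type).
   Writer and reader must agree on field order, which is why the new repo
   fields were added at the same position on both ends of the protocol. */
#include <assert.h>
#include <stdint.h>
#include <string.h>

typedef struct
{
    uint8_t buf[64];
    size_t pos;
} ToyPack;

void toyWriteU64(ToyPack *pack, uint64_t value)
{
    memcpy(pack->buf + pack->pos, &value, sizeof(value));
    pack->pos += sizeof(value);
}

uint64_t toyReadU64(ToyPack *pack)
{
    uint64_t value;
    memcpy(&value, pack->buf + pack->pos, sizeof(value));
    pack->pos += sizeof(value);
    return value;
}

int main(void)
{
    ToyPack pack = {.pos = 0};

    toyWriteU64(&pack, 20);                         // e.g. repo checksum size
    toyWriteU64(&pack, 9);                          // e.g. repo file size

    pack.pos = 0;                                   // Reader starts over

    assert(toyReadU64(&pack) == 20);                // Must read in write order
    assert(toyReadU64(&pack) == 9);
    return 0;
}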

View File

@ -994,11 +994,22 @@ verifyBackup(VerifyJobData *const jobData)
pckWriteBoolP(param, false);
}
// Use the repo checksum when present
if (fileData.checksumRepoSha1 != NULL)
{
pckWriteU32P(param, compressTypeNone);
pckWriteBinP(param, BUF(fileData.checksumRepoSha1, HASH_TYPE_SHA1_SIZE));
pckWriteU64P(param, fileData.sizeRepo);
pckWriteStrP(param, NULL);
}
// Else use the file checksum, which may require additional filters, e.g. decompression
else
{
pckWriteU32P(param, manifestData(jobData->manifest)->backupOptionCompressType);
// If the checksum is not present in the manifest, it will be calculated by manifest load
pckWriteBinP(param, BUF(fileData.checksumSha1, HASH_TYPE_SHA1_SIZE));
pckWriteU64P(param, fileData.size);
pckWriteStrP(param, jobData->backupCipherPass);
}
// Assign job to result (prepend backup label being processed to the key since some files are in a prior
// backup)
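
The effect of the branch above, sketched with invented types: a present repo checksum lets the verify job hash the file exactly as stored, so no decompress filter and no passphrase need to be shipped to the worker.

/* Sketch of the verify parameter selection (hypothetical types). With a
   repo checksum the file is verified byte-for-byte as stored; without one
   the worker must undo compression/encryption and compare the original
   checksum and size. */
#include <stddef.h>

typedef struct
{
    const unsigned char *checksum;                  // Original checksum
    const unsigned char *repoChecksum;              // Repo checksum, or NULL
    size_t size;                                    // Original size
    size_t repoSize;                                // Size as stored
} VerifyFile;

typedef struct
{
    int compressType;                               // Decompress filter to apply (0 = none)
    const unsigned char *expectChecksum;            // Checksum to compare
    size_t expectSize;                              // Size to compare
    const char *cipherPass;                         // Passphrase, or NULL to read raw
} VerifyJob;

VerifyJob
verifyJobBuild(const VerifyFile *file, int backupCompressType, const char *backupCipherPass)
{
    // Repo checksum present: hash the stored bytes directly
    if (file->repoChecksum != NULL)
        return (VerifyJob){.compressType = 0, .expectChecksum = file->repoChecksum,
                           .expectSize = file->repoSize, .cipherPass = NULL};

    // Otherwise fall back to the original checksum, which requires the
    // worker to decompress/decrypt first
    return (VerifyJob){.compressType = backupCompressType, .expectChecksum = file->checksum,
                       .expectSize = file->size, .cipherPass = backupCipherPass};
}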

View File

@ -95,6 +95,7 @@ static time_t manifestPackBaseTime = -1;
typedef enum
{
manifestFilePackFlagChecksum,
manifestFilePackFlagChecksumRepo,
manifestFilePackFlagReference,
manifestFilePackFlagBundle,
manifestFilePackFlagCopy,
@ -131,6 +132,9 @@ manifestFilePack(const Manifest *const manifest, const ManifestFile *const file)
if (file->checksumSha1 != NULL)
flag |= 1 << manifestFilePackFlagChecksum;
if (file->checksumRepoSha1 != NULL)
flag |= 1 << manifestFilePackFlagChecksumRepo;
if (file->copy)
flag |= 1 << manifestFilePackFlagCopy;
@ -188,6 +192,13 @@ manifestFilePack(const Manifest *const manifest, const ManifestFile *const file)
bufferPos += HASH_TYPE_SHA1_SIZE;
}
// SHA1 repo checksum
if (file->checksumRepoSha1 != NULL)
{
memcpy((uint8_t *)buffer + bufferPos, file->checksumRepoSha1, HASH_TYPE_SHA1_SIZE);
bufferPos += HASH_TYPE_SHA1_SIZE;
}
// Reference
if (file->reference != NULL)
{
@ -290,6 +301,13 @@ manifestFileUnpack(const Manifest *const manifest, const ManifestFilePack *const
bufferPos += HASH_TYPE_SHA1_SIZE;
}
// SHA1 repo checksum
if (flag & (1 << manifestFilePackFlagChecksumRepo))
{
result.checksumRepoSha1 = (const uint8_t *)filePack + bufferPos;
bufferPos += HASH_TYPE_SHA1_SIZE;
}
// Reference
if (flag & (1 << manifestFilePackFlagReference))
{
@ -1523,6 +1541,7 @@ manifestBuildIncr(Manifest *this, const Manifest *manifestPrior, BackupType type
{
file.sizeRepo = filePrior.sizeRepo;
file.checksumSha1 = filePrior.checksumSha1;
file.checksumRepoSha1 = filePrior.checksumRepoSha1;
file.reference = filePrior.reference != NULL ? filePrior.reference : manifestPrior->pub.data.backupLabel;
file.checksumPage = filePrior.checksumPage;
file.checksumPageError = filePrior.checksumPageError;
@ -1684,6 +1703,7 @@ manifestBuildComplete(
#define MANIFEST_KEY_BUNDLE_ID STRID5("bni", 0x25c20)
#define MANIFEST_KEY_BUNDLE_OFFSET STRID5("bno", 0x3dc20)
#define MANIFEST_KEY_CHECKSUM STRID5("checksum", 0x6d66b195030)
#define MANIFEST_KEY_CHECKSUM_REPO STRID5("rck", 0x2c720)
#define MANIFEST_KEY_CHECKSUM_PAGE "checksum-page"
#define MANIFEST_KEY_CHECKSUM_PAGE_ERROR "checksum-page-error"
#define MANIFEST_KEY_DB_CATALOG_VERSION "db-catalog-version"
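
The 0x2c720 constant above is the 5-bit packed form of "rck". A small decoder, assuming pgBackRest's StringId layout (a 4-bit encoding tag in the low bits, then one a-z character per 5 bits, with a = 1), makes the magic number legible:

/* Decode a STRID5 constant, assuming pgBackRest's StringId layout. Shown
   only to make the magic number above legible. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t strId = 0x2c720;   // MANIFEST_KEY_CHECKSUM_REPO ("rck")

    strId >>= 4;                // Skip the 4-bit encoding tag

    while (strId != 0)
    {
        putchar((char)('a' + (strId & 0x1F) - 1));
        strId >>= 5;
    }

    putchar('\n');              // Prints: rck
    return 0;
}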
@ -1848,6 +1868,11 @@ manifestLoadCallback(void *callbackData, const String *const section, const Stri
else
file.mode = manifest->fileModeDefault;
// The repo checksum might not exist if this is a partial save that was done during the backup to preserve checksums for
// already backed up files or if this is an older manifest
if (jsonReadKeyExpectStrId(json, MANIFEST_KEY_CHECKSUM_REPO))
file.checksumRepoSha1 = bufPtr(bufNewDecode(encodingHex, jsonReadStr(json)));
// Reference
if (jsonReadKeyExpectStrId(json, MANIFEST_KEY_REFERENCE))
{
@ -2555,6 +2580,15 @@ manifestSaveCallback(void *const callbackData, const String *const sectionNext,
if (file.mode != saveData->fileModeDefault)
jsonWriteStrFmt(jsonWriteKeyZ(json, MANIFEST_KEY_MODE), "%04o", file.mode);
// Save if the repo checksum is not null. The repo checksum for zero-length files may vary depending on compression
// and encryption applied.
if (file.checksumRepoSha1 != NULL)
{
jsonWriteStr(
jsonWriteKeyStrId(json, MANIFEST_KEY_CHECKSUM_REPO),
strNewEncode(encodingHex, BUF(file.checksumRepoSha1, HASH_TYPE_SHA1_SIZE)));
}
if (file.reference != NULL)
jsonWriteStr(jsonWriteKeyStrId(json, MANIFEST_KEY_REFERENCE), file.reference);
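
The packing scheme these hunks extend is straightforward: a flag word records which optional fields follow, and unpack reads them back in the same fixed order, so files without a repo checksum pay nothing for the new field. A much-simplified sketch with invented names:

/* Much-simplified sketch of the manifest file pack (hypothetical, not
   manifestFilePack): a flag byte records which optional fields are present
   and only those are written. Adding the 20-byte repo checksum costs
   nothing for files that do not have one. */
#include <stdint.h>
#include <string.h>

enum {flagChecksum = 1 << 0, flagChecksumRepo = 1 << 1};

size_t
filePack(uint8_t *buffer, const uint8_t *checksum, const uint8_t *checksumRepo)
{
    size_t pos = 1;                                 // Reserve byte 0 for flags
    uint8_t flag = 0;

    if (checksum != NULL)                           // Original SHA1, if known
    {
        flag |= flagChecksum;
        memcpy(buffer + pos, checksum, 20);
        pos += 20;
    }

    if (checksumRepo != NULL)                       // Repo SHA1, if stored transformed
    {
        flag |= flagChecksumRepo;
        memcpy(buffer + pos, checksumRepo, 20);
        pos += 20;
    }

    buffer[0] = flag;                               // Unpack reads flags, then fields in order
    return pos;
}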

View File

@ -106,6 +106,7 @@ typedef struct ManifestFile
bool checksumPageError:1; // Is there an error in the page checksum?
mode_t mode; // File mode
const uint8_t *checksumSha1; // SHA1 checksum
const uint8_t *checksumRepoSha1; // SHA1 checksum as stored in repo (including compression, etc.)
const String *checksumPageErrorList; // List of page checksum errors if there are any
const String *user; // User name
const String *group; // Group name

View File

@ -603,6 +603,9 @@ sub backupCompare
foreach my $strFileKey ($oActualManifest->keys(MANIFEST_SECTION_TARGET_FILE))
{
# Remove repo checksum
$oActualManifest->remove(&MANIFEST_SECTION_TARGET_FILE, $strFileKey, 'rck');
# Determine repo size if compression or encryption is enabled
my $strCompressType = $oExpectedManifest->{&MANIFEST_SECTION_BACKUP_OPTION}{&MANIFEST_KEY_COMPRESS_TYPE};
@ -1945,6 +1948,9 @@ sub restoreCompare
foreach my $strName ($oActualManifest->keys(MANIFEST_SECTION_TARGET_FILE))
{
# Remove repo checksum
delete($oExpectedManifestRef->{&MANIFEST_SECTION_TARGET_FILE}{$strName}{'rck'});
# When bundling zero-length files will not have a reference
if ($oExpectedManifestRef->{&MANIFEST_SECTION_BACKUP}{'backup-bundle'} &&
$oExpectedManifestRef->{&MANIFEST_SECTION_TARGET_FILE}{$strName}{&MANIFEST_SUBKEY_SIZE} == 0)

View File

@ -112,6 +112,8 @@ testBackupValidateList(
storage, strNewFmt("%s/%s", strZ(path), strZ(info.name)), .offset = file.bundleOffset,
.limit = VARUINT64(file.sizeRepo));
ioFilterGroupAdd(ioReadFilterGroup(storageReadIo(read)), cryptoHashNew(hashTypeSha1));
if (manifestData->backupOptionCompressType != compressTypeNone)
{
ioFilterGroupAdd(
@ -121,10 +123,22 @@ testBackupValidateList(
ioFilterGroupAdd(ioReadFilterGroup(storageReadIo(read)), cryptoHashNew(hashTypeSha1));
uint64_t size = bufUsed(storageGetP(read));
strCatFmt(result, ", s=%" PRIu64, size);
// Validate repo checksum
if (file.checksumRepoSha1 != NULL)
{
const Buffer *const checksum = pckReadBinP(
ioFilterGroupResultP(ioReadFilterGroup(storageReadIo(read)), CRYPTO_HASH_FILTER_TYPE));
strCatFmt(result, ", s=%" PRIu64, size);
if (!bufEq(checksum, BUF(file.checksumRepoSha1, HASH_TYPE_SHA1_SIZE)))
THROW_FMT(AssertError, "'%s' repo checksum does match manifest", strZ(file.name));
}
// Validate checksum
const Buffer *const checksum = pckReadBinP(
ioFilterGroupResultP(ioReadFilterGroup(storageReadIo(read)), CRYPTO_HASH_FILTER_TYPE, .idx = 1));
if (!bufEq(checksum, BUF(file.checksumSha1, HASH_TYPE_SHA1_SIZE)))
THROW_FMT(AssertError, "'%s' checksum does match manifest", strZ(file.name));
@ -219,6 +233,9 @@ testBackupValidateList(
file.bundleId = 0;
file.bundleOffset = 0;
// Remove repo checksum since it has been validated
file.checksumRepoSha1 = NULL;
// Update changes to manifest file
manifestFilePackUpdate(manifest, filePack, &file);
}
@ -311,6 +328,8 @@ typedef struct TestBackupPqScriptParam
bool noPriorWal; // Don't write prior test WAL segments
bool noArchiveCheck; // Do not check archive
CompressType walCompressType; // Compress type for the archive files
CipherType cipherType; // Cipher type
const char *cipherPass; // Cipher pass
unsigned int walTotal; // Total WAL to write
unsigned int timeline; // Timeline to use for WAL files
} TestBackupPqScriptParam;
@ -360,7 +379,9 @@ testBackupPqScript(unsigned int pgVersion, time_t backupTimeStart, TestBackupPqS
// -----------------------------------------------------------------------------------------------------------------------------
if (!param.noPriorWal)
{
InfoArchive *infoArchive = infoArchiveLoadFile(storageRepo(), INFO_ARCHIVE_PATH_FILE_STR, cipherTypeNone, NULL);
InfoArchive *infoArchive = infoArchiveLoadFile(
storageRepo(), INFO_ARCHIVE_PATH_FILE_STR, param.cipherType == 0 ? cipherTypeNone : param.cipherType,
param.cipherPass == NULL ? NULL : STR(param.cipherPass));
const String *archiveId = infoArchiveId(infoArchive);
StringList *walSegmentList = pgLsnRangeToWalSegmentList(
param.timeline, lsnStart - pgControl.walSegmentSize, param.noWal ? lsnStart - pgControl.walSegmentSize : lsnStop,
@ -1043,87 +1064,10 @@ testRun(void)
TEST_RESULT_PTR(result.pageChecksumResult, NULL, "page checksum result is NULL");
TEST_STORAGE_GET(storageRepo(), strZ(backupPathFile), "atestfile###", .comment = "confirm contents");
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("resumed file is missing in repo but present in resumed manifest, file same name in repo - RECOPY");
fileList = lstNewP(sizeof(BackupFile));
file = (BackupFile)
{
.pgFile = pgFile,
.pgFileDelta = true,
.pgFileIgnoreMissing = false,
.pgFileSize = 9,
.pgFileCopyExactSize = true,
.pgFileChecksum = bufNewDecode(encodingHex, STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67")),
.pgFileChecksumPage = false,
.manifestFile = STRDEF(BOGUS_STR),
.manifestFileResume = true,
.manifestFileHasReference = false,
};
lstAdd(fileList, &file);
repoFile = strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(backupLabel), strZ(file.manifestFile));
TEST_STORAGE_LIST(
storageRepo(), STORAGE_REPO_BACKUP "/20190718-155825F", "testfile\n", .comment = "resumed file is missing in repo");
TEST_ASSIGN(
result,
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeNone, 1, cipherTypeNone, NULL, fileList), 0),
"backup 9 bytes of pgfile to file to resume in repo");
TEST_RESULT_UINT(result.copySize, 9, "copy 9 bytes");
TEST_RESULT_UINT(result.repoSize, 9, "repo=copy size");
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultReCopy, "check recopy result");
TEST_RESULT_STR_Z(
strNewEncode(encodingHex, result.copyChecksum), "9bc8ab2dda60ef4beed07d1e19ce0676d5edde67",
"copy checksum for file size 9");
TEST_RESULT_PTR(result.pageChecksumResult, NULL, "page checksum result is NULL");
TEST_STORAGE_GET(
storageRepo(), strZ(backupPathFile), "atestfile###", .comment = "existing file with same name as pgFile not modified");
TEST_STORAGE_GET(
storageRepo(), STORAGE_REPO_BACKUP "/20190718-155825F/" BOGUS_STR, "atestfile", .comment = "resumed file copied");
storageRepoWrite(), STORAGE_REPO_BACKUP "/20190718-155825F", "testfile\n", .remove = true,
.comment = "resumed file is missing in repo");
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("file exists in repo & db, checksum not same in repo - RECOPY");
HRN_STORAGE_PUT_Z(
storageRepoWrite(), strZ(backupPathFile), "adifferentfile",
.comment = "create different file (size and checksum) with same name in repo");
fileList = lstNewP(sizeof(BackupFile));
file = (BackupFile)
{
.pgFile = pgFile,
.pgFileDelta = true,
.pgFileIgnoreMissing = false,
.pgFileSize = 9,
.pgFileCopyExactSize = true,
.pgFileChecksum = bufNewDecode(encodingHex, STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67")),
.pgFileChecksumPage = false,
.manifestFile = pgFile,
.manifestFileResume = true,
.manifestFileHasReference = false,
};
lstAdd(fileList, &file);
repoFile = strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(backupLabel), strZ(file.manifestFile));
// Delta set, ignoreMissing false, no hasReference
TEST_ASSIGN(
result,
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeNone, 1, cipherTypeNone, NULL, fileList), 0),
"db & repo file, pgFileMatch, repo checksum no match, no ignoreMissing, no pageChecksum, delta, no hasReference");
TEST_RESULT_UINT(result.copySize, 9, "copy 9 bytes");
TEST_RESULT_UINT(result.repoSize, 9, "repo=copy size");
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultReCopy, "recopy file");
TEST_RESULT_STR_Z(
strNewEncode(encodingHex, result.copyChecksum), "9bc8ab2dda60ef4beed07d1e19ce0676d5edde67",
"copy checksum for file size 9");
TEST_RESULT_PTR(result.pageChecksumResult, NULL, "page checksum result is NULL");
TEST_STORAGE_GET(storageRepo(), strZ(backupPathFile), "atestfile", .comment = "existing file recopied");
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("file exists in repo but missing from db, checksum same in repo - SKIP");
@ -1140,6 +1084,8 @@ testRun(void)
.pgFileChecksum = bufNewDecode(encodingHex, STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67")),
.pgFileChecksumPage = false,
.manifestFile = pgFile,
.repoFileChecksum = bufNewDecode(encodingHex, STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67")),
.repoFileSize = 9,
.manifestFileResume = true,
.manifestFileHasReference = false,
};
@ -1155,7 +1101,7 @@ testRun(void)
TEST_RESULT_PTR(result.copyChecksum, NULL, "copy checksum NULL");
TEST_RESULT_PTR(result.pageChecksumResult, NULL, "page checksum result is NULL");
TEST_STORAGE_LIST(
storageRepo(), STORAGE_REPO_BACKUP "/20190718-155825F", BOGUS_STR "\n", .comment = "file removed from repo");
storageRepo(), STORAGE_REPO_BACKUP "/20190718-155825F", NULL, .comment = "file removed from repo");
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("compression set, all other boolean parameters false - COPY");
@ -1192,40 +1138,6 @@ testRun(void)
storageRepo(), zNewFmt(STORAGE_REPO_BACKUP "/%s/%s.gz", strZ(backupLabel), strZ(pgFile)),
.comment = "copy file to repo compress success");
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("pg and repo file exist & match, prior checksum, compression - COPY CHECKSUM");
fileList = lstNewP(sizeof(BackupFile));
file = (BackupFile)
{
.pgFile = pgFile,
.pgFileIgnoreMissing = false,
.pgFileSize = 9,
.pgFileCopyExactSize = true,
.pgFileChecksum = bufNewDecode(encodingHex, STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67")),
.pgFileChecksumPage = false,
.manifestFile = pgFile,
.manifestFileResume = true,
.manifestFileHasReference = false,
};
lstAdd(fileList, &file);
TEST_ASSIGN(
result,
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeGz, 3, cipherTypeNone, NULL, fileList), 0),
"pg file & repo exists, match, checksum, no ignoreMissing, compression, no pageChecksum, no delta, no hasReference");
TEST_RESULT_UINT(result.copySize, 9, "copy=pgFile size");
TEST_RESULT_UINT(result.repoSize, 0, "repo size not calculated");
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultChecksum, "checksum file");
TEST_RESULT_STR_Z(
strNewEncode(encodingHex, result.copyChecksum), "9bc8ab2dda60ef4beed07d1e19ce0676d5edde67",
"compressed repo file checksum matches");
TEST_STORAGE_EXISTS(
storageRepo(), zNewFmt(STORAGE_REPO_BACKUP "/%s/%s.gz", strZ(backupLabel), strZ(pgFile)),
.comment = "compressed file exists");
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("create a zero sized file - checksum will be set but in backupManifestUpdate it will not be copied");
@ -1261,7 +1173,6 @@ testRun(void)
TEST_RESULT_PTR(result.pageChecksumResult, NULL, "page checksum result is NULL");
TEST_STORAGE_LIST(
storageRepo(), STORAGE_REPO_BACKUP "/20190718-155825F",
BOGUS_STR "\n"
"testfile.gz\n"
"zerofile\n",
.comment = "copy zero file to repo success");
@ -1316,116 +1227,6 @@ testRun(void)
TEST_STORAGE_GET(
storageRepo(), strZ(backupPathFile), "atestfile", .cipherType = cipherTypeAes256Cbc,
.comment = "copy file to encrypted repo success");
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("delta, copy file (size mismatch) to encrypted repo");
fileList = lstNewP(sizeof(BackupFile));
file = (BackupFile)
{
.pgFile = pgFile,
.pgFileDelta = true,
.pgFileIgnoreMissing = false,
.pgFileSize = 8,
.pgFileCopyExactSize = true,
.pgFileChecksum = bufNewDecode(encodingHex, STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67")),
.pgFileChecksumPage = false,
.manifestFile = pgFile,
.manifestFileResume = true,
.manifestFileHasReference = false,
};
lstAdd(fileList, &file);
// Delta but pgFile does not match size passed, prior checksum, no compression, no pageChecksum, delta, no hasReference
TEST_ASSIGN(
result,
*(BackupFileResult *)lstGet(
backupFile(repoFile, compressTypeNone, 1, cipherTypeAes256Cbc, STRDEF(TEST_CIPHER_PASS), fileList), 0),
"pg and repo file exists, pgFileMatch false, no ignoreMissing, no pageChecksum, delta, no hasReference");
TEST_RESULT_UINT(result.copySize, 8, "copy size set");
TEST_RESULT_UINT(result.repoSize, 32, "repo size set");
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultCopy, "copy file");
TEST_RESULT_STR_Z(
strNewEncode(encodingHex, result.copyChecksum), "acc972a8319d4903b839c64ec217faa3e77b4fcb",
"copy checksum for size passed");
TEST_RESULT_PTR(result.pageChecksumResult, NULL, "page checksum NULL");
TEST_STORAGE_GET(
storageRepo(), strZ(backupPathFile), "atestfil", .cipherType = cipherTypeAes256Cbc,
.comment = "delta, copy file (size missmatch) to encrypted repo success");
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("no delta, recopy (size mismatch) file to encrypted repo");
fileList = lstNewP(sizeof(BackupFile));
file = (BackupFile)
{
.pgFile = pgFile,
.pgFileIgnoreMissing = false,
.pgFileSize = 9,
.pgFileCopyExactSize = true,
.pgFileChecksum = bufNewDecode(encodingHex, STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67")),
.pgFileChecksumPage = false,
.manifestFile = pgFile,
.manifestFileResume = true,
.manifestFileHasReference = false,
};
lstAdd(fileList, &file);
TEST_ASSIGN(
result,
*(BackupFileResult *)lstGet(
backupFile(repoFile, compressTypeNone, 0, cipherTypeAes256Cbc, STRDEF(TEST_CIPHER_PASS), fileList), 0),
"pg and repo file exists, checksum mismatch, no ignoreMissing, no pageChecksum, no delta, no hasReference");
TEST_RESULT_UINT(result.copySize, 9, "copy size set");
TEST_RESULT_UINT(result.repoSize, 32, "repo size set");
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultReCopy, "recopy file");
TEST_RESULT_STR_Z(
strNewEncode(encodingHex, result.copyChecksum), "9bc8ab2dda60ef4beed07d1e19ce0676d5edde67", "copy checksum");
TEST_RESULT_PTR(result.pageChecksumResult, NULL, "page checksum NULL");
TEST_STORAGE_GET(
storageRepoWrite(), strZ(backupPathFile), "atestfile", .cipherType = cipherTypeAes256Cbc,
.comment = "recopy file to encrypted repo success");
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("no delta, recopy (checksum mismatch), file to encrypted repo");
fileList = lstNewP(sizeof(BackupFile));
file = (BackupFile)
{
.pgFile = pgFile,
.pgFileIgnoreMissing = false,
.pgFileSize = 9,
.pgFileCopyExactSize = true,
.pgFileChecksum = bufNewDecode(encodingHex, STRDEF("1234567890123456789012345678901234567890")),
.pgFileChecksumPage = false,
.manifestFile = pgFile,
.manifestFileResume = true,
.manifestFileHasReference = false,
};
lstAdd(fileList, &file);
TEST_ASSIGN(
result,
*(BackupFileResult *)lstGet(
backupFile(repoFile, compressTypeNone, 0, cipherTypeAes256Cbc, STRDEF(TEST_CIPHER_PASS), fileList), 0),
"backup file");
TEST_RESULT_UINT(result.copySize, 9, "copy size set");
TEST_RESULT_UINT(result.repoSize, 32, "repo size set");
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultReCopy, "recopy file");
TEST_RESULT_STR_Z(
strNewEncode(encodingHex, result.copyChecksum), "9bc8ab2dda60ef4beed07d1e19ce0676d5edde67",
"copy checksum for size passed");
TEST_RESULT_PTR(result.pageChecksumResult, NULL, "page checksum NULL");
TEST_STORAGE_GET(
storageRepo(), strZ(backupPathFile), "atestfile",
.cipherType = cipherTypeAes256Cbc, .comment = "recopy file to encrypted repo, success");
}
// *****************************************************************************************************************************
@ -2233,6 +2034,7 @@ testRun(void)
ManifestFile file = manifestFileUnpack(manifestResume, *filePack);
file.checksumSha1 = bufPtr(bufNewDecode(encodingHex, STRDEF("06d06bb31b570b94d7b4325f511f853dbe771c21")));
file.checksumRepoSha1 = bufPtr(bufNewDecode(encodingHex, STRDEF("06d06bb31b570b94d7b4325f511f853dbe771c21")));
manifestFilePackUpdate(manifestResume, filePack, &file);
@ -2341,6 +2143,7 @@ testRun(void)
manifestResume, &(ManifestFile){
.name = STRDEF("pg_data/size-mismatch"),
.checksumSha1 = bufPtr(bufNewDecode(encodingHex, STRDEF("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"))),
.checksumRepoSha1 = bufPtr(bufNewDecode(encodingHex, STRDEF("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"))),
.size = 33});
// Time does not match between cluster and resume manifest
@ -2351,6 +2154,7 @@ testRun(void)
manifestResume, &(ManifestFile){
.name = STRDEF("pg_data/time-mismatch"),
.checksumSha1 = bufPtr(bufNewDecode(encodingHex, STRDEF("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"))),
.checksumRepoSha1 = bufPtr(bufNewDecode(encodingHex, STRDEF("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"))),
.size = 4, .timestamp = backupTimeStart - 1});
// Size is zero in cluster and resume manifest. ??? We'd like to remove this requirement after the migration.
@ -2528,8 +2332,31 @@ testRun(void)
manifestResume, &(ManifestFile){
.name = STRDEF("pg_data/time-mismatch2"),
.checksumSha1 = bufPtr(bufNewDecode(encodingHex, STRDEF("984816fd329622876e14907634264e6f332e9fb3"))),
// .checksumRepoSha1 = bufPtr(bufNewDecode(encodingHex, STRDEF("984816fd329622876e14907634264e6f332e9fb3"))),
.size = 4, .timestamp = backupTimeStart});
// File does not match what is in manifest
HRN_STORAGE_PUT_Z(storagePgWrite(), "content-mismatch", "TEST", .timeModified = backupTimeStart);
HRN_STORAGE_PUT_EMPTY(
storageRepoWrite(), zNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/content-mismatch.gz", strZ(resumeLabel)));
manifestFileAdd(
manifestResume, &(ManifestFile){
.name = STRDEF("pg_data/content-mismatch"),
.checksumSha1 = bufPtr(bufNewDecode(encodingHex, STRDEF("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"))),
.checksumRepoSha1 = bufPtr(bufNewDecode(encodingHex, STRDEF("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"))),
.size = 4, .timestamp = backupTimeStart});
// Repo size mismatch
HRN_STORAGE_PUT_Z(storagePgWrite(), "repo-size-mismatch", "TEST", .timeModified = backupTimeStart);
HRN_STORAGE_PUT_EMPTY(
storageRepoWrite(), zNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/repo-size-mismatch.gz", strZ(resumeLabel)));
manifestFileAdd(
manifestResume, &(ManifestFile){
.name = STRDEF("pg_data/repo-size-mismatch"),
.checksumSha1 = bufPtr(bufNewDecode(encodingHex, STRDEF("984816fd329622876e14907634264e6f332e9fb3"))),
.checksumRepoSha1 = bufPtr(bufNewDecode(encodingHex, STRDEF("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"))),
.size = 4, .sizeRepo = 4, .timestamp = backupTimeStart});
// Links are always removed on resume
THROW_ON_SYS_ERROR(
symlink(
@ -2578,6 +2405,12 @@ testRun(void)
" an issue unless the resumed backup path in the repository is known to be corrupted.\n"
" NOTE: this does not indicate a problem with the PostgreSQL page checksums.\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/time-mismatch2 (4B, [PCT]) checksum [SHA1]\n"
"P00 WARN: resumed backup file pg_data/repo-size-mismatch does not have expected checksum"
" 984816fd329622876e14907634264e6f332e9fb3. The file will be recopied and backup will continue but this may be"
" an issue unless the resumed backup path in the repository is known to be corrupted.\n"
" NOTE: this does not indicate a problem with the PostgreSQL page checksums.\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/repo-size-mismatch (4B, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/content-mismatch (4B, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: match file from prior backup " TEST_PATH "/pg1/PG_VERSION (3B, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/resume-ref (0B, [PCT])\n"
"P00 DETAIL: hardlink pg_data/PG_VERSION to 20191003-105320F\n"
@ -2586,7 +2419,7 @@ testRun(void)
"P00 INFO: backup stop archive = 0000000105D9759000000000, lsn = 5d97590/800000\n"
"P00 INFO: check archive for segment(s) 0000000105D9759000000000:0000000105D9759000000000\n"
"P00 INFO: new backup label = 20191003-105320F_20191004-144000D\n"
"P00 INFO: diff backup size = [SIZE], file total = 5");
"P00 INFO: diff backup size = [SIZE], file total = 7");
// Check repo directory
TEST_RESULT_STR_Z(
@ -2594,10 +2427,12 @@ testRun(void)
". {link, d=20191003-105320F_20191004-144000D}\n"
"pg_data {path}\n"
"pg_data/PG_VERSION.gz {file, s=3}\n"
"pg_data/content-mismatch.gz {file, s=4}\n"
"pg_data/global {path}\n"
"pg_data/global/pg_control.gz {file, s=8192}\n"
"pg_data/pg_xlog {path}\n"
"pg_data/postgresql.conf.gz {file, s=11}\n"
"pg_data/repo-size-mismatch.gz {file, s=4}\n"
"pg_data/resume-ref.gz {file, s=0}\n"
"pg_data/time-mismatch2.gz {file, s=4}\n"
"--------\n"
@ -2607,9 +2442,13 @@ testRun(void)
"[target:file]\n"
"pg_data/PG_VERSION={\"checksum\":\"06d06bb31b570b94d7b4325f511f853dbe771c21\",\"reference\":\"20191003-105320F\""
",\"size\":3,\"timestamp\":1570000000}\n"
"pg_data/content-mismatch={\"checksum\":\"984816fd329622876e14907634264e6f332e9fb3\",\"size\":4"
",\"timestamp\":1570200000}\n"
"pg_data/global/pg_control={\"size\":8192,\"timestamp\":1570200000}\n"
"pg_data/postgresql.conf={\"checksum\":\"e3db315c260e79211b7b52587123b7aa060f30ab\""
",\"reference\":\"20191003-105320F\",\"size\":11,\"timestamp\":1570000000}\n"
"pg_data/repo-size-mismatch={\"checksum\":\"984816fd329622876e14907634264e6f332e9fb3\",\"size\":4"
",\"timestamp\":1570200000}\n"
"pg_data/resume-ref={\"size\":0,\"timestamp\":1570200000}\n"
"pg_data/time-mismatch2={\"checksum\":\"984816fd329622876e14907634264e6f332e9fb3\",\"size\":4"
",\"timestamp\":1570200100}\n"
@ -2623,6 +2462,8 @@ testRun(void)
// Remove test files
HRN_STORAGE_REMOVE(storagePgWrite(), "resume-ref", .errorOnMissing = true);
HRN_STORAGE_REMOVE(storagePgWrite(), "time-mismatch2", .errorOnMissing = true);
HRN_STORAGE_REMOVE(storagePgWrite(), "content-mismatch", .errorOnMissing = true);
HRN_STORAGE_REMOVE(storagePgWrite(), "repo-size-mismatch", .errorOnMissing = true);
}
// -------------------------------------------------------------------------------------------------------------------------
@ -3373,6 +3214,69 @@ testRun(void)
"pg_data/global={}\n",
"compare file list");
}
// It is better to put as few tests here as possible because cmp/enc makes tests more expensive (especially with valgrind)
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("online 11 full backup with comp/enc");
backupTimeStart = BACKUP_EPOCH + 3200000;
{
// Remove old pg data
HRN_STORAGE_PATH_REMOVE(storageTest, "pg1", .recurse = true);
// Update pg_control
HRN_PG_CONTROL_PUT(storagePgWrite(), PG_VERSION_11, .pageChecksum = false, .walSegmentSize = 2 * 1024 * 1024);
// Update version
HRN_STORAGE_PUT_Z(storagePgWrite(), PG_FILE_PGVERSION, PG_VERSION_11_STR, .timeModified = backupTimeStart);
// Create encrypted stanza
StringList *argList = strLstNew();
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
hrnCfgArgRawBool(argList, cfgOptOnline, false);
hrnCfgArgRawZ(argList, cfgOptRepoCipherType, "aes-256-cbc");
hrnCfgEnvRawZ(cfgOptRepoCipherPass, TEST_CIPHER_PASS);
HRN_CFG_LOAD(cfgCmdStanzaCreate, argList);
HRN_STORAGE_PATH_REMOVE(storageRepoIdxWrite(0), NULL, .recurse = true);
cmdStanzaCreate();
TEST_RESULT_LOG("P00 INFO: stanza-create for stanza 'test1' on repo1");
// Load options
argList = strLstNew();
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
hrnCfgArgRawStrId(argList, cfgOptType, backupTypeFull);
hrnCfgArgRawZ(argList, cfgOptRepoCipherType, "aes-256-cbc");
hrnCfgEnvRawZ(cfgOptRepoCipherPass, TEST_CIPHER_PASS);
HRN_CFG_LOAD(cfgCmdBackup, argList);
// Run backup
testBackupPqScriptP(
PG_VERSION_11, backupTimeStart, .walCompressType = compressTypeNone, .cipherType = cipherTypeAes256Cbc,
.cipherPass = TEST_CIPHER_PASS, .walTotal = 2);
TEST_RESULT_VOID(hrnCmdBackup(), "backup");
TEST_RESULT_LOG(
"P00 INFO: execute non-exclusive backup start: backup begins after the next regular checkpoint completes\n"
"P00 INFO: backup start archive = 0000000105DC520000000000, lsn = 5dc5200/0\n"
"P00 INFO: check archive for segment 0000000105DC520000000000\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/global/pg_control (8KB, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/PG_VERSION (2B, [PCT]) checksum [SHA1]\n"
"P00 INFO: execute non-exclusive backup stop and wait for all WAL segments to archive\n"
"P00 INFO: backup stop archive = 0000000105DC520000000001, lsn = 5dc5200/300000\n"
"P00 DETAIL: wrote 'backup_label' file returned from backup stop function\n"
"P00 DETAIL: wrote 'tablespace_map' file returned from backup stop function\n"
"P00 INFO: check archive for segment(s) 0000000105DC520000000000:0000000105DC520000000001\n"
"P00 INFO: new backup label = 20191108-080000F\n"
"P00 INFO: full backup size = [SIZE], file total = 4");
}
}
FUNCTION_HARNESS_RETURN_VOID();

View File

@ -1184,6 +1184,8 @@ testRun(void)
"\"reference\":\"20181119-152900F\",\"size\":4,\"timestamp\":1565282114}\n"
"pg_data/testfile={\"checksum\":\"%s\",\"reference\":\"20181119-152900F\",\"size\":7,\"timestamp\":1565282114}\n"
"pg_data/testfile2={\"checksum\":\"%s\",\"size\":7,\"timestamp\":1565282114}\n"
"pg_data/repochk={\"checksum\":\"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\",\"rck\":\"%s\",\"repo-size\":7"
",\"size\":3,\"timestamp\":1565282114}\n"
"pg_data/testmissing="
"{\"checksum\":\"123473f470864e067ee3a22e64b47b0a1c356abc\",\"size\":7,\"timestamp\":1565282114}\n"
"pg_data/testother={\"checksum\":\"%s\",\"reference\":\"UNPROCESSEDBACKUP\",\"size\":7,\"timestamp\":1565282114}\n"
@ -1193,7 +1195,7 @@ testRun(void)
TEST_MANIFEST_PATH
TEST_MANIFEST_PATH_DEFAULT,
strZ(strNewEncode(encodingHex, fileChecksum)), strZ(strNewEncode(encodingHex, fileChecksum)),
strZ(strNewEncode(encodingHex, fileChecksum)));
strZ(strNewEncode(encodingHex, fileChecksum)), strZ(strNewEncode(encodingHex, fileChecksum)));
// Write manifests for dependent backup
HRN_INFO_PUT(
@ -1206,6 +1208,10 @@ testRun(void)
storageRepoIdxWrite(0),
STORAGE_REPO_BACKUP "/20181119-152900F_20181119-152909D/pg_data/testfile2", fileContents,
.comment = "put valid file to dependent");
HRN_STORAGE_PUT_Z(
storageRepoIdxWrite(0),
STORAGE_REPO_BACKUP "/20181119-152900F_20181119-152909D/pg_data/repochk", fileContents,
.comment = "put valid file to dependent");
// Create an unprocessed backup label with a file that will be referenced in the dependent manifest
HRN_STORAGE_PUT_Z(
@ -1277,8 +1283,8 @@ testRun(void)
" backup: 20181119-152810F, status: invalid, total files checked: 0, total valid files: 0\n"
" backup: 20181119-152900F, status: invalid, total files checked: 3, total valid files: 2\n"
" checksum invalid: 1\n"
" backup: 20181119-152900F_20181119-152909D, status: invalid, total files checked: 5,"
" total valid files: 2\n"
" backup: 20181119-152900F_20181119-152909D, status: invalid, total files checked: 6,"
" total valid files: 3\n"
" missing: 1, checksum invalid: 1, other: 1");
harnessLogLevelReset();
@ -1300,7 +1306,7 @@ testRun(void)
" backup: 20181119-152810F, status: invalid, total files checked: 0, total valid files: 0\n"
" backup: 20181119-152900F, status: invalid, total files checked: 3, total valid files: 2\n"
" checksum invalid: 1\n"
" backup: 20181119-152900F_20181119-152909D, status: invalid, total files checked: 5, total valid files: 2\n"
" backup: 20181119-152900F_20181119-152909D, status: invalid, total files checked: 6, total valid files: 3\n"
" missing: 1, checksum invalid: 1, other: 1", "verify text output, not verbose, with verify failures");
TEST_RESULT_LOG(
"P01 INFO: invalid checksum "

View File

@ -1474,7 +1474,8 @@ testRun(void)
"[target:file]\n" \
"pg_data/=equal=more=={\"mode\":\"0640\",\"size\":0,\"timestamp\":1565282120}\n" \
"pg_data/PG_VERSION={\"checksum\":\"184473f470864e067ee3a22e64b47b0a1c356f29\"" \
",\"reference\":\"20190818-084502F_20190819-084506D\",\"size\":4,\"timestamp\":1565282114}\n" \
",\"rck\":\"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\",\"reference\":\"20190818-084502F_20190819-084506D\"" \
",\"size\":4,\"timestamp\":1565282114}\n" \
"pg_data/base/16384/17000={\"bni\":1,\"checksum\":\"e0101dd8ffb910c9c202ca35b5f828bcb9697bed\",\"checksum-page\":false"\
",\"checksum-page-error\":[1],\"repo-size\":4096,\"size\":8192,\"timestamp\":1565282114}\n" \
"pg_data/base/16384/PG_VERSION={\"bni\":1,\"bno\":1,\"checksum\":\"184473f470864e067ee3a22e64b47b0a1c356f29\"" \