
Bundle files in the repository during backup.

Bundle (combine) smaller files during backup to reduce the number of files written to the repository (enable with --bundle). Reducing the number of files is a benefit on all file systems, but especially so on object stores such as S3 that have a high file creation cost. Another benefit is that zero-length files are only stored as metadata in the manifest.
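
For illustration, a full backup with bundling enabled might be run like this (hypothetical stanza name; note that the option is marked internal in this commit, so it is not yet documented for end users):

    pgbackrest --stanza=demo --type=full --bundle --bundle-size=20MiB backup

Per the configuration changes below, bundle-size defaults to 100MiB and may only be set when --bundle is enabled.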

Files are batched up to bundle-size and then compressed/encrypted individually and stored sequentially in the bundle. The bundle id and offset of each file is stored in the manifest so files can be retrieved randomly without needing to read the entire bundle. Files are ordered by timestamp descending when being assigned to bundles to reduce the amount of random access that needs to be done. The idea is that bundles with older files can be read in their entirety on restore and only bundles with newer files will get fragmented.
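
A minimal sketch of the retrieval side, pieced together from the restore changes later in this diff (illustrative only; the field names follow the manifest fields added by this commit, and backupLabel/repoIdx stand in for values available at the call site):

    // Read a single file out of a bundle. The offset positions the read at the
    // start of the file within the bundle and the limit stops it at the file's
    // stored (repo) size, so the rest of the bundle is never read.
    StorageRead *read = storageNewReadP(
        storageRepoIdx(repoIdx),
        strNewFmt(
            STORAGE_REPO_BACKUP "/%s/" MANIFEST_PATH_BUNDLE "/%" PRIu64,
            strZ(backupLabel), file.bundleId),
        .offset = file.bundleOffset,
        .limit = VARUINT64(file.sizeRepo));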

Bundles are a custom format with metadata stored in the manifest. Tar was considered but it is too limited a format: the major issue is that each file's size must be known in advance (the size is written into the tar header ahead of the data), which is very contrary to how pgBackRest works, especially once we introduce page-level incremental backups.

Bundles are stored under numbered file names in the bundle directory. Some files may still end up in pg_data if they are added after the backup is complete; backup_label is an example.
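
For example, a bundled backup might be laid out like this in the repository (hypothetical label and contents; gzip compression assumed for the non-bundled file):

    backup/demo/20220214-132414F/
      backup.manifest
      bundle/1                    <- small files stored sequentially
      bundle/2
      pg_data/backup_label.gz     <- added after the backup completed, so stored individually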

Currently, only the backup command reads and writes files in batches; the restore and verify commands use the stored offsets to pull individual files out of a bundle. It seems better to finalize how bundling will work before optimizing the other commands. Even so, this is a major step forward, and all commands function correctly with bundling.
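
Condensed from the backupJobCallback() changes below (simplified, with job parameter setup omitted and the jobData-> field accesses flattened to locals), the batching loop looks roughly like this:

    // Pull files from the queue into a single job until the bundle target is
    // reached. A file that would overshoot the target is left in the queue for
    // now since a smaller file later in the queue may still fit.
    uint64_t fileTotal = 0;
    uint64_t fileSize = 0;
    unsigned int fileIdx = 0;

    while (fileIdx < lstSize(queue))
    {
        const ManifestFile file = manifestFileUnpack(manifest, *(ManifestFilePack **)lstGet(queue, fileIdx));

        if (fileTotal > 0 && fileSize + file.size >= bundleSize)
        {
            fileIdx++;
            continue;
        }

        // ... add the file's parameters to the protocol command ...

        fileTotal++;
        fileSize += file.size;
        lstRemoveIdx(queue, fileIdx);

        // Stop when not bundling or the bundle target has been reached
        if (!bundle || fileSize >= bundleSize)
            break;
    }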

One caveat: resume is currently not supported when bundle is enabled.
David Steele 2022-02-14 13:24:14 -06:00 committed by GitHub
parent 8046f06307
commit 34d649579e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
29 changed files with 1668 additions and 689 deletions

View File

@@ -128,6 +128,7 @@
<commit subject="Optimization for jsonFromStrInternal()."/>
<commit subject="Simplify manifest file defaults."/>
<commit subject="Simplify filename construction in command/verify module."/>
<commit subject="Bundle files in the repository during backup."/>
<release-item-contributor-list>
<release-item-contributor id="david.steele"/>

View File

@@ -1106,6 +1106,31 @@ option:
command-role:
main: {}
bundle:
internal: true
section: global
type: boolean
default: false
command:
backup: {}
command-role:
main: {}
bundle-size:
internal: true
section: global
type: size
default: 100MiB
allow-range: [1MiB, 1PiB]
command:
backup: {}
command-role:
main: {}
depend:
option: bundle
list:
- true
checksum-page:
section: global
type: boolean
@@ -1151,6 +1176,10 @@ option:
backup: {}
command-role:
main: {}
depend:
option: bundle
list:
- false
start-fast:
section: global

View File

@@ -1109,6 +1109,30 @@
<example>y</example>
</config-key>
<!-- ======================================================================================================= -->
<config-key id="bundle" name="Repository Bundles">
<summary>Bundle files in repository.</summary>
<text>
<p>Bundle (combine) smaller files to reduce the total number of files written to the repository. Writing fewer files is generally more efficient, especially on object stores such as <proper>S3</proper>. In addition, zero-length files are not stored (except in the manifest), which saves time and space.</p>
</text>
<example>y</example>
</config-key>
<!-- ======================================================================================================= -->
<config-key id="bundle-size" name="Repository Bundle Size">
<summary>Target size for file bundles.</summary>
<text>
<p>Defines the target size of bundled files. Most bundles will be smaller than this target but it is possible that some will be slightly larger, so do not set this option to the maximum size that your file system allows.</p>
<p>In general, it is not a good idea to set this option too high because retries will need to redo the entire bundle.</p>
</text>
<example>10MiB</example>
</config-key>
<!-- CONFIG - BACKUP SECTION - CHECKSUM-PAGE KEY -->
<config-key id="checksum-page" name="Page Checksums">
<summary>Validate data page checksums.</summary>

View File

@@ -610,7 +610,7 @@ void backupResumeCallback(void *data, const StorageInfo *info)
{
manifestFileUpdate(
resumeData->manifest, manifestName, file.size, fileResume.sizeRepo, fileResume.checksumSha1, NULL,
fileResume.checksumPage, fileResume.checksumPageError, fileResume.checksumPageErrorList);
fileResume.checksumPage, fileResume.checksumPageError, fileResume.checksumPageErrorList, 0, 0);
}
}
}
@@ -674,6 +674,7 @@ backupResumeFind(const Manifest *manifest, const String *cipherPassBackup)
// Resumable backups do not have backup.manifest
if (!storageExistsP(storageRepo(), manifestFile))
{
const bool resume = cfgOptionTest(cfgOptResume) && cfgOptionBool(cfgOptResume);
bool usable = false;
const String *reason = STRDEF("partially deleted by prior resume or invalid");
Manifest *manifestResume = NULL;
@@ -685,7 +686,7 @@ backupResumeFind(const Manifest *manifest, const String *cipherPassBackup)
// Attempt to read the manifest file in the resumable backup to see if it can be used. If any error at all
// occurs then the backup will be considered unusable and a resume will not be attempted.
if (cfgOptionBool(cfgOptResume))
if (resume)
{
TRY_BEGIN()
{
@@ -751,7 +752,9 @@ backupResumeFind(const Manifest *manifest, const String *cipherPassBackup)
// Else warn and remove the unusable backup
else
{
LOG_WARN_FMT("backup '%s' cannot be resumed: %s", strZ(backupLabel), strZ(reason));
LOG_FMT(
resume ? logLevelWarn : logLevelInfo, 0, "backup '%s' cannot be resumed: %s", strZ(backupLabel),
strZ(reason));
storagePathRemoveP(
storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s", strZ(backupLabel)), .recurse = true);
@@ -1148,21 +1151,22 @@ Log the results of a job and throw errors
***********************************************************************************************************************************/
static void
backupJobResult(
Manifest *manifest, const String *host, const String *const fileName, StringList *fileRemove, ProtocolParallelJob *const job,
const uint64_t sizeTotal, uint64_t *sizeProgress)
Manifest *const manifest, const String *const host, const Storage *const storagePg, StringList *const fileRemove,
ProtocolParallelJob *const job, const bool bundle, const uint64_t sizeTotal, uint64_t *const sizeProgress)
{
FUNCTION_LOG_BEGIN(logLevelDebug);
FUNCTION_LOG_PARAM(MANIFEST, manifest);
FUNCTION_LOG_PARAM(STRING, host);
FUNCTION_LOG_PARAM(STRING, fileName);
FUNCTION_LOG_PARAM(STORAGE, storagePg);
FUNCTION_LOG_PARAM(STRING_LIST, fileRemove);
FUNCTION_LOG_PARAM(PROTOCOL_PARALLEL_JOB, job);
FUNCTION_LOG_PARAM(BOOL, bundle);
FUNCTION_LOG_PARAM(UINT64, sizeTotal);
FUNCTION_LOG_PARAM_P(UINT64, sizeProgress);
FUNCTION_LOG_END();
ASSERT(manifest != NULL);
ASSERT(fileName != NULL);
ASSERT(storagePg != NULL);
ASSERT(fileRemove != NULL);
ASSERT(job != NULL);
@@ -1171,143 +1175,150 @@ backupJobResult(
{
MEM_CONTEXT_TEMP_BEGIN()
{
const ManifestFile file = manifestFileFind(manifest, varStr(protocolParallelJobKey(job)));
const unsigned int processId = protocolParallelJobProcessId(job);
const uint64_t bundleId = bundle ? varUInt64(protocolParallelJobKey(job)) : 0;
PackRead *const jobResult = protocolParallelJobResult(job);
const BackupCopyResult copyResult = (BackupCopyResult)pckReadU32P(jobResult);
const uint64_t copySize = pckReadU64P(jobResult);
const uint64_t repoSize = pckReadU64P(jobResult);
const String *const copyChecksum = pckReadStrP(jobResult);
PackRead *const checksumPageResult = pckReadPackReadP(jobResult);
// Increment backup copy progress
*sizeProgress += copySize;
while (!pckReadNullP(jobResult))
{
const ManifestFile file = manifestFileFind(manifest, pckReadStrP(jobResult));
const BackupCopyResult copyResult = (BackupCopyResult)pckReadU32P(jobResult);
const uint64_t copySize = pckReadU64P(jobResult);
const uint64_t bundleOffset = pckReadU64P(jobResult);
const uint64_t repoSize = pckReadU64P(jobResult);
const String *const copyChecksum = pckReadStrP(jobResult);
PackRead *const checksumPageResult = pckReadPackReadP(jobResult);
// Create log file name
const String *fileLog = host == NULL ? fileName : strNewFmt("%s:%s", strZ(host), strZ(fileName));
// Increment backup copy progress
*sizeProgress += copySize;
// Format log strings
const String *const logProgress =
strNewFmt(
"%s, %" PRIu64 "%%", strZ(strSizeFormat(copySize)), sizeTotal == 0 ? 100 : *sizeProgress * 100 / sizeTotal);
const String *const logChecksum = copySize != 0 ? strNewFmt(" checksum %s", strZ(copyChecksum)) : EMPTY_STR;
// Create log file name
const String *const fileName = storagePathP(storagePg, manifestPathPg(file.name));
const String *fileLog = host == NULL ? fileName : strNewFmt("%s:%s", strZ(host), strZ(fileName));
// If the file is in a prior backup and nothing changed, just log it
if (copyResult == backupCopyResultNoOp)
{
LOG_DETAIL_PID_FMT(
processId, "match file from prior backup %s (%s)%s", strZ(fileLog), strZ(logProgress), strZ(logChecksum));
}
// Else if the repo matched the expected checksum, just log it
else if (copyResult == backupCopyResultChecksum)
{
LOG_DETAIL_PID_FMT(
processId, "checksum resumed file %s (%s)%s", strZ(fileLog), strZ(logProgress), strZ(logChecksum));
}
// Else if the file was removed during backup add it to the list of files to be removed from the manifest when the
// backup is complete. It can't be removed right now because that will invalidate the pointers that are being used for
// processing.
else if (copyResult == backupCopyResultSkip)
{
LOG_DETAIL_PID_FMT(processId, "skip file removed by database %s", strZ(fileLog));
strLstAdd(fileRemove, file.name);
}
// Else file was copied so update manifest
else
{
// If the file had to be recopied then warn that there may be an issue with corruption in the repository
// ??? This should really be below the message below for more context -- can be moved after the migration
// ??? The name should be a pg path not manifest name -- can be fixed after the migration
if (copyResult == backupCopyResultReCopy)
// Format log strings
const String *const logProgress =
strNewFmt(
"%s, %" PRIu64 "%%", strZ(strSizeFormat(copySize)), sizeTotal == 0 ? 100 : *sizeProgress * 100 / sizeTotal);
const String *const logChecksum = copySize != 0 ? strNewFmt(" checksum %s", strZ(copyChecksum)) : EMPTY_STR;
// If the file is in a prior backup and nothing changed, just log it
if (copyResult == backupCopyResultNoOp)
{
LOG_WARN_FMT(
"resumed backup file %s does not have expected checksum %s. The file will be recopied and backup will"
" continue but this may be an issue unless the resumed backup path in the repository is known to be"
" corrupted.\n"
"NOTE: this does not indicate a problem with the PostgreSQL page checksums.",
strZ(file.name), file.checksumSha1);
LOG_DETAIL_PID_FMT(
processId, "match file from prior backup %s (%s)%s", strZ(fileLog), strZ(logProgress), strZ(logChecksum));
}
LOG_DETAIL_PID_FMT(processId, "backup file %s (%s)%s", strZ(fileLog), strZ(logProgress), strZ(logChecksum));
// If the file had page checksums calculated during the copy
ASSERT((!file.checksumPage && checksumPageResult == NULL) || (file.checksumPage && checksumPageResult != NULL));
bool checksumPageError = false;
const VariantList *checksumPageErrorList = NULL;
if (checksumPageResult != NULL)
// Else if the repo matched the expected checksum, just log it
else if (copyResult == backupCopyResultChecksum)
{
checksumPageErrorList = backupJobResultPageChecksum(checksumPageResult);
// If the checksum was valid
if (!pckReadBoolP(checksumPageResult))
LOG_DETAIL_PID_FMT(
processId, "checksum resumed file %s (%s)%s", strZ(fileLog), strZ(logProgress), strZ(logChecksum));
}
// Else if the file was removed during backup add it to the list of files to be removed from the manifest when the
// backup is complete. It can't be removed right now because that will invalidate the pointers that are being used for
// processing.
else if (copyResult == backupCopyResultSkip)
{
LOG_DETAIL_PID_FMT(processId, "skip file removed by database %s", strZ(fileLog));
strLstAdd(fileRemove, file.name);
}
// Else file was copied so update manifest
else
{
// If the file had to be recopied then warn that there may be an issue with corruption in the repository
// ??? This should really be below the message below for more context -- can be moved after the migration
// ??? The name should be a pg path not manifest name -- can be fixed after the migration
if (copyResult == backupCopyResultReCopy)
{
checksumPageError = true;
LOG_WARN_FMT(
"resumed backup file %s does not have expected checksum %s. The file will be recopied and backup will"
" continue but this may be an issue unless the resumed backup path in the repository is known to be"
" corrupted.\n"
"NOTE: this does not indicate a problem with the PostgreSQL page checksums.",
strZ(file.name), file.checksumSha1);
}
LOG_DETAIL_PID_FMT(processId, "backup file %s (%s)%s", strZ(fileLog), strZ(logProgress), strZ(logChecksum));
// If the file had page checksums calculated during the copy
ASSERT((!file.checksumPage && checksumPageResult == NULL) || (file.checksumPage && checksumPageResult != NULL));
bool checksumPageError = false;
const VariantList *checksumPageErrorList = NULL;
if (checksumPageResult != NULL)
{
checksumPageErrorList = backupJobResultPageChecksum(checksumPageResult);
// If the checksum was valid
if (!pckReadBoolP(checksumPageResult))
{
checksumPageErrorList = NULL;
checksumPageError = true;
// ??? Update formatting after migration
LOG_WARN_FMT(
"page misalignment in file %s: file size %" PRIu64 " is not divisible by page size %u",
strZ(fileLog), copySize, PG_PAGE_SIZE_DEFAULT);
}
else
{
// Format the page checksum errors
CHECK(FormatError, checksumPageErrorList != NULL, "page checksum error list is missing");
CHECK(FormatError, !varLstEmpty(checksumPageErrorList), "page checksum error list is empty");
String *error = strNew();
unsigned int errorTotalMin = 0;
for (unsigned int errorIdx = 0; errorIdx < varLstSize(checksumPageErrorList); errorIdx++)
if (!pckReadBoolP(checksumPageResult))
{
const Variant *const errorItem = varLstGet(checksumPageErrorList, errorIdx);
checksumPageErrorList = NULL;
// Add a comma if this is not the first item
if (errorIdx != 0)
strCatZ(error, ", ");
// If an error range
if (varType(errorItem) == varTypeVariantList)
{
const VariantList *const errorItemList = varVarLst(errorItem);
ASSERT(varLstSize(errorItemList) == 2);
strCatFmt(
error, "%" PRIu64 "-%" PRIu64, varUInt64(varLstGet(errorItemList, 0)),
varUInt64(varLstGet(errorItemList, 1)));
errorTotalMin += 2;
}
// Else a single error
else
{
ASSERT(varType(errorItem) == varTypeUInt64);
strCatFmt(error, "%" PRIu64, varUInt64(errorItem));
errorTotalMin++;
}
// ??? Update formatting after migration
LOG_WARN_FMT(
"page misalignment in file %s: file size %" PRIu64 " is not divisible by page size %u",
strZ(fileLog), copySize, PG_PAGE_SIZE_DEFAULT);
}
else
{
// Format the page checksum errors
CHECK(FormatError, checksumPageErrorList != NULL, "page checksum error list is missing");
CHECK(FormatError, !varLstEmpty(checksumPageErrorList), "page checksum error list is empty");
// Make message plural when appropriate
const String *const plural = errorTotalMin > 1 ? STRDEF("s") : EMPTY_STR;
String *error = strNew();
unsigned int errorTotalMin = 0;
// ??? Update formatting after migration
LOG_WARN_FMT(
"invalid page checksum%s found in file %s at page%s %s", strZ(plural), strZ(fileLog), strZ(plural),
strZ(error));
for (unsigned int errorIdx = 0; errorIdx < varLstSize(checksumPageErrorList); errorIdx++)
{
const Variant *const errorItem = varLstGet(checksumPageErrorList, errorIdx);
// Add a comma if this is not the first item
if (errorIdx != 0)
strCatZ(error, ", ");
// If an error range
if (varType(errorItem) == varTypeVariantList)
{
const VariantList *const errorItemList = varVarLst(errorItem);
ASSERT(varLstSize(errorItemList) == 2);
strCatFmt(
error, "%" PRIu64 "-%" PRIu64, varUInt64(varLstGet(errorItemList, 0)),
varUInt64(varLstGet(errorItemList, 1)));
errorTotalMin += 2;
}
// Else a single error
else
{
ASSERT(varType(errorItem) == varTypeUInt64);
strCatFmt(error, "%" PRIu64, varUInt64(errorItem));
errorTotalMin++;
}
}
// Make message plural when appropriate
const String *const plural = errorTotalMin > 1 ? STRDEF("s") : EMPTY_STR;
// ??? Update formatting after migration
LOG_WARN_FMT(
"invalid page checksum%s found in file %s at page%s %s", strZ(plural), strZ(fileLog),
strZ(plural), strZ(error));
}
}
}
}
// Update file info and remove any reference to the file's existence in a prior backup
manifestFileUpdate(
manifest, file.name, copySize, repoSize, strZ(copyChecksum), VARSTR(NULL), file.checksumPage,
checksumPageError, checksumPageErrorList != NULL ? jsonFromVar(varNewVarLst(checksumPageErrorList)) : NULL);
// Update file info and remove any reference to the file's existence in a prior backup
manifestFileUpdate(
manifest, file.name, copySize, repoSize, strZ(copyChecksum), VARSTR(NULL), file.checksumPage,
checksumPageError, checksumPageErrorList != NULL ? jsonFromVar(varNewVarLst(checksumPageErrorList)) : NULL,
bundleId, bundleOffset);
}
}
}
MEM_CONTEXT_TEMP_END();
@@ -1395,6 +1406,9 @@ typedef struct BackupJobData
const int compressLevel; // Compress level if backup is compressed
const bool delta; // Is this a checksum delta backup?
const uint64_t lsnStart; // Starting lsn for the backup
const bool bundle; // Bundle files?
const uint64_t bundleSize; // Target bundle size
uint64_t bundleId; // Bundle id
List *queueList; // List of processing queues
} BackupJobData;
@@ -1415,8 +1429,10 @@ backupProcessFilePrimary(RegExp *const standbyExp, const String *const name)
strEqZ(name, MANIFEST_TARGET_PGDATA "/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL) || !regExpMatch(standbyExp, name));
}
// Comparator to order ManifestFile objects by size then name
// Comparator to order ManifestFile objects by size, date, and name
static const Manifest *backupProcessQueueComparatorManifest = NULL;
static bool backupProcessQueueComparatorBundle;
static uint64_t backupProcessQueueComparatorBundleSize;
static int
backupProcessQueueComparator(const void *item1, const void *item2)
@@ -1434,20 +1450,34 @@ backupProcessQueueComparator(const void *item1, const void *item2)
ManifestFile file2 = manifestFileUnpack(backupProcessQueueComparatorManifest, *(const ManifestFilePack **)item2);
// If the size differs then that's enough to determine order
if (file1.size < file2.size)
FUNCTION_TEST_RETURN(-1);
else if (file1.size > file2.size)
FUNCTION_TEST_RETURN(1);
if (!backupProcessQueueComparatorBundle || file1.size >= backupProcessQueueComparatorBundleSize ||
file2.size >= backupProcessQueueComparatorBundleSize)
{
if (file1.size < file2.size)
FUNCTION_TEST_RETURN(-1);
else if (file1.size > file2.size)
FUNCTION_TEST_RETURN(1);
}
// If size is the same then use name to generate a deterministic ordering (names must be unique)
// If bundling order by time ascending so that older files are bundled with older files and newer with newer
if (backupProcessQueueComparatorBundle)
{
if (file1.timestamp > file2.timestamp)
FUNCTION_TEST_RETURN(-1);
else if (file1.timestamp < file2.timestamp)
FUNCTION_TEST_RETURN(1);
}
// If size/time is the same then use name to generate a deterministic ordering (names must be unique)
FUNCTION_TEST_RETURN(strCmp(file1.name, file2.name));
}
// Helper to generate the backup queues
static uint64_t
backupProcessQueue(Manifest *const manifest, BackupJobData *const jobData)
backupProcessQueue(const BackupData *const backupData, Manifest *const manifest, BackupJobData *const jobData)
{
FUNCTION_LOG_BEGIN(logLevelDebug);
FUNCTION_LOG_PARAM(BACKUP_DATA, backupData);
FUNCTION_LOG_PARAM(MANIFEST, manifest);
FUNCTION_LOG_PARAM_P(VOID, jobData);
FUNCTION_LOG_END();
@@ -1499,6 +1529,17 @@ backupProcessQueue(Manifest *const manifest, BackupJobData *const jobData)
if (file.reference != NULL && (!jobData->delta || file.size == 0))
continue;
// If bundling store zero-length files immediately in the manifest without copying them
if (jobData->bundle && file.size == 0)
{
LOG_DETAIL_FMT(
"store zero-length file %s", strZ(storagePathP(backupData->storagePrimary, manifestPathPg(file.name))));
manifestFileUpdate(
manifest, file.name, 0, 0, strZ(HASH_TYPE_SHA1_ZERO_STR), VARSTR(NULL), file.checksumPage, false, NULL, 0, 0);
continue;
}
// Is pg_control in the backup?
if (strEq(file.name, STRDEF(MANIFEST_TARGET_PGDATA "/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL)))
pgControlFound = true;
@@ -1552,6 +1593,8 @@ backupProcessQueue(Manifest *const manifest, BackupJobData *const jobData)
// Sort the queues
backupProcessQueueComparatorManifest = manifest;
backupProcessQueueComparatorBundle = jobData->bundle;
backupProcessQueueComparatorBundleSize = jobData->bundleSize;
for (unsigned int queueIdx = 0; queueIdx < lstSize(jobData->queueList); queueIdx++)
lstSort(*(List **)lstGet(jobData->queueList, queueIdx), sortOrderDesc);
@@ -1610,17 +1653,47 @@ static ProtocolParallelJob *backupJobCallback(void *data, unsigned int clientIdx
0 : (int)(clientIdx % (lstSize(jobData->queueList) - queueOffset));
int queueEnd = queueIdx;
// Create backup job
ProtocolCommand *command = protocolCommandNew(PROTOCOL_COMMAND_BACKUP_FILE);
PackWrite *param = NULL;
uint64_t fileTotal = 0;
uint64_t fileSize = 0;
do
{
List *queue = *(List **)lstGet(jobData->queueList, (unsigned int)queueIdx + queueOffset);
unsigned int fileIdx = 0;
if (!lstEmpty(queue))
while (fileIdx < lstSize(queue))
{
const ManifestFile file = manifestFileUnpack(jobData->manifest, *(ManifestFilePack **)lstGet(queue, 0));
const ManifestFile file = manifestFileUnpack(jobData->manifest, *(ManifestFilePack **)lstGet(queue, fileIdx));
// Create backup job
ProtocolCommand *command = protocolCommandNew(PROTOCOL_COMMAND_BACKUP_FILE);
PackWrite *const param = protocolCommandParam(command);
// Continue if the next file would make the bundle too large. There may be a smaller one that will fit.
if (fileTotal > 0 && fileSize + file.size >= jobData->bundleSize)
{
fileIdx++;
continue;
}
// Add common parameters before first file
if (param == NULL)
{
param = protocolCommandParam(command);
String *const repoFile = strCatFmt(strNew(), STORAGE_REPO_BACKUP "/%s/", strZ(jobData->backupLabel));
if (jobData->bundle)
strCatFmt(repoFile, MANIFEST_PATH_BUNDLE "/%" PRIu64, jobData->bundleId);
else
strCatFmt(repoFile, "%s%s", strZ(file.name), strZ(compressExtStr(jobData->compressType)));
pckWriteStrP(param, repoFile);
pckWriteU32P(param, jobData->compressType);
pckWriteI32P(param, jobData->compressLevel);
pckWriteBoolP(param, jobData->delta);
pckWriteU64P(param, jobData->cipherSubPass == NULL ? cipherTypeNone : cipherTypeAes256Cbc);
pckWriteStrP(param, jobData->cipherSubPass);
}
pckWriteStrP(param, manifestPathPg(file.name));
pckWriteBoolP(param, !strEq(file.name, STRDEF(MANIFEST_TARGET_PGDATA "/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL)));
@@ -1631,24 +1704,28 @@ static ProtocolParallelJob *backupJobCallback(void *data, unsigned int clientIdx
pckWriteU64P(param, jobData->lsnStart);
pckWriteStrP(param, file.name);
pckWriteBoolP(param, file.reference != NULL);
pckWriteU32P(param, jobData->compressType);
pckWriteI32P(param, jobData->compressLevel);
pckWriteStrP(param, jobData->backupLabel);
pckWriteBoolP(param, jobData->delta);
pckWriteU64P(param, jobData->cipherSubPass == NULL ? cipherTypeNone : cipherTypeAes256Cbc);
pckWriteStrP(param, jobData->cipherSubPass);
fileTotal++;
fileSize += file.size;
// Remove job from the queue
lstRemoveIdx(queue, 0);
lstRemoveIdx(queue, fileIdx);
// Break if not bundling or bundle size has been reached
if (!jobData->bundle || fileSize >= jobData->bundleSize)
break;
}
if (fileTotal > 0)
{
// Assign job to result
MEM_CONTEXT_PRIOR_BEGIN()
{
result = protocolParallelJobNew(VARSTR(file.name), command);
result = protocolParallelJobNew(VARUINT64(jobData->bundleId), command);
jobData->bundleId++;
}
MEM_CONTEXT_PRIOR_END();
// Break out of the loop early since we found a job
break;
}
@@ -1686,10 +1763,33 @@ backupProcess(BackupData *backupData, Manifest *manifest, const String *lsnStart
bool hardLink = cfgOptionBool(cfgOptRepoHardlink) && storageFeature(storageRepoWrite(), storageFeatureHardLink);
bool backupStandby = cfgOptionBool(cfgOptBackupStandby);
BackupJobData jobData =
{
.manifest = manifest,
.backupLabel = backupLabel,
.backupStandby = backupStandby,
.compressType = compressTypeEnum(cfgOptionStrId(cfgOptCompressType)),
.compressLevel = cfgOptionInt(cfgOptCompressLevel),
.cipherType = cfgOptionStrId(cfgOptRepoCipherType),
.cipherSubPass = manifestCipherSubPass(manifest),
.delta = cfgOptionBool(cfgOptDelta),
.lsnStart = cfgOptionBool(cfgOptOnline) ? pgLsnFromStr(lsnStart) : 0xFFFFFFFFFFFFFFFF,
.bundle = cfgOptionBool(cfgOptBundle),
.bundleSize = cfgOptionTest(cfgOptBundleSize) ? cfgOptionUInt64(cfgOptBundleSize) : 0,
.bundleId = 1,
// Build expression to identify files that can be copied from the standby when standby backup is supported
.standbyExp = regExpNew(
strNewFmt(
"^((" MANIFEST_TARGET_PGDATA "/(" PG_PATH_BASE "|" PG_PATH_GLOBAL "|%s|" PG_PATH_PGMULTIXACT "))|"
MANIFEST_TARGET_PGTBLSPC ")/",
strZ(pgXactPath(backupData->version)))),
};
// If this is a full backup or hard-linked and paths are supported then create all paths explicitly so that empty paths will
// exist in to repo. Also create tablespace symlinks when symlinks are available. This makes it possible for the user to
// make a copy of the backup path and get a valid cluster.
if (backupType == backupTypeFull || hardLink)
if ((backupType == backupTypeFull && !jobData.bundle) || hardLink)
{
// Create paths when available
if (storageFeature(storageRepoWrite(), storageFeaturePath))
@@ -1726,27 +1826,7 @@ backupProcess(BackupData *backupData, Manifest *manifest, const String *lsnStart
}
// Generate processing queues
BackupJobData jobData =
{
.manifest = manifest,
.backupLabel = backupLabel,
.backupStandby = backupStandby,
.compressType = compressTypeEnum(cfgOptionStrId(cfgOptCompressType)),
.compressLevel = cfgOptionInt(cfgOptCompressLevel),
.cipherType = cfgOptionStrId(cfgOptRepoCipherType),
.cipherSubPass = manifestCipherSubPass(manifest),
.delta = cfgOptionBool(cfgOptDelta),
.lsnStart = cfgOptionBool(cfgOptOnline) ? pgLsnFromStr(lsnStart) : 0xFFFFFFFFFFFFFFFF,
// Build expression to identify files that can be copied from the standby when standby backup is supported
.standbyExp = regExpNew(
strNewFmt(
"^((" MANIFEST_TARGET_PGDATA "/(" PG_PATH_BASE "|" PG_PATH_GLOBAL "|%s|" PG_PATH_PGMULTIXACT "))|"
MANIFEST_TARGET_PGTBLSPC ")/",
strZ(pgXactPath(backupData->version)))),
};
sizeTotal = backupProcessQueue(manifest, &jobData);
sizeTotal = backupProcessQueue(backupData, manifest, &jobData);
// Create the parallel executor
ProtocolParallel *parallelExec = protocolParallelNew(
@@ -1789,10 +1869,8 @@ backupProcess(BackupData *backupData, Manifest *manifest, const String *lsnStart
backupJobResult(
manifest,
backupStandby && protocolParallelJobProcessId(job) > 1 ? backupData->hostStandby : backupData->hostPrimary,
storagePathP(
protocolParallelJobProcessId(job) > 1 ? storagePgIdx(pgIdx) : backupData->storagePrimary,
manifestPathPg(manifestFileFind(manifest, varStr(protocolParallelJobKey(job))).name)), fileRemove, job,
sizeTotal, &sizeProgress);
protocolParallelJobProcessId(job) > 1 ? storagePgIdx(pgIdx) : backupData->storagePrimary,
fileRemove, job, jobData.bundle, sizeTotal, &sizeProgress);
}
// A keep-alive is required here for the remote holding open the backup connection
@@ -1866,7 +1944,9 @@ backupProcess(BackupData *backupData, Manifest *manifest, const String *lsnStart
{
const String *const path = strNewFmt("%s/%s", strZ(backupPathExp), strZ(manifestPath(manifest, pathIdx)->name));
if (backupType == backupTypeFull || hardLink || storagePathExistsP(storageRepo(), path))
// Always sync the path if it exists or if the backup is full (without bundling) or hardlinked. In the latter cases
// the directory should always exist so we want to error if it does not.
if ((backupType == backupTypeFull && !jobData.bundle) || hardLink || storagePathExistsP(storageRepo(), path))
storagePathSyncP(storageRepoWrite(), path);
}
}
@@ -2125,7 +2205,8 @@ cmdBackup(void)
// Build the manifest
Manifest *manifest = manifestNewBuild(
backupData->storagePrimary, infoPg.version, infoPg.catalogVersion, cfgOptionBool(cfgOptOnline),
cfgOptionBool(cfgOptChecksumPage), strLstNewVarLst(cfgOptionLst(cfgOptExclude)), backupStartResult.tablespaceList);
cfgOptionBool(cfgOptChecksumPage), cfgOptionBool(cfgOptBundle), strLstNewVarLst(cfgOptionLst(cfgOptExclude)),
backupStartResult.tablespaceList);
// Validate the manifest using the copy start time
manifestBuildValidate(

View File

@@ -17,6 +17,7 @@ Backup File
#include "common/regExp.h"
#include "common/type/convert.h"
#include "common/type/json.h"
#include "info/manifest.h"
#include "postgres/interface.h"
#include "storage/helper.h"
@@ -35,232 +36,266 @@ segmentNumber(const String *pgFile)
}
/**********************************************************************************************************************************/
BackupFileResult
List *
backupFile(
const String *pgFile, bool pgFileIgnoreMissing, uint64_t pgFileSize, bool pgFileCopyExactSize, const String *pgFileChecksum,
bool pgFileChecksumPage, uint64_t pgFileChecksumPageLsnLimit, const String *repoFile, bool repoFileHasReference,
CompressType repoFileCompressType, int repoFileCompressLevel, const String *backupLabel, bool delta, CipherType cipherType,
const String *cipherPass)
const String *const repoFile, const CompressType repoFileCompressType, const int repoFileCompressLevel,
const bool delta, const CipherType cipherType, const String *const cipherPass, const List *const fileList)
{
FUNCTION_LOG_BEGIN(logLevelDebug);
FUNCTION_LOG_PARAM(STRING, pgFile); // Database file to copy to the repo
FUNCTION_LOG_PARAM(BOOL, pgFileIgnoreMissing); // Is it OK if the database file is missing?
FUNCTION_LOG_PARAM(UINT64, pgFileSize); // Size of the database file
FUNCTION_LOG_PARAM(BOOL, pgFileCopyExactSize); // Copy only pgFileSize bytes even if the file has grown
FUNCTION_LOG_PARAM(STRING, pgFileChecksum); // Checksum to verify the database file
FUNCTION_LOG_PARAM(BOOL, pgFileChecksumPage); // Should page checksums be validated
FUNCTION_LOG_PARAM(UINT64, pgFileChecksumPageLsnLimit); // Upper LSN limit to which page checksums must be valid
FUNCTION_LOG_PARAM(STRING, repoFile); // Destination in the repo to copy the pg file
FUNCTION_LOG_PARAM(BOOL, repoFileHasReference); // Does the repo file exist in a prior backup in the set?
FUNCTION_LOG_PARAM(STRING, repoFile); // Repo file
FUNCTION_LOG_PARAM(ENUM, repoFileCompressType); // Compress type for repo file
FUNCTION_LOG_PARAM(INT, repoFileCompressLevel); // Compression level for repo file
FUNCTION_LOG_PARAM(STRING, backupLabel); // Label of current backup
FUNCTION_LOG_PARAM(BOOL, delta); // Is the delta option on?
FUNCTION_LOG_PARAM(STRING_ID, cipherType); // Encryption type
FUNCTION_TEST_PARAM(STRING, cipherPass); // Password to access the repo file if encrypted
FUNCTION_LOG_PARAM(LIST, fileList); // List of files to backup
FUNCTION_LOG_END();
ASSERT(pgFile != NULL);
ASSERT(repoFile != NULL);
ASSERT(backupLabel != NULL);
ASSERT((cipherType == cipherTypeNone && cipherPass == NULL) || (cipherType != cipherTypeNone && cipherPass != NULL));
ASSERT(fileList != NULL && !lstEmpty(fileList));
// Backup file results
BackupFileResult result = {.backupCopyResult = backupCopyResultCopy};
List *result = NULL;
MEM_CONTEXT_TEMP_BEGIN()
{
// Generate complete repo path and add compression extension if needed
const String *repoPathFile = strNewFmt(
STORAGE_REPO_BACKUP "/%s/%s%s", strZ(backupLabel), strZ(repoFile), strZ(compressExtStr(repoFileCompressType)));
result = lstNewP(sizeof(BackupFileResult));
// If checksum is defined then the file needs to be checked. If delta option then check the DB and possibly the repo, else
// just check the repo.
if (pgFileChecksum != NULL)
// Check files to determine which ones need to be copied
for (unsigned int fileIdx = 0; fileIdx < lstSize(fileList); fileIdx++)
{
// Does the file in pg match the checksum and size passed?
bool pgFileMatch = false;
const BackupFile *const file = lstGet(fileList, fileIdx);
ASSERT(file->pgFile != NULL);
ASSERT(file->manifestFile != NULL);
// If delta, then check the DB checksum and possibly the repo. If the checksum does not match in either case then
// recopy.
if (delta)
BackupFileResult *const fileResult = lstAdd(
result, &(BackupFileResult){.manifestFile = file->manifestFile, .backupCopyResult = backupCopyResultCopy});
// If checksum is defined then the file needs to be checked. If delta option then check the DB and possibly the repo,
// else just check the repo.
if (file->pgFileChecksum != NULL)
{
// Generate checksum/size for the pg file. Only read as many bytes as passed in pgFileSize. If the file has grown
// since the manifest was built we don't need to consider the extra bytes since they will be replayed from WAL
// during recovery.
IoRead *read = storageReadIo(
storageNewReadP(
storagePg(), pgFile, .ignoreMissing = pgFileIgnoreMissing,
.limit = pgFileCopyExactSize ? VARUINT64(pgFileSize) : NULL));
ioFilterGroupAdd(ioReadFilterGroup(read), cryptoHashNew(HASH_TYPE_SHA1_STR));
ioFilterGroupAdd(ioReadFilterGroup(read), ioSizeNew());
// Does the file in pg match the checksum and size passed?
bool pgFileMatch = false;
// If the pg file exists check the checksum/size
if (ioReadDrain(read))
// If delta, then check the DB checksum and possibly the repo. If the checksum does not match in either case then
// recopy.
if (delta)
{
const String *pgTestChecksum = pckReadStrP(
ioFilterGroupResultP(ioReadFilterGroup(read), CRYPTO_HASH_FILTER_TYPE));
uint64_t pgTestSize = pckReadU64P(ioFilterGroupResultP(ioReadFilterGroup(read), SIZE_FILTER_TYPE));
// Generate checksum/size for the pg file. Only read as many bytes as passed in pgFileSize. If the file has
// grown since the manifest was built we don't need to consider the extra bytes since they will be replayed from
// WAL during recovery.
IoRead *read = storageReadIo(
storageNewReadP(
storagePg(), file->pgFile, .ignoreMissing = file->pgFileIgnoreMissing,
.limit = file->pgFileCopyExactSize ? VARUINT64(file->pgFileSize) : NULL));
ioFilterGroupAdd(ioReadFilterGroup(read), cryptoHashNew(HASH_TYPE_SHA1_STR));
ioFilterGroupAdd(ioReadFilterGroup(read), ioSizeNew());
// Does the pg file match?
if (pgFileSize == pgTestSize && strEq(pgFileChecksum, pgTestChecksum))
// If the pg file exists check the checksum/size
if (ioReadDrain(read))
{
pgFileMatch = true;
// If it matches and is a reference to a previous backup then no need to copy the file
if (repoFileHasReference)
{
MEM_CONTEXT_PRIOR_BEGIN()
{
result.backupCopyResult = backupCopyResultNoOp;
result.copySize = pgTestSize;
result.copyChecksum = strDup(pgTestChecksum);
}
MEM_CONTEXT_PRIOR_END();
}
}
}
// Else the source file is missing from the database so skip this file
else
result.backupCopyResult = backupCopyResultSkip;
}
// If this is not a delta backup or it is and the file exists and the checksum from the DB matches, then also test the
// checksum of the file in the repo (unless it is in a prior backup) and if the checksum doesn't match, then there may
// be corruption in the repo, so recopy
if (!delta || !repoFileHasReference)
{
// If this is a delta backup and the file is missing from the DB, then remove it from the repo (backupManifestUpdate
// will remove it from the manifest)
if (result.backupCopyResult == backupCopyResultSkip)
{
storageRemoveP(storageRepoWrite(), repoPathFile);
}
else if (!delta || pgFileMatch)
{
// Check the repo file in a try block because on error (e.g. missing or corrupt file that can't be decrypted or
// decompressed) we should recopy rather than ending the backup.
TRY_BEGIN()
{
// Generate checksum/size for the repo file
IoRead *read = storageReadIo(storageNewReadP(storageRepo(), repoPathFile));
if (cipherType != cipherTypeNone)
{
ioFilterGroupAdd(
ioReadFilterGroup(read), cipherBlockNew(cipherModeDecrypt, cipherType, BUFSTR(cipherPass), NULL));
}
// Decompress the file if compressed
if (repoFileCompressType != compressTypeNone)
ioFilterGroupAdd(ioReadFilterGroup(read), decompressFilter(repoFileCompressType));
ioFilterGroupAdd(ioReadFilterGroup(read), cryptoHashNew(HASH_TYPE_SHA1_STR));
ioFilterGroupAdd(ioReadFilterGroup(read), ioSizeNew());
ioReadDrain(read);
// Test checksum/size
const String *pgTestChecksum = pckReadStrP(
ioFilterGroupResultP(ioReadFilterGroup(read), CRYPTO_HASH_FILTER_TYPE));
uint64_t pgTestSize = pckReadU64P(ioFilterGroupResultP(ioReadFilterGroup(read), SIZE_FILTER_TYPE));
// No need to recopy if checksum/size match
if (pgFileSize == pgTestSize && strEq(pgFileChecksum, pgTestChecksum))
// Does the pg file match?
if (file->pgFileSize == pgTestSize && strEq(file->pgFileChecksum, pgTestChecksum))
{
MEM_CONTEXT_PRIOR_BEGIN()
pgFileMatch = true;
// If it matches and is a reference to a previous backup then no need to copy the file
if (file->manifestFileHasReference)
{
result.backupCopyResult = backupCopyResultChecksum;
result.copySize = pgTestSize;
result.copyChecksum = strDup(pgTestChecksum);
MEM_CONTEXT_BEGIN(lstMemContext(result))
{
fileResult->backupCopyResult = backupCopyResultNoOp;
fileResult->copySize = pgTestSize;
fileResult->copyChecksum = strDup(pgTestChecksum);
}
MEM_CONTEXT_END();
}
MEM_CONTEXT_PRIOR_END();
}
// Else recopy when repo file is not as expected
else
result.backupCopyResult = backupCopyResultReCopy;
}
// Recopy on any kind of error
CATCH_ANY()
{
result.backupCopyResult = backupCopyResultReCopy;
}
TRY_END();
// Else the source file is missing from the database so skip this file
else
fileResult->backupCopyResult = backupCopyResultSkip;
}
}
}
// Copy the file
if (result.backupCopyResult == backupCopyResultCopy || result.backupCopyResult == backupCopyResultReCopy)
{
// Is the file compressible during the copy?
bool compressible = repoFileCompressType == compressTypeNone && cipherType == cipherTypeNone;
// Setup pg file for read. Only read as many bytes as passed in pgFileSize. If the file is growing it does no good to
// copy data past the end of the size recorded in the manifest since those blocks will need to be replayed from WAL
// during recovery.
StorageRead *read = storageNewReadP(
storagePg(), pgFile, .ignoreMissing = pgFileIgnoreMissing, .compressible = compressible,
.limit = pgFileCopyExactSize ? VARUINT64(pgFileSize) : NULL);
ioFilterGroupAdd(ioReadFilterGroup(storageReadIo(read)), cryptoHashNew(HASH_TYPE_SHA1_STR));
ioFilterGroupAdd(ioReadFilterGroup(storageReadIo(read)), ioSizeNew());
// Add page checksum filter
if (pgFileChecksumPage)
{
ioFilterGroupAdd(
ioReadFilterGroup(storageReadIo(read)), pageChecksumNew(segmentNumber(pgFile), PG_SEGMENT_PAGE_DEFAULT,
pgFileChecksumPageLsnLimit));
}
// Add compression
if (repoFileCompressType != compressTypeNone)
{
ioFilterGroupAdd(
ioReadFilterGroup(storageReadIo(read)), compressFilter(repoFileCompressType, repoFileCompressLevel));
}
// If there is a cipher then add the encrypt filter
if (cipherType != cipherTypeNone)
{
ioFilterGroupAdd(
ioReadFilterGroup(
storageReadIo(read)), cipherBlockNew(cipherModeEncrypt, cipherType, BUFSTR(cipherPass), NULL));
}
// Setup the repo file for write. There is no need to write the file atomically (e.g. via a temp file on Posix) because
// checksums are tested on resume after a failed backup. The path does not need to be synced for each file because all
// paths are synced at the end of the backup.
StorageWrite *write = storageNewWriteP(
storageRepoWrite(), repoPathFile, .compressible = compressible, .noAtomic = true, .noSyncPath = true);
ioFilterGroupAdd(ioWriteFilterGroup(storageWriteIo(write)), ioSizeNew());
// Open the source and destination and copy the file
if (storageCopy(read, write))
{
MEM_CONTEXT_PRIOR_BEGIN()
// If this is not a delta backup or it is and the file exists and the checksum from the DB matches, then also test
// the checksum of the file in the repo (unless it is in a prior backup) and if the checksum doesn't match, then
// there may be corruption in the repo, so recopy
if (!delta || !file->manifestFileHasReference)
{
// Get sizes and checksum
result.copySize = pckReadU64P(
ioFilterGroupResultP(ioReadFilterGroup(storageReadIo(read)), SIZE_FILTER_TYPE));
result.copyChecksum = strDup(
pckReadStrP(ioFilterGroupResultP(ioReadFilterGroup(storageReadIo(read)), CRYPTO_HASH_FILTER_TYPE)));
result.repoSize =
pckReadU64P(ioFilterGroupResultP(ioWriteFilterGroup(storageWriteIo(write)), SIZE_FILTER_TYPE));
// Get results of page checksum validation
if (pgFileChecksumPage)
// If this is a delta backup and the file is missing from the DB, then remove it from the repo
// (backupManifestUpdate will remove it from the manifest)
if (fileResult->backupCopyResult == backupCopyResultSkip)
{
result.pageChecksumResult = pckDup(
ioFilterGroupResultPackP(ioReadFilterGroup(storageReadIo(read)), PAGE_CHECKSUM_FILTER_TYPE));
storageRemoveP(storageRepoWrite(), repoFile);
}
else if (!delta || pgFileMatch)
{
// Check the repo file in a try block because on error (e.g. missing or corrupt file that can't be decrypted
// or decompressed) we should recopy rather than ending the backup.
TRY_BEGIN()
{
// Generate checksum/size for the repo file
IoRead *read = storageReadIo(storageNewReadP(storageRepo(), repoFile));
if (cipherType != cipherTypeNone)
{
ioFilterGroupAdd(
ioReadFilterGroup(read),
cipherBlockNew(cipherModeDecrypt, cipherType, BUFSTR(cipherPass), NULL));
}
// Decompress the file if compressed
if (repoFileCompressType != compressTypeNone)
ioFilterGroupAdd(ioReadFilterGroup(read), decompressFilter(repoFileCompressType));
ioFilterGroupAdd(ioReadFilterGroup(read), cryptoHashNew(HASH_TYPE_SHA1_STR));
ioFilterGroupAdd(ioReadFilterGroup(read), ioSizeNew());
ioReadDrain(read);
// Test checksum/size
const String *pgTestChecksum = pckReadStrP(
ioFilterGroupResultP(ioReadFilterGroup(read), CRYPTO_HASH_FILTER_TYPE));
uint64_t pgTestSize = pckReadU64P(ioFilterGroupResultP(ioReadFilterGroup(read), SIZE_FILTER_TYPE));
// No need to recopy if checksum/size match
if (file->pgFileSize == pgTestSize && strEq(file->pgFileChecksum, pgTestChecksum))
{
MEM_CONTEXT_BEGIN(lstMemContext(result))
{
fileResult->backupCopyResult = backupCopyResultChecksum;
fileResult->copySize = pgTestSize;
fileResult->copyChecksum = strDup(pgTestChecksum);
}
MEM_CONTEXT_END();
}
// Else recopy when repo file is not as expected
else
fileResult->backupCopyResult = backupCopyResultReCopy;
}
// Recopy on any kind of error
CATCH_ANY()
{
fileResult->backupCopyResult = backupCopyResultReCopy;
}
TRY_END();
}
}
MEM_CONTEXT_PRIOR_END();
}
// Else if source file is missing and the read setup indicated ignore a missing file, the database removed it so skip it
else
result.backupCopyResult = backupCopyResultSkip;
}
// Are the files compressible during the copy?
const bool compressible = repoFileCompressType == compressTypeNone && cipherType == cipherTypeNone;
// Copy files that need to be copied
StorageWrite *write = NULL;
uint64_t bundleOffset = 0;
for (unsigned int fileIdx = 0; fileIdx < lstSize(fileList); fileIdx++)
{
const BackupFile *const file = lstGet(fileList, fileIdx);
BackupFileResult *const fileResult = lstGet(result, fileIdx);
if (fileResult->backupCopyResult == backupCopyResultCopy || fileResult->backupCopyResult == backupCopyResultReCopy)
{
// Setup pg file for read. Only read as many bytes as passed in pgFileSize. If the file is growing it does no good
// to copy data past the end of the size recorded in the manifest since those blocks will need to be replayed from
// WAL during recovery.
StorageRead *read = storageNewReadP(
storagePg(), file->pgFile, .ignoreMissing = file->pgFileIgnoreMissing, .compressible = compressible,
.limit = file->pgFileCopyExactSize ? VARUINT64(file->pgFileSize) : NULL);
ioFilterGroupAdd(ioReadFilterGroup(storageReadIo(read)), cryptoHashNew(HASH_TYPE_SHA1_STR));
ioFilterGroupAdd(ioReadFilterGroup(storageReadIo(read)), ioSizeNew());
// Add page checksum filter
if (file->pgFileChecksumPage)
{
ioFilterGroupAdd(
ioReadFilterGroup(storageReadIo(read)),
pageChecksumNew(segmentNumber(file->pgFile), PG_SEGMENT_PAGE_DEFAULT, file->pgFileChecksumPageLsnLimit));
}
// Add compression
if (repoFileCompressType != compressTypeNone)
{
ioFilterGroupAdd(
ioReadFilterGroup(storageReadIo(read)), compressFilter(repoFileCompressType, repoFileCompressLevel));
}
// If there is a cipher then add the encrypt filter
if (cipherType != cipherTypeNone)
{
ioFilterGroupAdd(
ioReadFilterGroup(storageReadIo(read)),
cipherBlockNew(cipherModeEncrypt, cipherType, BUFSTR(cipherPass), NULL));
}
// Add size filter last to calculate repo size
ioFilterGroupAdd(ioReadFilterGroup(storageReadIo(read)), ioSizeNew());
// Open the source and destination and copy the file
if (ioReadOpen(storageReadIo(read)))
{
if (write == NULL)
{
// Setup the repo file for write. There is no need to write the file atomically (e.g. via a temp file on
// Posix) because checksums are tested on resume after a failed backup. The path does not need to be synced
// for each file because all paths are synced at the end of the backup.
write = storageNewWriteP(
storageRepoWrite(), repoFile, .compressible = compressible, .noAtomic = true, .noSyncPath = true);
ioWriteOpen(storageWriteIo(write));
}
// Copy data from source to destination
ioCopy(storageReadIo(read), storageWriteIo(write));
// Close the source
ioReadClose(storageReadIo(read));
MEM_CONTEXT_BEGIN(lstMemContext(result))
{
// Get sizes and checksum
fileResult->copySize = pckReadU64P(
ioFilterGroupResultP(ioReadFilterGroup(storageReadIo(read)), SIZE_FILTER_TYPE, .idx = 0));
fileResult->bundleOffset = bundleOffset;
fileResult->copyChecksum = strDup(
pckReadStrP(ioFilterGroupResultP(ioReadFilterGroup(storageReadIo(read)), CRYPTO_HASH_FILTER_TYPE)));
fileResult->repoSize = pckReadU64P(
ioFilterGroupResultP(ioReadFilterGroup(storageReadIo(read)), SIZE_FILTER_TYPE, .idx = 1));
// Get results of page checksum validation
if (file->pgFileChecksumPage)
{
fileResult->pageChecksumResult = pckDup(
ioFilterGroupResultPackP(ioReadFilterGroup(storageReadIo(read)), PAGE_CHECKSUM_FILTER_TYPE));
}
}
MEM_CONTEXT_END();
// Free the read object. This is very important if many files are being read because they can each contain a lot
// of buffers.
storageReadFree(read);
bundleOffset += fileResult->repoSize;
}
// Else if source file is missing and the read setup indicated ignore a missing file, the database removed it so
// skip it
else
fileResult->backupCopyResult = backupCopyResultSkip;
}
}
// Close the repository file if it was opened
if (write != NULL)
ioWriteClose(storageWriteIo(write));
lstMove(result, memContextPrior());
}
MEM_CONTEXT_TEMP_END();

View File

@@ -23,20 +23,34 @@ typedef enum
/***********************************************************************************************************************************
Functions
***********************************************************************************************************************************/
// Copy a file from the archive to the specified destination
typedef struct BackupFile
{
const String *pgFile; // Pg file to backup
bool pgFileIgnoreMissing; // Ignore missing pg file
uint64_t pgFileSize; // Expected pg file size
bool pgFileCopyExactSize; // Copy only pg expected size
const String *pgFileChecksum; // Expected pg file checksum
bool pgFileChecksumPage; // Validate page checksums?
uint64_t pgFileChecksumPageLsnLimit; // Upper limit of pages to validate
const String *manifestFile; // Repo file
bool manifestFileHasReference; // Reference to prior backup, if any
} BackupFile;
// Copy a file from the PostgreSQL data directory to the repository
typedef struct BackupFileResult
{
const String *manifestFile; // Manifest file
BackupCopyResult backupCopyResult;
uint64_t copySize;
String *copyChecksum;
uint64_t bundleOffset; // Offset in bundle if any
uint64_t repoSize;
Pack *pageChecksumResult;
} BackupFileResult;
BackupFileResult backupFile(
const String *pgFile, bool pgFileIgnoreMissing, uint64_t pgFileSize, bool pgFileCopyExactSize, const String *pgFileChecksum,
bool pgFileChecksumPage, uint64_t pgFileChecksumPageLsnLimit, const String *repoFile, bool repoFileHasReference,
CompressType repoFileCompressType, int repoFileCompressLevel, const String *backupLabel, bool delta, CipherType cipherType,
const String *cipherPass);
List *backupFile(
const String *repoFile, CompressType repoFileCompressType, int repoFileCompressLevel, bool delta, CipherType cipherType,
const String *cipherPass, const List *fileList);
#endif

View File

@@ -27,35 +27,51 @@ backupFileProtocol(PackRead *const param, ProtocolServer *const server)
MEM_CONTEXT_TEMP_BEGIN()
{
// Backup file
const String *const pgFile = pckReadStrP(param);
const bool pgFileIgnoreMissing = pckReadBoolP(param);
const uint64_t pgFileSize = pckReadU64P(param);
const bool pgFileCopyExactSize = pckReadBoolP(param);
const String *const pgFileChecksum = pckReadStrP(param);
const bool pgFileChecksumPage = pckReadBoolP(param);
const uint64_t pgFileChecksumPageLsnLimit = pckReadU64P(param);
// Backup options that apply to all files
const String *const repoFile = pckReadStrP(param);
const bool repoFileHasReference = pckReadBoolP(param);
const CompressType repoFileCompressType = (CompressType)pckReadU32P(param);
const int repoFileCompressLevel = pckReadI32P(param);
const String *const backupLabel = pckReadStrP(param);
const bool delta = pckReadBoolP(param);
const CipherType cipherType = (CipherType)pckReadU64P(param);
const String *const cipherPass = pckReadStrP(param);
const BackupFileResult result = backupFile(
pgFile, pgFileIgnoreMissing, pgFileSize, pgFileCopyExactSize, pgFileChecksum, pgFileChecksumPage,
pgFileChecksumPageLsnLimit, repoFile, repoFileHasReference, repoFileCompressType, repoFileCompressLevel,
backupLabel, delta, cipherType, cipherPass);
// Build the file list
List *fileList = lstNewP(sizeof(BackupFile));
while (!pckReadNullP(param))
{
BackupFile file = {.pgFile = pckReadStrP(param)};
file.pgFileIgnoreMissing = pckReadBoolP(param);
file.pgFileSize = pckReadU64P(param);
file.pgFileCopyExactSize = pckReadBoolP(param);
file.pgFileChecksum = pckReadStrP(param);
file.pgFileChecksumPage = pckReadBoolP(param);
file.pgFileChecksumPageLsnLimit = pckReadU64P(param);
file.manifestFile = pckReadStrP(param);
file.manifestFileHasReference = pckReadBoolP(param);
lstAdd(fileList, &file);
}
// Backup file
const List *const result = backupFile(
repoFile, repoFileCompressType, repoFileCompressLevel, delta, cipherType, cipherPass, fileList);
// Return result
PackWrite *const resultPack = protocolPackNew();
pckWriteU32P(resultPack, result.backupCopyResult);
pckWriteU64P(resultPack, result.copySize);
pckWriteU64P(resultPack, result.repoSize);
pckWriteStrP(resultPack, result.copyChecksum);
pckWritePackP(resultPack, result.pageChecksumResult);
for (unsigned int resultIdx = 0; resultIdx < lstSize(result); resultIdx++)
{
const BackupFileResult *const fileResult = lstGet(result, resultIdx);
pckWriteStrP(resultPack, fileResult->manifestFile);
pckWriteU32P(resultPack, fileResult->backupCopyResult);
pckWriteU64P(resultPack, fileResult->copySize);
pckWriteU64P(resultPack, fileResult->bundleOffset);
pckWriteU64P(resultPack, fileResult->repoSize);
pckWriteStrP(resultPack, fileResult->copyChecksum);
pckWritePackP(resultPack, fileResult->pageChecksumResult);
}
protocolServerDataPut(server, resultPack);
protocolServerDataEndPut(server);

View File

@@ -16,20 +16,23 @@ Restore File
#include "common/io/io.h"
#include "common/log.h"
#include "config/config.h"
#include "info/manifest.h"
#include "storage/helper.h"
/**********************************************************************************************************************************/
bool
restoreFile(
const String *repoFile, unsigned int repoIdx, const String *repoFileReference, CompressType repoFileCompressType,
const String *pgFile, const String *pgFileChecksum, bool pgFileZero, uint64_t pgFileSize, time_t pgFileModified,
mode_t pgFileMode, const String *pgFileUser, const String *pgFileGroup, time_t copyTimeBegin, bool delta, bool deltaForce,
const String *cipherPass)
const String *const repoFile, unsigned int repoIdx, const uint64_t offset, const Variant *const limit,
const CompressType repoFileCompressType, const String *const pgFile, const String *const pgFileChecksum, const bool pgFileZero,
const uint64_t pgFileSize, const time_t pgFileModified, const mode_t pgFileMode, const String *const pgFileUser,
const String *const pgFileGroup, const time_t copyTimeBegin, const bool delta, const bool deltaForce,
const String *const cipherPass)
{
FUNCTION_LOG_BEGIN(logLevelDebug);
FUNCTION_LOG_PARAM(STRING, repoFile);
FUNCTION_LOG_PARAM(UINT, repoIdx);
FUNCTION_LOG_PARAM(STRING, repoFileReference);
FUNCTION_LOG_PARAM(UINT64, offset);
FUNCTION_LOG_PARAM(VARIANT, limit);
FUNCTION_LOG_PARAM(ENUM, repoFileCompressType);
FUNCTION_LOG_PARAM(STRING, pgFile);
FUNCTION_LOG_PARAM(STRING, pgFileChecksum);
@@ -46,8 +49,8 @@ restoreFile(
FUNCTION_LOG_END();
ASSERT(repoFile != NULL);
ASSERT(repoFileReference != NULL);
ASSERT(pgFile != NULL);
ASSERT(limit == NULL || varType(limit) == varTypeUInt64);
// Was the file copied?
bool result = true;
@@ -166,11 +169,7 @@ restoreFile(
// Copy file
storageCopyP(
storageNewReadP(
storageRepoIdx(repoIdx),
strNewFmt(
STORAGE_REPO_BACKUP "/%s/%s%s", strZ(repoFileReference), strZ(repoFile),
strZ(compressExtStr(repoFileCompressType))),
.compressible = compressible),
storageRepoIdx(repoIdx), repoFile, .compressible = compressible, .offset = offset, .limit = limit),
pgFileWrite);
// Validate checksum

View File

@@ -14,7 +14,7 @@ Functions
***********************************************************************************************************************************/
// Copy a file from the backup to the specified destination
bool restoreFile(
const String *repoFile, unsigned int repoIdx, const String *repoFileReference, CompressType repoFileCompressType,
const String *repoFile, unsigned int repoIdx, uint64_t offset, const Variant *limit, CompressType repoFileCompressType,
const String *pgFile, const String *pgFileChecksum, bool pgFileZero, uint64_t pgFileSize, time_t pgFileModified,
mode_t pgFileMode, const String *pgFileUser, const String *pgFileGroup, time_t copyTimeBegin, bool delta, bool deltaForce,
const String *cipherPass);

View File

@@ -28,8 +28,17 @@ restoreFileProtocol(PackRead *const param, ProtocolServer *const server)
{
// Restore file
const String *const repoFile = pckReadStrP(param);
uint64_t offset = 0;
const Variant *limit = NULL;
if (pckReadBoolP(param))
{
offset = pckReadU64P(param);
limit = varNewUInt64(pckReadU64P(param));
}
const unsigned int repoIdx = pckReadU32P(param);
const String *const repoFileReference = pckReadStrP(param);
const CompressType repoFileCompressType = (CompressType)pckReadU32P(param);
const String *const pgFile = pckReadStrP(param);
const String *const pgFileChecksum = pckReadStrP(param);
@@ -45,8 +54,8 @@ restoreFileProtocol(PackRead *const param, ProtocolServer *const server)
const String *const cipherPass = pckReadStrP(param);
const bool result = restoreFile(
repoFile, repoIdx, repoFileReference, repoFileCompressType, pgFile, pgFileChecksum, pgFileZero, pgFileSize,
pgFileModified, pgFileMode, pgFileUser, pgFileGroup, copyTimeBegin, delta, deltaForce, cipherPass);
repoFile, repoIdx, offset, limit, repoFileCompressType, pgFile, pgFileChecksum, pgFileZero, pgFileSize, pgFileModified,
pgFileMode, pgFileUser, pgFileGroup, copyTimeBegin, delta, deltaForce, cipherPass);
// Return result
protocolServerDataPut(server, pckWriteBoolP(protocolPackNew(), result));

View File

@@ -2194,9 +2194,28 @@ static ProtocolParallelJob *restoreJobCallback(void *data, unsigned int clientId
ProtocolCommand *command = protocolCommandNew(PROTOCOL_COMMAND_RESTORE_FILE);
PackWrite *const param = protocolCommandParam(command);
pckWriteStrP(param, file.name);
const String *const repoPath = strNewFmt(
STORAGE_REPO_BACKUP "/%s/",
strZ(file.reference != NULL ? file.reference : manifestData(jobData->manifest)->backupLabel));
if (file.bundleId != 0)
{
pckWriteStrP(param, strNewFmt("%s" MANIFEST_PATH_BUNDLE "/%" PRIu64, strZ(repoPath), file.bundleId));
pckWriteBoolP(param, true);
pckWriteU64P(param, file.bundleOffset);
pckWriteU64P(param, file.sizeRepo);
}
else
{
pckWriteStrP(
param,
strNewFmt(
"%s%s%s", strZ(repoPath), strZ(file.name),
strZ(compressExtStr(manifestData(jobData->manifest)->backupOptionCompressType))));
pckWriteBoolP(param, false);
}
pckWriteU32P(param, jobData->repoIdx);
pckWriteStrP(param, file.reference != NULL ? file.reference : manifestData(jobData->manifest)->backupLabel);
pckWriteU32P(param, manifestData(jobData->manifest)->backupOptionCompressType);
pckWriteStrP(param, restoreFilePgPath(jobData->manifest, file.name));
pckWriteStrP(param, STR(file.checksumSha1));
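
The two repo path shapes differ only in the tail: a bundled file addresses the shared bundle object by id (and is then read at offset/limit), while a plain file keeps its per-file name plus the compression extension. A standalone sketch with hypothetical label and values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const char *repoPath = "backup/demo/20220214-130000F";     // hypothetical backup label
    const uint64_t bundleId = 1;

    char bundled[256];
    char plain[256];

    // Bundled member: <repo path>/bundle/<bundle id>, read at offset/limit.
    snprintf(bundled, sizeof(bundled), "%s/bundle/%" PRIu64, repoPath, bundleId);

    // Plain file: <repo path>/<file name><compress ext>, read whole.
    snprintf(plain, sizeof(plain), "%s/%s%s", repoPath, "pg_data/backup_label", ".gz");

    printf("%s\n%s\n", bundled, plain);
    return 0;
}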

View File

@ -17,10 +17,14 @@ Verify File
/**********************************************************************************************************************************/
VerifyResult
verifyFile(
const String *filePathName, const String *fileChecksum, uint64_t fileSize, const String *cipherPass)
const String *const filePathName, const uint64_t offset, const Variant *const limit, const CompressType compressType,
const String *const fileChecksum, const uint64_t fileSize, const String *const cipherPass)
{
FUNCTION_LOG_BEGIN(logLevelDebug);
FUNCTION_LOG_PARAM(STRING, filePathName); // Fully qualified file name
FUNCTION_LOG_PARAM(UINT64, offset); // Offset to read in file
FUNCTION_LOG_PARAM(VARIANT, limit); // Limit to read from file
FUNCTION_LOG_PARAM(ENUM, compressType); // Compression type
FUNCTION_LOG_PARAM(STRING, fileChecksum); // Checksum for the file
FUNCTION_LOG_PARAM(UINT64, fileSize); // Size of file
FUNCTION_TEST_PARAM(STRING, cipherPass); // Password to access the repo file if encrypted
@ -28,6 +32,7 @@ verifyFile(
ASSERT(filePathName != NULL);
ASSERT(fileChecksum != NULL);
ASSERT(limit == NULL || varType(limit) == varTypeUInt64);
// Is the file valid?
VerifyResult result = verifyOk;
@ -35,7 +40,8 @@ verifyFile(
MEM_CONTEXT_TEMP_BEGIN()
{
// Prepare the file for reading
IoRead *read = storageReadIo(storageNewReadP(storageRepo(), filePathName, .ignoreMissing = true));
IoRead *read = storageReadIo(
storageNewReadP(storageRepo(), filePathName, .ignoreMissing = true, .offset = offset, .limit = limit));
IoFilterGroup *filterGroup = ioReadFilterGroup(read);
// Add decryption filter
@ -43,8 +49,8 @@ verifyFile(
ioFilterGroupAdd(filterGroup, cipherBlockNew(cipherModeDecrypt, cipherTypeAes256Cbc, BUFSTR(cipherPass), NULL));
// Add decompression filter
if (compressTypeFromName(filePathName) != compressTypeNone)
ioFilterGroupAdd(filterGroup, decompressFilter(compressTypeFromName(filePathName)));
if (compressType != compressTypeNone)
ioFilterGroupAdd(filterGroup, decompressFilter(compressType));
// Add sha1 filter
ioFilterGroupAdd(filterGroup, cryptoHashNew(HASH_TYPE_SHA1_STR));
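
Since a bundled member's repo name no longer carries a compression extension, the compression type is now passed in rather than derived from the file name, and the filters run on the offset/limit slice in a fixed order: decrypt, then decompress, then SHA-1 over the plaintext. A condensed sketch of that pipeline assembled from the calls above (the cipherPass guard is assumed, not shown in the hunk):

// Condensed sketch: range read -> decrypt -> decompress -> sha1.
IoRead *read = storageReadIo(
    storageNewReadP(storageRepo(), filePathName, .ignoreMissing = true, .offset = offset, .limit = limit));

if (cipherPass != NULL)
    ioFilterGroupAdd(ioReadFilterGroup(read), cipherBlockNew(cipherModeDecrypt, cipherTypeAes256Cbc, BUFSTR(cipherPass), NULL));

if (compressType != compressTypeNone)
    ioFilterGroupAdd(ioReadFilterGroup(read), decompressFilter(compressType));

ioFilterGroupAdd(ioReadFilterGroup(read), cryptoHashNew(HASH_TYPE_SHA1_STR));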

View File

@ -24,6 +24,7 @@ Functions
***********************************************************************************************************************************/
// Verify a file in the pgBackRest repository
VerifyResult verifyFile(
const String *filePathName, const String *fileChecksum, uint64_t fileSize, const String *cipherPass);
const String *filePathName, uint64_t offset, const Variant *limit, CompressType compressType, const String *fileChecksum,
uint64_t fileSize, const String *cipherPass);
#endif

View File

@ -28,11 +28,22 @@ verifyFileProtocol(PackRead *const param, ProtocolServer *const server)
{
// Verify file
const String *const filePathName = pckReadStrP(param);
uint64_t offset = 0;
const Variant *limit = NULL;
if (pckReadBoolP(param))
{
offset = pckReadU64P(param);
limit = varNewUInt64(pckReadU64P(param));
}
const CompressType compressType = (CompressType)pckReadU32P(param);
const String *const fileChecksum = pckReadStrP(param);
const uint64_t fileSize = pckReadU64P(param);
const String *const cipherPass = pckReadStrP(param);
const VerifyResult result = verifyFile(filePathName, fileChecksum, fileSize, cipherPass);
const VerifyResult result = verifyFile(filePathName, offset, limit, compressType, fileChecksum, fileSize, cipherPass);
// Return result
protocolServerDataPut(server, pckWriteU32P(protocolPackNew(), result));

View File

@ -757,6 +757,8 @@ verifyArchive(void *data)
PackWrite *const param = protocolCommandParam(command);
pckWriteStrP(param, filePathName);
pckWriteBoolP(param, false);
pckWriteU32P(param, compressTypeFromName(filePathName));
pckWriteStrP(param, checksum);
pckWriteU64P(param, archiveResult->pgWalInfo.size);
pckWriteStrP(param, jobData->walCipherPass);
@ -907,81 +909,106 @@ verifyBackup(void *data)
// Track the files verified in order to determine when the processing of the backup is complete
backupResult->totalFileVerify++;
// Check if the file is referenced in a prior backup
const String *fileBackupLabel = NULL;
if (fileData.reference != NULL)
// Check the file if it is not zero-length or the backup is not bundled (zero-length files are not stored
// in the repo when bundling is enabled)
if (fileData.size != 0 || !manifestData(jobData->manifest)->bundle)
{
// If the prior backup is not in the result list, then that backup was never processed (likely due to the --set
// option) so verify the file
unsigned int backupPriorIdx = lstFindIdx(jobData->backupResultList, &fileData.reference);
// Check if the file is referenced in a prior backup
const String *fileBackupLabel = NULL;
if (backupPriorIdx == LIST_NOT_FOUND)
if (fileData.reference != NULL)
{
fileBackupLabel = fileData.reference;
}
// Else the backup this file references has a result so check the processing state for the referenced backup
else
{
VerifyBackupResult *backupResultPrior = lstGet(jobData->backupResultList, backupPriorIdx);
// If the prior backup is not in the result list, then that backup was never processed (likely due to the
// --set option) so verify the file
unsigned int backupPriorIdx = lstFindIdx(jobData->backupResultList, &fileData.reference);
// If the verify-state of the backup is not complete then verify the file
if (!backupResultPrior->fileVerifyComplete)
if (backupPriorIdx == LIST_NOT_FOUND)
{
fileBackupLabel = fileData.reference;
}
// Else skip verification
// Else the backup this file references has a result so check the processing state for the referenced backup
else
{
String *priorFile = strNewFmt(
"%s/%s%s", strZ(fileData.reference), strZ(fileData.name),
strZ(compressExtStr((manifestData(jobData->manifest))->backupOptionCompressType)));
VerifyBackupResult *backupResultPrior = lstGet(jobData->backupResultList, backupPriorIdx);
unsigned int backupPriorInvalidIdx = lstFindIdx(backupResultPrior->invalidFileList, &priorFile);
// If the file is in the invalid file list of the prior backup where it is referenced then add the file
// as invalid to this backup result and set the backup result status; since an error was already logged
// for this file, don't log it again
if (backupPriorInvalidIdx != LIST_NOT_FOUND)
// If the verify-state of the backup is not complete then verify the file
if (!backupResultPrior->fileVerifyComplete)
{
VerifyInvalidFile *invalidFile = lstGet(
backupResultPrior->invalidFileList, backupPriorInvalidIdx);
verifyInvalidFileAdd(backupResult->invalidFileList, invalidFile->reason, invalidFile->fileName);
backupResult->status = backupInvalid;
fileBackupLabel = fileData.reference;
}
// Else the file in the prior backup was valid so increment the total valid files for this backup
// Else skip verification
else
{
backupResult->totalFileValid++;
String *priorFile = strNewFmt(
"%s/%s%s", strZ(fileData.reference), strZ(fileData.name),
strZ(compressExtStr((manifestData(jobData->manifest))->backupOptionCompressType)));
unsigned int backupPriorInvalidIdx = lstFindIdx(backupResultPrior->invalidFileList, &priorFile);
// If the file is in the invalid file list of the prior backup where it is referenced then add the
// file as invalid to this backup result and set the backup result status; since an error was already
// logged for this file, don't log it again
if (backupPriorInvalidIdx != LIST_NOT_FOUND)
{
VerifyInvalidFile *invalidFile = lstGet(
backupResultPrior->invalidFileList, backupPriorInvalidIdx);
verifyInvalidFileAdd(backupResult->invalidFileList, invalidFile->reason, invalidFile->fileName);
backupResult->status = backupInvalid;
}
// Else the file in the prior backup was valid so increment the total valid files for this backup
else
{
backupResult->totalFileValid++;
}
}
}
}
// Else file is not referenced in a prior backup
else
fileBackupLabel = backupResult->backupLabel;
// If backup label is not null then send it off for processing
if (fileBackupLabel != NULL)
{
// Set up the job
ProtocolCommand *command = protocolCommandNew(PROTOCOL_COMMAND_VERIFY_FILE);
PackWrite *const param = protocolCommandParam(command);
const String *const filePathName = strNewFmt(
STORAGE_REPO_BACKUP "/%s/%s%s", strZ(fileBackupLabel), strZ(fileData.name),
strZ(compressExtStr((manifestData(jobData->manifest))->backupOptionCompressType)));
if (fileData.bundleId != 0)
{
pckWriteStrP(
param,
strNewFmt(
STORAGE_REPO_BACKUP "/%s/" MANIFEST_PATH_BUNDLE "/%" PRIu64, strZ(fileBackupLabel),
fileData.bundleId));
pckWriteBoolP(param, true);
pckWriteU64P(param, fileData.bundleOffset);
pckWriteU64P(param, fileData.sizeRepo);
}
else
{
pckWriteStrP(param, filePathName);
pckWriteBoolP(param, false);
}
pckWriteU32P(param, manifestData(jobData->manifest)->backupOptionCompressType);
// If the checksum is not present in the manifest, it will be calculated by manifest load
pckWriteStrP(param, STR(fileData.checksumSha1));
pckWriteU64P(param, fileData.size);
pckWriteStrP(param, jobData->backupCipherPass);
// Assign job to result (prepend backup label being processed to the key since some files are in a prior
// backup)
result = protocolParallelJobNew(
VARSTR(strNewFmt("%s/%s", strZ(backupResult->backupLabel), strZ(filePathName))), command);
}
}
// Else file is not referenced in a prior backup
// Else mark the zero-length bundled file as valid; it is stored only in the manifest so there is nothing to read
else
fileBackupLabel = backupResult->backupLabel;
// If backup label is not null then send it off for processing
if (fileBackupLabel != NULL)
{
// Set up the job
ProtocolCommand *command = protocolCommandNew(PROTOCOL_COMMAND_VERIFY_FILE);
PackWrite *const param = protocolCommandParam(command);
const String *const filePathName = strNewFmt(
STORAGE_REPO_BACKUP "/%s/%s%s", strZ(fileBackupLabel), strZ(fileData.name),
strZ(compressExtStr((manifestData(jobData->manifest))->backupOptionCompressType)));
pckWriteStrP(param, filePathName);
// If the checksum is not present in the manifest, it will be calculated by manifest load
pckWriteStrP(param, STR(fileData.checksumSha1));
pckWriteU64P(param, fileData.size);
pckWriteStrP(param, jobData->backupCipherPass);
// Assign job to result (prepend backup label being processed to the key since some files are in a prior backup)
result = protocolParallelJobNew(
VARSTR(strNewFmt("%s/%s", strZ(backupResult->backupLabel), strZ(filePathName))), command);
}
backupResult->totalFileValid++;
// Increment the index to point to the next file
jobData->manifestFileIdx++;

View File

@ -53,6 +53,8 @@ Option constants
#define CFGOPT_ARCHIVE_TIMEOUT "archive-timeout"
#define CFGOPT_BACKUP_STANDBY "backup-standby"
#define CFGOPT_BUFFER_SIZE "buffer-size"
#define CFGOPT_BUNDLE "bundle"
#define CFGOPT_BUNDLE_SIZE "bundle-size"
#define CFGOPT_CHECKSUM_PAGE "checksum-page"
#define CFGOPT_CIPHER_PASS "cipher-pass"
#define CFGOPT_CMD "cmd"
@ -126,7 +128,7 @@ Option constants
#define CFGOPT_TLS_SERVER_PORT "tls-server-port"
#define CFGOPT_TYPE "type"
#define CFG_OPTION_TOTAL 150
#define CFG_OPTION_TOTAL 152
/***********************************************************************************************************************************
Option value constants
@ -363,6 +365,8 @@ typedef enum
cfgOptArchiveTimeout,
cfgOptBackupStandby,
cfgOptBufferSize,
cfgOptBundle,
cfgOptBundleSize,
cfgOptChecksumPage,
cfgOptCipherPass,
cfgOptCmd,

View File

@ -16,6 +16,7 @@ static const StringPub parseRuleValueStr[] =
PARSE_RULE_STRPUB("/var/log/pgbackrest"),
PARSE_RULE_STRPUB("/var/spool/pgbackrest"),
PARSE_RULE_STRPUB("1"),
PARSE_RULE_STRPUB("100MiB"),
PARSE_RULE_STRPUB("128MiB"),
PARSE_RULE_STRPUB("15"),
PARSE_RULE_STRPUB("1800"),
@ -63,6 +64,7 @@ typedef enum
parseRuleValStrQT_FS_var_FS_log_FS_pgbackrest_QT,
parseRuleValStrQT_FS_var_FS_spool_FS_pgbackrest_QT,
parseRuleValStrQT_1_QT,
parseRuleValStrQT_100MiB_QT,
parseRuleValStrQT_128MiB_QT,
parseRuleValStrQT_15_QT,
parseRuleValStrQT_1800_QT,
@ -258,10 +260,12 @@ static const int64_t parseRuleValueInt[] =
9999999,
16777216,
86400000,
104857600,
134217728,
604800000,
1073741824,
1099511627776,
1125899906842624,
4503599627370496,
};
@ -304,10 +308,12 @@ typedef enum
parseRuleValInt9999999,
parseRuleValInt16777216,
parseRuleValInt86400000,
parseRuleValInt104857600,
parseRuleValInt134217728,
parseRuleValInt604800000,
parseRuleValInt1073741824,
parseRuleValInt1099511627776,
parseRuleValInt1125899906842624,
parseRuleValInt4503599627370496,
} ParseRuleValueInt;
@ -1142,6 +1148,72 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] =
),
),
// -----------------------------------------------------------------------------------------------------------------------------
PARSE_RULE_OPTION
(
PARSE_RULE_OPTION_NAME("bundle"),
PARSE_RULE_OPTION_TYPE(cfgOptTypeBoolean),
PARSE_RULE_OPTION_NEGATE(true),
PARSE_RULE_OPTION_RESET(true),
PARSE_RULE_OPTION_REQUIRED(true),
PARSE_RULE_OPTION_SECTION(cfgSectionGlobal),
PARSE_RULE_OPTION_COMMAND_ROLE_MAIN_VALID_LIST
(
PARSE_RULE_OPTION_COMMAND(cfgCmdBackup)
),
PARSE_RULE_OPTIONAL
(
PARSE_RULE_OPTIONAL_GROUP
(
PARSE_RULE_OPTIONAL_DEFAULT
(
PARSE_RULE_VAL_BOOL_FALSE,
),
),
),
),
// -----------------------------------------------------------------------------------------------------------------------------
PARSE_RULE_OPTION
(
PARSE_RULE_OPTION_NAME("bundle-size"),
PARSE_RULE_OPTION_TYPE(cfgOptTypeSize),
PARSE_RULE_OPTION_RESET(true),
PARSE_RULE_OPTION_REQUIRED(true),
PARSE_RULE_OPTION_SECTION(cfgSectionGlobal),
PARSE_RULE_OPTION_COMMAND_ROLE_MAIN_VALID_LIST
(
PARSE_RULE_OPTION_COMMAND(cfgCmdBackup)
),
PARSE_RULE_OPTIONAL
(
PARSE_RULE_OPTIONAL_GROUP
(
PARSE_RULE_OPTIONAL_DEPEND
(
PARSE_RULE_VAL_OPT(cfgOptBundle),
PARSE_RULE_VAL_BOOL_TRUE,
),
PARSE_RULE_OPTIONAL_ALLOW_RANGE
(
PARSE_RULE_VAL_INT(parseRuleValInt1048576),
PARSE_RULE_VAL_INT(parseRuleValInt1125899906842624),
),
PARSE_RULE_OPTIONAL_DEFAULT
(
PARSE_RULE_VAL_INT(parseRuleValInt104857600),
PARSE_RULE_VAL_STR(parseRuleValStrQT_100MiB_QT),
),
),
),
),
// -----------------------------------------------------------------------------------------------------------------------------
PARSE_RULE_OPTION
(
@ -7709,6 +7781,12 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] =
(
PARSE_RULE_OPTIONAL_GROUP
(
PARSE_RULE_OPTIONAL_DEPEND
(
PARSE_RULE_VAL_OPT(cfgOptBundle),
PARSE_RULE_VAL_BOOL_FALSE,
),
PARSE_RULE_OPTIONAL_DEFAULT
(
PARSE_RULE_VAL_BOOL_TRUE,
@ -9087,6 +9165,8 @@ static const ConfigOption optionResolveOrder[] =
cfgOptArchiveTimeout,
cfgOptBackupStandby,
cfgOptBufferSize,
cfgOptBundle,
cfgOptBundleSize,
cfgOptChecksumPage,
cfgOptCipherPass,
cfgOptCmd,
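
The new integer constants above are just the byte values of the sizes quoted in the bundle-size rules: the 100MiB default and the 1MiB to 1PiB allowed range. A standalone sanity check of the arithmetic:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    // bundle-size default and allowed range from the parse rules above.
    assert(100 * (uint64_t)1024 * 1024 == 104857600);                           // 100MiB default
    assert(1 * (uint64_t)1024 * 1024 == 1048576);                               // 1MiB minimum
    assert((uint64_t)1024 * 1024 * 1024 * 1024 * 1024 == 1125899906842624);     // 1PiB maximum
    return 0;
}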

View File

@ -50,6 +50,8 @@ STRING_STATIC(MANIFEST_SECTION_TARGET_PATH_DEFAULT_STR, "target:path
STRING_STATIC(MANIFEST_KEY_BACKUP_ARCHIVE_START_STR, MANIFEST_KEY_BACKUP_ARCHIVE_START);
#define MANIFEST_KEY_BACKUP_ARCHIVE_STOP "backup-archive-stop"
STRING_STATIC(MANIFEST_KEY_BACKUP_ARCHIVE_STOP_STR, MANIFEST_KEY_BACKUP_ARCHIVE_STOP);
#define MANIFEST_KEY_BACKUP_BUNDLE "backup-bundle"
STRING_STATIC(MANIFEST_KEY_BACKUP_BUNDLE_STR, MANIFEST_KEY_BACKUP_BUNDLE);
#define MANIFEST_KEY_BACKUP_LABEL "backup-label"
STRING_STATIC(MANIFEST_KEY_BACKUP_LABEL_STR, MANIFEST_KEY_BACKUP_LABEL);
#define MANIFEST_KEY_BACKUP_LSN_START "backup-lsn-start"
@ -66,6 +68,10 @@ STRING_STATIC(MANIFEST_SECTION_TARGET_PATH_DEFAULT_STR, "target:path
STRING_STATIC(MANIFEST_KEY_BACKUP_TIMESTAMP_STOP_STR, MANIFEST_KEY_BACKUP_TIMESTAMP_STOP);
#define MANIFEST_KEY_BACKUP_TYPE "backup-type"
STRING_STATIC(MANIFEST_KEY_BACKUP_TYPE_STR, MANIFEST_KEY_BACKUP_TYPE);
#define MANIFEST_KEY_BUNDLE_ID "bni"
VARIANT_STRDEF_STATIC(MANIFEST_KEY_BUNDLE_ID_VAR, MANIFEST_KEY_BUNDLE_ID);
#define MANIFEST_KEY_BUNDLE_OFFSET "bno"
VARIANT_STRDEF_STATIC(MANIFEST_KEY_BUNDLE_OFFSET_VAR, MANIFEST_KEY_BUNDLE_OFFSET);
#define MANIFEST_KEY_CHECKSUM "checksum"
VARIANT_STRDEF_STATIC(MANIFEST_KEY_CHECKSUM_VAR, MANIFEST_KEY_CHECKSUM);
#define MANIFEST_KEY_CHECKSUM_PAGE "checksum-page"
@ -209,6 +215,7 @@ static time_t manifestPackBaseTime = -1;
typedef enum
{
manifestFilePackFlagReference,
manifestFilePackFlagBundle,
manifestFilePackFlagChecksumPage,
manifestFilePackFlagChecksumPageError,
manifestFilePackFlagChecksumPageErrorList,
@ -246,6 +253,9 @@ manifestFilePack(const Manifest *const manifest, const ManifestFile *const file)
if (file->reference != NULL)
flag |= 1 << manifestFilePackFlagReference;
if (file->bundleId != 0)
flag |= 1 << manifestFilePackFlagBundle;
if (file->mode != manifest->fileModeDefault)
flag |= 1 << manifestFilePackFlagMode;
@ -294,6 +304,13 @@ manifestFilePack(const Manifest *const manifest, const ManifestFile *const file)
// Repo size
cvtUInt64ToVarInt128(file->sizeRepo, buffer, &bufferPos, sizeof(buffer));
// Bundle
if (flag & (1 << manifestFilePackFlagBundle))
{
cvtUInt64ToVarInt128(file->bundleId, buffer, &bufferPos, sizeof(buffer));
cvtUInt64ToVarInt128(file->bundleOffset, buffer, &bufferPos, sizeof(buffer));
}
// Allocate memory for the file pack
uint8_t *const result = memNew(
sizeof(StringPub) + strSize(file->name) + 1 + bufferPos + (file->checksumPageErrorList != NULL ?
@ -383,6 +400,13 @@ manifestFileUnpack(const Manifest *const manifest, const ManifestFilePack *const
// Repo size
result.sizeRepo = cvtUInt64FromVarInt128((const uint8_t *)filePack, &bufferPos);
// Bundle
if (flag & (1 << manifestFilePackFlagBundle))
{
result.bundleId = cvtUInt64FromVarInt128((const uint8_t *)filePack, &bufferPos);
result.bundleOffset = cvtUInt64FromVarInt128((const uint8_t *)filePack, &bufferPos);
}
// Checksum page error
result.checksumPageError = flag & (1 << manifestFilePackFlagChecksumPageError) ? true : false;
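
Storing bundle id/offset through cvtUInt64ToVarInt128() keeps the file pack compact since small values take only a byte or two. A minimal standalone sketch of a base-128 varint in the same spirit (assumed LEB128-style encoding; illustrative, not the pgBackRest implementation):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

// Seven data bits per byte, high bit set while more bytes follow.
static size_t varIntPut(uint64_t value, uint8_t *buffer)
{
    size_t size = 0;

    while (value >= 0x80)
    {
        buffer[size++] = (uint8_t)(value | 0x80);
        value >>= 7;
    }

    buffer[size++] = (uint8_t)value;
    return size;
}

static uint64_t varIntGet(const uint8_t *buffer, size_t *pos)
{
    uint64_t result = 0;

    for (unsigned int shift = 0;; shift += 7)
    {
        const uint8_t byte = buffer[(*pos)++];
        result |= (uint64_t)(byte & 0x7F) << shift;

        if ((byte & 0x80) == 0)
            break;
    }

    return result;
}

int main(void)
{
    uint8_t buffer[20];
    size_t pos = 0;

    // Bundle id 3 and offset 1048576 round-trip in 1 + 3 bytes.
    size_t size = varIntPut(3, buffer);
    size += varIntPut(1048576, buffer + size);

    assert(varIntGet(buffer, &pos) == 3);
    assert(varIntGet(buffer, &pos) == 1048576);
    assert(pos == size);
    return 0;
}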
@ -1214,8 +1238,8 @@ manifestBuildCallback(void *data, const StorageInfo *info)
Manifest *
manifestNewBuild(
const Storage *storagePg, unsigned int pgVersion, unsigned int pgCatalogVersion, bool online, bool checksumPage,
const StringList *excludeList, const VariantList *tablespaceList)
const Storage *const storagePg, const unsigned int pgVersion, const unsigned int pgCatalogVersion, const bool online,
const bool checksumPage, const bool bundle, const StringList *const excludeList, const VariantList *const tablespaceList)
{
FUNCTION_LOG_BEGIN(logLevelDebug);
FUNCTION_LOG_PARAM(STORAGE, storagePg);
@ -1223,6 +1247,7 @@ manifestNewBuild(
FUNCTION_LOG_PARAM(UINT, pgCatalogVersion);
FUNCTION_LOG_PARAM(BOOL, online);
FUNCTION_LOG_PARAM(BOOL, checksumPage);
FUNCTION_LOG_PARAM(BOOL, bundle);
FUNCTION_LOG_PARAM(STRING_LIST, excludeList);
FUNCTION_LOG_PARAM(VARIANT_LIST, tablespaceList);
FUNCTION_LOG_END();
@ -1243,6 +1268,7 @@ manifestNewBuild(
this->pub.data.backupType = backupTypeFull;
this->pub.data.backupOptionOnline = online;
this->pub.data.backupOptionChecksumPage = varNewBool(checksumPage);
this->pub.data.bundle = bundle;
MEM_CONTEXT_TEMP_BEGIN()
{
@ -1584,7 +1610,8 @@ manifestBuildIncr(Manifest *this, const Manifest *manifestPrior, BackupType type
manifestFileUpdate(
this, file.name, file.size, filePrior.sizeRepo, filePrior.checksumSha1,
VARSTR(filePrior.reference != NULL ? filePrior.reference : manifestPrior->pub.data.backupLabel),
filePrior.checksumPage, filePrior.checksumPageError, filePrior.checksumPageErrorList);
filePrior.checksumPage, filePrior.checksumPageError, filePrior.checksumPageErrorList,
filePrior.bundleId, filePrior.bundleOffset);
}
}
}
@ -1814,6 +1841,12 @@ manifestLoadCallback(void *callbackData, const String *section, const String *ke
file.checksumPageErrorList = jsonFromVar(checksumPageErrorList);
}
// Bundle info
file.bundleId = varUInt64(kvGetDefault(fileKv, MANIFEST_KEY_BUNDLE_ID_VAR, VARUINT64(0)));
if (file.bundleId != 0)
file.bundleOffset = varUInt64(kvGetDefault(fileKv, MANIFEST_KEY_BUNDLE_OFFSET_VAR, VARUINT64(0)));
// Group
const Variant *value = kvGet(fileKv, MANIFEST_KEY_GROUP_VAR);
@ -2006,6 +2039,8 @@ manifestLoadCallback(void *callbackData, const String *section, const String *ke
manifest->pub.data.archiveStart = strDup(varStr(value));
else if (strEq(key, MANIFEST_KEY_BACKUP_ARCHIVE_STOP_STR))
manifest->pub.data.archiveStop = strDup(varStr(value));
else if (strEq(key, MANIFEST_KEY_BACKUP_BUNDLE_STR))
manifest->pub.data.bundle = varBool(value);
else if (strEq(key, MANIFEST_KEY_BACKUP_LABEL_STR))
manifest->pub.data.backupLabel = strDup(varStr(value));
else if (strEq(key, MANIFEST_KEY_BACKUP_LSN_START_STR))
@ -2231,6 +2266,12 @@ manifestSaveCallback(void *callbackData, const String *sectionNext, InfoSave *in
jsonFromStr(manifest->pub.data.archiveStop));
}
if (manifest->pub.data.bundle)
{
infoSaveValue(
infoSaveData, MANIFEST_SECTION_BACKUP_STR, MANIFEST_KEY_BACKUP_BUNDLE_STR, jsonFromBool(manifest->pub.data.bundle));
}
infoSaveValue(
infoSaveData, MANIFEST_SECTION_BACKUP_STR, MANIFEST_KEY_BACKUP_LABEL_STR,
jsonFromStr(manifest->pub.data.backupLabel));
@ -2433,6 +2474,15 @@ manifestSaveCallback(void *callbackData, const String *sectionNext, InfoSave *in
const ManifestFile file = manifestFile(manifest, fileIdx);
KeyValue *fileKv = kvNew();
// Bundle info
if (file.bundleId != 0)
{
kvPut(fileKv, MANIFEST_KEY_BUNDLE_ID_VAR, VARUINT64(file.bundleId));
if (file.bundleOffset != 0)
kvPut(fileKv, MANIFEST_KEY_BUNDLE_OFFSET_VAR, VARUINT64(file.bundleOffset));
}
// Save if the file size is not zero and the checksum exists. The checksum might not exist if this is a partial
// save performed during a backup.
if (file.size != 0 && file.checksumSha1[0] != 0)
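
In the saved manifest the two keys appear inline with each file entry. A bundled file might look like this (hypothetical values and formatting; the checksum is reused from the tests below; bno is omitted when the offset is zero, and zero-length files carry neither key since they are not stored in the repo):

[target:file]
pg_data/base/1/12345={"bni":1,"bno":16384,"checksum":"9bc8ab2dda60ef4beed07d1e19ce0676d5edde67","size":8192,"timestamp":1644849600}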
@ -2731,7 +2781,7 @@ void
manifestFileUpdate(
Manifest *const this, const String *const name, const uint64_t size, const uint64_t sizeRepo, const char *const checksumSha1,
const Variant *const reference, const bool checksumPage, const bool checksumPageError,
const String *const checksumPageErrorList)
const String *const checksumPageErrorList, const uint64_t bundleId, const uint64_t bundleOffset)
{
FUNCTION_TEST_BEGIN();
FUNCTION_TEST_PARAM(MANIFEST, this);
@ -2743,6 +2793,8 @@ manifestFileUpdate(
FUNCTION_TEST_PARAM(BOOL, checksumPage);
FUNCTION_TEST_PARAM(BOOL, checksumPageError);
FUNCTION_TEST_PARAM(STRING, checksumPageErrorList);
FUNCTION_TEST_PARAM(UINT64, bundleId);
FUNCTION_TEST_PARAM(UINT64, bundleOffset);
FUNCTION_TEST_END();
ASSERT(this != NULL);
@ -2776,6 +2828,10 @@ manifestFileUpdate(
file.checksumPageError = checksumPageError;
file.checksumPageErrorList = checksumPageErrorList;
// Update bundle info
file.bundleId = bundleId;
file.bundleOffset = bundleOffset;
manifestFilePackUpdate(this, filePack, &file);
FUNCTION_TEST_RETURN_VOID();

View File

@ -16,6 +16,9 @@ Constants
#define BACKUP_MANIFEST_FILE "backup.manifest"
STRING_DECLARE(BACKUP_MANIFEST_FILE_STR);
#define MANIFEST_PATH_BUNDLE "bundle"
STRING_DECLARE(MANIFEST_PATH_BUNDLE_STR);
#define MANIFEST_TARGET_PGDATA "pg_data"
STRING_DECLARE(MANIFEST_TARGET_PGDATA_STR);
#define MANIFEST_TARGET_PGTBLSPC "pg_tblspc"
@ -49,6 +52,7 @@ typedef struct ManifestData
time_t backupTimestampStart; // When did the backup start?
time_t backupTimestampStop; // When did the backup stop?
BackupType backupType; // Type of backup: full, diff, incr
bool bundle; // Does the backup bundle files?
// ??? Note that these fields are redundant and verbose since storing the start/stop lsn as a uint64 would be sufficient.
// However, we currently lack the functions to transform these values back and forth so this will do for now.
@ -100,6 +104,8 @@ typedef struct ManifestFile
const String *user; // User name
const String *group; // Group name
const String *reference; // Reference to a prior backup
uint64_t bundleId; // Bundle id
uint64_t bundleOffset; // Bundle offset
uint64_t size; // Original size
uint64_t sizeRepo; // Size in repo
time_t timestamp; // Original timestamp
@ -151,7 +157,7 @@ Constructors
***********************************************************************************************************************************/
// Build a new manifest for a PostgreSQL data directory
Manifest *manifestNewBuild(
const Storage *storagePg, unsigned int pgVersion, unsigned int pgCatalogVersion, bool online, bool checksumPage,
const Storage *storagePg, unsigned int pgVersion, unsigned int pgCatalogVersion, bool online, bool checksumPage, bool bundle,
const StringList *excludeList, const VariantList *tablespaceList);
// Load a manifest from IO
@ -317,7 +323,7 @@ manifestFileTotal(const Manifest *const this)
// Update a file with new data
void manifestFileUpdate(
Manifest *this, const String *name, uint64_t size, uint64_t sizeRepo, const char *checksumSha1, const Variant *reference,
bool checksumPage, bool checksumPageError, const String *checksumPageErrorList);
bool checksumPage, bool checksumPageError, const String *checksumPageErrorList, uint64_t bundleId, uint64_t bundleOffset);
/***********************************************************************************************************************************
Link functions and getters/setters

View File

@ -30,6 +30,8 @@ pg1-socket-path=[TEST_PATH]/db-primary/db
[global]
buffer-size=[BUFFER-SIZE]
bundle=y
bundle-size=1MiB
compress-level=3
compress-level-network=1
compress-type=none
@ -62,6 +64,8 @@ pg1-socket-path=[TEST_PATH]/db-standby/db
[global]
buffer-size=[BUFFER-SIZE]
bundle=y
bundle-size=1MiB
compress-level=3
compress-level-network=1
compress-type=none
@ -108,6 +112,8 @@ pg256-port=6544
[global]
buffer-size=[BUFFER-SIZE]
bundle=y
bundle-size=1MiB
compress-level=3
compress-level-network=1
compress-type=none
@ -141,6 +147,8 @@ pg1-socket-path=[TEST_PATH]/db-primary/db
[global]
buffer-size=[BUFFER-SIZE]
bundle=y
bundle-size=1MiB
compress-level=3
compress-level-network=1
compress-type=none
@ -173,6 +181,8 @@ pg1-socket-path=[TEST_PATH]/db-standby/db
[global]
buffer-size=[BUFFER-SIZE]
bundle=y
bundle-size=1MiB
compress-level=3
compress-level-network=1
compress-type=none
@ -219,6 +229,8 @@ pg256-port=6544
[global]
buffer-size=[BUFFER-SIZE]
bundle=y
bundle-size=1MiB
compress-level=3
compress-level-network=1
compress-type=none
@ -252,6 +264,8 @@ pg1-socket-path=[TEST_PATH]/db-primary/db
[global]
buffer-size=[BUFFER-SIZE]
bundle=y
bundle-size=1MiB
compress-level=3
compress-level-network=1
compress-type=none
@ -284,6 +298,8 @@ pg1-socket-path=[TEST_PATH]/db-standby/db
[global]
buffer-size=[BUFFER-SIZE]
bundle=y
bundle-size=1MiB
compress-level=3
compress-level-network=1
compress-type=none
@ -330,6 +346,8 @@ pg256-port=6544
[global]
buffer-size=[BUFFER-SIZE]
bundle=y
bundle-size=1MiB
compress-level=3
compress-level-network=1
compress-type=none
@ -382,6 +400,8 @@ pg1-socket-path=[TEST_PATH]/db-primary/db
[global]
buffer-size=[BUFFER-SIZE]
bundle=y
bundle-size=1MiB
compress-level=3
compress-level-network=1
compress-type=none
@ -416,6 +436,8 @@ pg1-socket-path=[TEST_PATH]/db-standby/db
[global]
buffer-size=[BUFFER-SIZE]
bundle=y
bundle-size=1MiB
compress-level=3
compress-level-network=1
compress-type=none
@ -463,6 +485,8 @@ pg256-port=6544
[global]
archive-async=y
buffer-size=[BUFFER-SIZE]
bundle=y
bundle-size=1MiB
compress-level=3
compress-level-network=1
compress-type=none
@ -500,6 +524,8 @@ pg1-socket-path=[TEST_PATH]/db-primary/db
[global]
buffer-size=[BUFFER-SIZE]
bundle=y
bundle-size=1MiB
compress-level=3
compress-level-network=1
compress-type=none
@ -534,6 +560,8 @@ pg1-socket-path=[TEST_PATH]/db-standby/db
[global]
buffer-size=[BUFFER-SIZE]
bundle=y
bundle-size=1MiB
compress-level=3
compress-level-network=1
compress-type=none
@ -581,6 +609,8 @@ pg256-port=6544
[global]
archive-async=y
buffer-size=[BUFFER-SIZE]
bundle=y
bundle-size=1MiB
compress-level=3
compress-level-network=1
compress-type=none
@ -634,6 +664,8 @@ pg1-socket-path=[TEST_PATH]/db-primary/db
[global]
buffer-size=[BUFFER-SIZE]
bundle=y
bundle-size=1MiB
compress-level=3
compress-level-network=1
compress-type=none
@ -668,6 +700,8 @@ pg1-socket-path=[TEST_PATH]/db-standby/db
[global]
buffer-size=[BUFFER-SIZE]
bundle=y
bundle-size=1MiB
compress-level=3
compress-level-network=1
compress-type=none
@ -715,6 +749,8 @@ pg256-port=6544
[global]
archive-async=y
buffer-size=[BUFFER-SIZE]
bundle=y
bundle-size=1MiB
compress-level=3
compress-level-network=1
compress-type=none

View File

@ -409,6 +409,7 @@ sub backupCreate
$oManifest->boolSet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_BACKUP_STANDBY, undef, false);
$oManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_ARCHIVE_START, undef, $strArchiveStart);
$oManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_ARCHIVE_STOP, undef, $strArchiveStop);
$oManifest->boolSet(MANIFEST_SECTION_BACKUP_OPTION, 'backup-bundle', undef, true);
$oManifest->boolSet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_CHECKSUM_PAGE, undef, true);
$oManifest->boolSet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_COMPRESS, undef, true);
$oManifest->numericSet(INI_SECTION_BACKREST, INI_KEY_FORMAT, undef, REPOSITORY_FORMAT);

View File

@ -413,7 +413,8 @@ sub backupEnd
# Make sure tablespace links are correct
if ($self->hasLink())
{
if ($strType eq CFGOPTVAL_BACKUP_TYPE_FULL || $self->hardLink())
if (($strType eq CFGOPTVAL_BACKUP_TYPE_FULL || $self->hardLink()) &&
!$oExpectedManifest->{&MANIFEST_SECTION_BACKUP}{'backup-bundle'})
{
my $hTablespaceManifest = storageTest()->manifest(
$self->repoBackupPath("${strBackup}/" . MANIFEST_TARGET_PGDATA . '/' . DB_PATH_PGTBLSPC));
@ -1174,6 +1175,13 @@ sub configCreate
$oParamHash{&CFGDEF_SECTION_GLOBAL}{'log-timestamp'} = 'n';
$oParamHash{&CFGDEF_SECTION_GLOBAL}{'buffer-size'} = '64k';
if ($oParam->{bBundle})
{
$oParamHash{&CFGDEF_SECTION_GLOBAL}{'bundle'} = 'y';
# Set the bundle size smaller for testing and because FakeGCS does not support multi-part upload
$oParamHash{&CFGDEF_SECTION_GLOBAL}{'bundle-size'} = '1MiB';
}
$oParamHash{&CFGDEF_SECTION_GLOBAL}{'log-path'} = $self->logPath();
$oParamHash{&CFGDEF_SECTION_GLOBAL}{'lock-path'} = $self->lockPath();
@ -2070,9 +2078,13 @@ sub restoreCompare
${$oExpectedManifestRef}{&MANIFEST_SECTION_TARGET_FILE}{$strName}{size});
}
# Remove repo-size from the manifest. ??? This could be improved to get actual sizes from the backup.
# Remove repo-size, bno, bni from the manifest
$oActualManifest->remove(MANIFEST_SECTION_TARGET_FILE, $strName, MANIFEST_SUBKEY_REPO_SIZE);
delete($oExpectedManifestRef->{&MANIFEST_SECTION_TARGET_FILE}{$strName}{&MANIFEST_SUBKEY_REPO_SIZE});
$oActualManifest->remove(MANIFEST_SECTION_TARGET_FILE, $strName, "bni");
delete($oExpectedManifestRef->{&MANIFEST_SECTION_TARGET_FILE}{$strName}{"bni"});
$oActualManifest->remove(MANIFEST_SECTION_TARGET_FILE, $strName, "bno");
delete($oExpectedManifestRef->{&MANIFEST_SECTION_TARGET_FILE}{$strName}{"bno"});
if ($oActualManifest->get(MANIFEST_SECTION_TARGET_FILE, $strName, MANIFEST_SUBKEY_SIZE) != 0)
{
@ -2134,6 +2146,8 @@ sub restoreCompare
${$oExpectedManifestRef}{&MANIFEST_SECTION_BACKUP_OPTION}{&MANIFEST_KEY_ARCHIVE_COPY});
$oActualManifest->set(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_BACKUP_STANDBY, undef,
${$oExpectedManifestRef}{&MANIFEST_SECTION_BACKUP_OPTION}{&MANIFEST_KEY_BACKUP_STANDBY});
$oActualManifest->set(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_BUFFER_SIZE, undef,
${$oExpectedManifestRef}{&MANIFEST_SECTION_BACKUP_OPTION}{&MANIFEST_KEY_BUFFER_SIZE});
$oActualManifest->set(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_COMPRESS, undef,
@ -2201,6 +2215,13 @@ sub restoreCompare
MANIFEST_SECTION_BACKUP, MANIFEST_KEY_TYPE, undef,
$oExpectedManifestRef->{&MANIFEST_SECTION_BACKUP}{&MANIFEST_KEY_TYPE});
if (defined($oExpectedManifestRef->{&MANIFEST_SECTION_BACKUP}{'backup-bundle'}))
{
$oActualManifest->set(
MANIFEST_SECTION_BACKUP, 'backup-bundle', undef,
$oExpectedManifestRef->{&MANIFEST_SECTION_BACKUP}{'backup-bundle'});
}
$oActualManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_LSN_START, undef,
${$oExpectedManifestRef}{&MANIFEST_SECTION_BACKUP}{&MANIFEST_KEY_LSN_START});
$oActualManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_LSN_STOP, undef,

View File

@ -146,7 +146,8 @@ sub setup
bHardlink => $bHostBackup ? undef : $$oConfigParam{bHardLink},
bArchiveAsync => $$oConfigParam{bArchiveAsync},
strStorage => $oConfigParam->{strStorage},
iRepoTotal => $oConfigParam->{iRepoTotal}});
iRepoTotal => $oConfigParam->{iRepoTotal},
bBundle => $oConfigParam->{bBundle}});
# Create backup config if backup host exists
if (defined($oHostBackup))
@ -156,7 +157,8 @@ sub setup
strCompressType => $$oConfigParam{strCompressType},
bHardlink => $$oConfigParam{bHardLink},
strStorage => $oConfigParam->{strStorage},
iRepoTotal => $oConfigParam->{iRepoTotal}});
iRepoTotal => $oConfigParam->{iRepoTotal},
bBundle => $oConfigParam->{bBundle}});
}
# If backup host is not defined set it to db-primary
else
@ -184,7 +186,8 @@ sub setup
bHardlink => $bHostBackup ? undef : $$oConfigParam{bHardLink},
bArchiveAsync => $$oConfigParam{bArchiveAsync},
strStorage => $oConfigParam->{strStorage},
iRepoTotal => $oConfigParam->{iRepoTotal}});
iRepoTotal => $oConfigParam->{iRepoTotal},
bBundle => $oConfigParam->{bBundle}});
}
# Create object storage

View File

@ -52,18 +52,18 @@ sub run
foreach my $rhRun
(
{pg => PG_VERSION_90, repoDest => HOST_DB_PRIMARY, tls => 0, storage => GCS, encrypt => 1, compress => BZ2, repo => 2},
{pg => PG_VERSION_91, repoDest => HOST_DB_STANDBY, tls => 1, storage => GCS, encrypt => 0, compress => GZ, repo => 1},
{pg => PG_VERSION_92, repoDest => HOST_DB_STANDBY, tls => 0, storage => POSIX, encrypt => 1, compress => NONE, repo => 1},
{pg => PG_VERSION_93, repoDest => HOST_BACKUP, tls => 0, storage => AZURE, encrypt => 0, compress => NONE, repo => 2},
{pg => PG_VERSION_94, repoDest => HOST_DB_STANDBY, tls => 0, storage => POSIX, encrypt => 1, compress => LZ4, repo => 1},
{pg => PG_VERSION_95, repoDest => HOST_BACKUP, tls => 1, storage => S3, encrypt => 0, compress => BZ2, repo => 1},
{pg => PG_VERSION_96, repoDest => HOST_BACKUP, tls => 0, storage => POSIX, encrypt => 0, compress => NONE, repo => 2},
{pg => PG_VERSION_10, repoDest => HOST_DB_STANDBY, tls => 1, storage => S3, encrypt => 1, compress => GZ, repo => 2},
{pg => PG_VERSION_11, repoDest => HOST_BACKUP, tls => 1, storage => AZURE, encrypt => 0, compress => ZST, repo => 2},
{pg => PG_VERSION_12, repoDest => HOST_BACKUP, tls => 0, storage => S3, encrypt => 1, compress => LZ4, repo => 1},
{pg => PG_VERSION_13, repoDest => HOST_DB_STANDBY, tls => 1, storage => GCS, encrypt => 0, compress => ZST, repo => 1},
{pg => PG_VERSION_14, repoDest => HOST_BACKUP, tls => 0, storage => POSIX, encrypt => 1, compress => LZ4, repo => 2},
{pg => '9.0', repoDest => HOST_DB_PRIMARY, tls => 0, storage => GCS, encrypt => 1, compress => BZ2, repo => 2, bnd => 1},
{pg => '9.1', repoDest => HOST_DB_STANDBY, tls => 1, storage => GCS, encrypt => 0, compress => GZ, repo => 1, bnd => 0},
{pg => '9.2', repoDest => HOST_DB_STANDBY, tls => 0, storage => POSIX, encrypt => 1, compress => NONE, repo => 1, bnd => 1},
{pg => '9.3', repoDest => HOST_BACKUP, tls => 0, storage => AZURE, encrypt => 0, compress => NONE, repo => 2, bnd => 0},
{pg => '9.4', repoDest => HOST_DB_STANDBY, tls => 0, storage => POSIX, encrypt => 1, compress => LZ4, repo => 1, bnd => 1},
{pg => '9.5', repoDest => HOST_BACKUP, tls => 1, storage => S3, encrypt => 0, compress => BZ2, repo => 1, bnd => 0},
{pg => '9.6', repoDest => HOST_BACKUP, tls => 0, storage => POSIX, encrypt => 0, compress => NONE, repo => 2, bnd => 1},
{pg => '10', repoDest => HOST_DB_STANDBY, tls => 1, storage => S3, encrypt => 1, compress => GZ, repo => 2, bnd => 0},
{pg => '11', repoDest => HOST_BACKUP, tls => 1, storage => AZURE, encrypt => 0, compress => ZST, repo => 2, bnd => 1},
{pg => '12', repoDest => HOST_BACKUP, tls => 0, storage => S3, encrypt => 1, compress => LZ4, repo => 1, bnd => 0},
{pg => '13', repoDest => HOST_DB_STANDBY, tls => 1, storage => GCS, encrypt => 0, compress => ZST, repo => 1, bnd => 1},
{pg => '14', repoDest => HOST_BACKUP, tls => 0, storage => POSIX, encrypt => 1, compress => LZ4, repo => 2, bnd => 0},
)
{
# Only run tests for this pg version
@ -78,6 +78,7 @@ sub run
my $bRepoEncrypt = $rhRun->{encrypt};
my $strCompressType = $rhRun->{compress};
my $iRepoTotal = $rhRun->{repo};
my $bBundle = $rhRun->{bnd};
# Use a specific VM and version of PostgreSQL for expect testing. This version will also be used to run tests that are not
# version specific.
@ -94,7 +95,7 @@ sub run
false, $self->expect(),
{bHostBackup => $bHostBackup, bStandby => $bHostStandby, bTls => $bTls, strBackupDestination => $strBackupDestination,
strCompressType => $strCompressType, bArchiveAsync => false, strStorage => $strStorage,
bRepoEncrypt => $bRepoEncrypt, iRepoTotal => $iRepoTotal});
bRepoEncrypt => $bRepoEncrypt, iRepoTotal => $iRepoTotal, bBundle => $bBundle});
# Some commands will fail because of the bogus host created when a standby is present. These options reset the bogus host
# so it won't interfere with commands that won't tolerate a connection failure.

View File

@ -42,95 +42,141 @@ testBackupValidateCallback(void *callbackData, const StorageInfo *info)
(strEqZ(info->name, BACKUP_MANIFEST_FILE) || strEqZ(info->name, BACKUP_MANIFEST_FILE INFO_COPY_EXT)))
return;
// Get manifest name
const String *manifestName = info->name;
strCatFmt(data->content, "%s {", strZ(info->name));
switch (info->type)
{
case storageTypeFile:
{
strCatZ(data->content, "file");
// Calculate checksum/size and decompress if needed
// ---------------------------------------------------------------------------------------------------------------------
StorageRead *read = storageNewReadP(
data->storage, data->path != NULL ? strNewFmt("%s/%s", strZ(data->path), strZ(info->name)) : info->name);
if (data->manifestData->backupOptionCompressType != compressTypeNone)
{
ioFilterGroupAdd(
ioReadFilterGroup(storageReadIo(read)), decompressFilter(data->manifestData->backupOptionCompressType));
manifestName = strSubN(
info->name, 0, strSize(info->name) - strSize(compressExtStr(data->manifestData->backupOptionCompressType)));
}
ioFilterGroupAdd(ioReadFilterGroup(storageReadIo(read)), cryptoHashNew(HASH_TYPE_SHA1_STR));
uint64_t size = bufUsed(storageGetP(read));
const String *checksum = pckReadStrP(
ioFilterGroupResultP(ioReadFilterGroup(storageReadIo(read)), CRYPTO_HASH_FILTER_TYPE));
strCatFmt(data->content, ", s=%" PRIu64, size);
// Check against the manifest
// ---------------------------------------------------------------------------------------------------------------------
ManifestFilePack **const filePack = manifestFilePackFindInternal(data->manifest, manifestName);
ManifestFile file = manifestFileUnpack(data->manifest, *filePack);
// Test size and repo-size. If compressed then set the repo-size to size so it will not be in test output. Even the same
// compression algorithm can give slightly different results based on the version so repo-size is not deterministic for
// compression.
if (size != file.size)
THROW_FMT(AssertError, "'%s' size does match manifest", strZ(manifestName));
if (info->size != file.sizeRepo)
THROW_FMT(AssertError, "'%s' repo size does match manifest", strZ(manifestName));
if (data->manifestData->backupOptionCompressType != compressTypeNone)
file.sizeRepo = file.size;
// Test the checksum. pg_control and WAL headers have different checksums depending on cpu architecture so remove
// the checksum from the test output.
if (!strEqZ(checksum, file.checksumSha1))
THROW_FMT(AssertError, "'%s' checksum does match manifest", strZ(manifestName));
if (strEqZ(manifestName, MANIFEST_TARGET_PGDATA "/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL) ||
strBeginsWith(
manifestName, strNewFmt(MANIFEST_TARGET_PGDATA "/%s/", strZ(pgWalPath(data->manifestData->pgVersion)))))
{
file.checksumSha1[0] = '\0';
}
// Test mode, user, group. These values are not in the manifest but we know what they should be based on the default
// mode and current user/group.
// ---------------------------------------------------------------------------------------------------------------------
if (info->mode != 0640)
THROW_FMT(AssertError, "'%s' mode is not 0640", strZ(manifestName));
THROW_FMT(AssertError, "'%s' mode is not 0640", strZ(info->name));
if (!strEq(info->user, TEST_USER_STR))
THROW_FMT(AssertError, "'%s' user should be '" TEST_USER "'", strZ(manifestName));
THROW_FMT(AssertError, "'%s' user should be '" TEST_USER "'", strZ(info->name));
if (!strEq(info->group, TEST_GROUP_STR))
THROW_FMT(AssertError, "'%s' group should be '" TEST_GROUP "'", strZ(manifestName));
THROW_FMT(AssertError, "'%s' group should be '" TEST_GROUP "'", strZ(info->name));
// Update changes to manifest file
manifestFilePackUpdate(data->manifest, filePack, &file);
// Build file list (needed because bundles can contain multiple files)
// ---------------------------------------------------------------------------------------------------------------------
List *const fileList = lstNewP(sizeof(ManifestFilePack **));
bool bundle = strBeginsWithZ(info->name, "bundle/");
if (bundle)
{
const uint64_t bundleId = cvtZToUInt64(strZ(strSub(info->name, sizeof("bundle"))));
for (unsigned int fileIdx = 0; fileIdx < manifestFileTotal(data->manifest); fileIdx++)
{
ManifestFilePack **const filePack = lstGet(data->manifest->pub.fileList, fileIdx);
if (manifestFileUnpack(data->manifest, *filePack).bundleId == bundleId)
lstAdd(fileList, &filePack);
}
}
else
{
const String *manifestName = info->name;
if (data->manifestData->backupOptionCompressType != compressTypeNone)
{
manifestName = strSubN(
info->name, 0, strSize(info->name) - strSize(compressExtStr(data->manifestData->backupOptionCompressType)));
}
ManifestFilePack **const filePack = manifestFilePackFindInternal(data->manifest, manifestName);
lstAdd(fileList, &filePack);
}
// Check files
// ---------------------------------------------------------------------------------------------------------------------
for (unsigned int fileIdx = 0; fileIdx < lstSize(fileList); fileIdx++)
{
ManifestFilePack **const filePack = *(ManifestFilePack ***)lstGet(fileList, fileIdx);
ManifestFile file = manifestFileUnpack(data->manifest, *filePack);
if (bundle)
strCatFmt(data->content, "%s/%s {file", strZ(info->name), strZ(file.name));
else
strCatFmt(data->content, "%s {file", strZ(info->name));
// Calculate checksum/size and decompress if needed
// -----------------------------------------------------------------------------------------------------------------
StorageRead *read = storageNewReadP(
data->storage, strNewFmt("%s/%s", strZ(data->path), strZ(info->name)), .offset = file.bundleOffset,
.limit = VARUINT64(file.sizeRepo));
if (data->manifestData->backupOptionCompressType != compressTypeNone)
{
ioFilterGroupAdd(
ioReadFilterGroup(storageReadIo(read)), decompressFilter(data->manifestData->backupOptionCompressType));
}
ioFilterGroupAdd(ioReadFilterGroup(storageReadIo(read)), cryptoHashNew(HASH_TYPE_SHA1_STR));
uint64_t size = bufUsed(storageGetP(read));
const String *checksum = pckReadStrP(
ioFilterGroupResultP(ioReadFilterGroup(storageReadIo(read)), CRYPTO_HASH_FILTER_TYPE));
strCatFmt(data->content, ", s=%" PRIu64, size);
if (!strEqZ(checksum, file.checksumSha1))
THROW_FMT(AssertError, "'%s' checksum does match manifest", strZ(file.name));
// Test size and repo-size. If compressed then set the repo-size to size so it will not be in test output. Even the
// same compression algorithm can give slightly different results based on the version so repo-size is not
// deterministic for compression.
// -----------------------------------------------------------------------------------------------------------------
if (size != file.size)
THROW_FMT(AssertError, "'%s' size does match manifest", strZ(file.name));
// Repo size can only be compared to file size when not bundled
if (!bundle)
{
if (info->size != file.sizeRepo)
THROW_FMT(AssertError, "'%s' repo size does match manifest", strZ(file.name));
}
if (data->manifestData->backupOptionCompressType != compressTypeNone)
file.sizeRepo = file.size;
// Bundle id/offset are too noisy so remove them. They are checked via size/checksum and listed with the files.
// -----------------------------------------------------------------------------------------------------------------
file.bundleId = 0;
file.bundleOffset = 0;
// pg_control and WAL headers have different checksums depending on cpu architecture so remove the checksum from the
// test output.
// -----------------------------------------------------------------------------------------------------------------
if (strEqZ(file.name, MANIFEST_TARGET_PGDATA "/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL) ||
strBeginsWith(
file.name, strNewFmt(MANIFEST_TARGET_PGDATA "/%s/", strZ(pgWalPath(data->manifestData->pgVersion)))))
{
file.checksumSha1[0] = '\0';
}
strCatZ(data->content, "}\n");
// Update changes to manifest file
manifestFilePackUpdate(data->manifest, filePack, &file);
}
break;
}
case storageTypeLink:
strCatFmt(data->content, "link, d=%s", strZ(info->linkDestination));
strCatFmt(data->content, "%s {link, d=%s}\n", strZ(info->name), strZ(info->linkDestination));
break;
case storageTypePath:
{
strCatZ(data->content, "path");
strCatFmt(data->content, "%s {path", strZ(info->name));
// Check against the manifest
// ---------------------------------------------------------------------------------------------------------------------
manifestPathFind(data->manifest, info->name);
if (!strEq(info->name, STRDEF("bundle")))
manifestPathFind(data->manifest, info->name);
// Test mode, user, group. These values are not in the manifest but we know what they should be based on the default
// mode and current user/group.
@ -143,6 +189,7 @@ testBackupValidateCallback(void *callbackData, const StorageInfo *info)
if (!strEq(info->group, TEST_GROUP_STR))
THROW_FMT(AssertError, "'%s' group should be '" TEST_GROUP "'", strZ(info->name));
strCatZ(data->content, "}\n");
break;
}
@ -150,7 +197,6 @@ testBackupValidateCallback(void *callbackData, const StorageInfo *info)
THROW_FMT(AssertError, "unexpected special file '%s'", strZ(info->name));
}
strCatZ(data->content, "}\n");
}
static String *
@ -161,6 +207,9 @@ testBackupValidate(const Storage *storage, const String *path)
FUNCTION_HARNESS_PARAM(STRING, path);
FUNCTION_HARNESS_END();
ASSERT(storage != NULL);
ASSERT(path != NULL);
String *result = strNew();
MEM_CONTEXT_TEMP_BEGIN()
@ -599,11 +648,28 @@ testRun(void)
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("pg file missing - ignoreMissing=true");
List *fileList = lstNewP(sizeof(BackupFile));
BackupFile file =
{
.pgFile = missingFile,
.pgFileIgnoreMissing = true,
.pgFileSize = 0,
.pgFileCopyExactSize = true,
.pgFileChecksum = NULL,
.pgFileChecksumPage = false,
.pgFileChecksumPageLsnLimit = 0,
.manifestFile = missingFile,
.manifestFileHasReference = false,
};
lstAdd(fileList, &file);
const String *repoFile = strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(backupLabel), strZ(file.manifestFile));
TEST_ASSIGN(
result,
backupFile(
missingFile, true, 0, true, NULL, false, 0, missingFile, false, compressTypeNone, 1, backupLabel, false,
cipherTypeNone, NULL),
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeNone, 1, false, cipherTypeNone, NULL, fileList), 0),
"pg file missing, ignoreMissing=true, no delta");
TEST_RESULT_UINT(result.copySize + result.repoSize, 0, "copy/repo size 0");
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultSkip, "skip file");
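
These tests exercise the reworked backupFile() signature: one call now takes the repo destination plus a list of BackupFile structs and returns a parallel list of BackupFileResult, which is what allows the backup to batch many small files into a single bundle write. Walking the results, schematically (a sketch only; result fields as used in the tests):

// Sketch: one backupFile() call covers every file destined for repoFile.
List *resultList = backupFile(repoFile, compressTypeNone, 1, false, cipherTypeNone, NULL, fileList);

for (unsigned int resultIdx = 0; resultIdx < lstSize(resultList); resultIdx++)
{
    const BackupFileResult *const fileResult = lstGet(resultList, resultIdx);

    // Each result reports copySize/repoSize and a backupCopyResult
    // (e.g. copy or skip) for the matching BackupFile in fileList.
    (void)fileResult;
}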
@ -611,11 +677,26 @@ testRun(void)
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("pg file missing - ignoreMissing=false");
fileList = lstNewP(sizeof(BackupFile));
file = (BackupFile)
{
.pgFile = missingFile,
.pgFileIgnoreMissing = false,
.pgFileSize = 0,
.pgFileCopyExactSize = true,
.pgFileChecksum = NULL,
.pgFileChecksumPage = false,
.pgFileChecksumPageLsnLimit = 0,
.manifestFile = missingFile,
.manifestFileHasReference = false,
};
lstAdd(fileList, &file);
TEST_ERROR(
backupFile(
missingFile, false, 0, true, NULL, false, 0, missingFile, false, compressTypeNone, 1, backupLabel, false,
cipherTypeNone, NULL),
FileMissingError, "unable to open missing file '" TEST_PATH "/pg/missing' for read");
backupFile(repoFile, compressTypeNone, 1, false, cipherTypeNone, NULL, fileList), FileMissingError,
"unable to open missing file '" TEST_PATH "/pg/missing' for read");
// Create a pg file to backup
HRN_STORAGE_PUT_Z(storagePgWrite(), strZ(pgFile), "atestfile");
@ -630,11 +711,28 @@ testRun(void)
// where a file grows while a backup is running.
HRN_STORAGE_PUT_Z(storagePgWrite(), strZ(pgFile), "atestfile###");
fileList = lstNewP(sizeof(BackupFile));
file = (BackupFile)
{
.pgFile = pgFile,
.pgFileIgnoreMissing = false,
.pgFileSize = 9,
.pgFileCopyExactSize = true,
.pgFileChecksum = NULL,
.pgFileChecksumPage = true,
.pgFileChecksumPageLsnLimit = 0xFFFFFFFFFFFFFFFF,
.manifestFile = pgFile,
.manifestFileHasReference = false,
};
lstAdd(fileList, &file);
repoFile = strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(backupLabel), strZ(file.manifestFile));
TEST_ASSIGN(
result,
backupFile(
pgFile, false, 9, true, NULL, true, 0xFFFFFFFFFFFFFFFF, pgFile, false, compressTypeNone, 1, backupLabel, false,
cipherTypeNone, NULL),
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeNone, 1, false, cipherTypeNone, NULL, fileList), 0),
"file checksummed with pageChecksum enabled");
TEST_RESULT_UINT(result.copySize, 9, "copy=pgFile size");
TEST_RESULT_UINT(result.repoSize, 9, "repo=pgFile size");
@ -646,11 +744,26 @@ testRun(void)
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("pgFileSize, ignoreMissing=false, backupLabel, pgFileChecksumPage, pgFileChecksumPageLsnLimit");
fileList = lstNewP(sizeof(BackupFile));
file = (BackupFile)
{
.pgFile = pgFile,
.pgFileIgnoreMissing = false,
.pgFileSize = 8,
.pgFileCopyExactSize = false,
.pgFileChecksum = NULL,
.pgFileChecksumPage = true,
.pgFileChecksumPageLsnLimit = 0xFFFFFFFFFFFFFFFF,
.manifestFile = pgFile,
.manifestFileHasReference = false,
};
lstAdd(fileList, &file);
TEST_ASSIGN(
result,
backupFile(
pgFile, false, 8, false, NULL, true, 0xFFFFFFFFFFFFFFFF, pgFile, false, compressTypeNone, 1, backupLabel, false,
cipherTypeNone, NULL),
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeNone, 1, false, cipherTypeNone, NULL, fileList), 0),
"backup file");
TEST_RESULT_UINT(result.copySize, 12, "copy size");
TEST_RESULT_UINT(result.repoSize, 12, "repo size");
@ -662,12 +775,27 @@ testRun(void)
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("file exists in repo and db, checksum match - NOOP");
fileList = lstNewP(sizeof(BackupFile));
file = (BackupFile)
{
.pgFile = pgFile,
.pgFileIgnoreMissing = false,
.pgFileSize = 9,
.pgFileCopyExactSize = true,
.pgFileChecksum = STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"),
.pgFileChecksumPage = false,
.pgFileChecksumPageLsnLimit = 0,
.manifestFile = pgFile,
.manifestFileHasReference = true,
};
lstAdd(fileList, &file);
// File exists in repo and db, pg checksum match, delta set, ignoreMissing false, hasReference - NOOP
TEST_ASSIGN(
result,
backupFile(
pgFile, false, 9, true, STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 0, pgFile, true,
compressTypeNone, 1, backupLabel, true, cipherTypeNone, NULL),
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeNone, 1, true, cipherTypeNone, NULL, fileList), 0),
"file in db and repo, checksum equal, no ignoreMissing, no pageChecksum, delta, hasReference");
TEST_RESULT_UINT(result.copySize, 9, "copy size set");
TEST_RESULT_UINT(result.repoSize, 0, "repo size not set since already exists in repo");
@ -679,12 +807,27 @@ testRun(void)
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("file exists in repo and db, checksum mismatch - COPY");
fileList = lstNewP(sizeof(BackupFile));
file = (BackupFile)
{
.pgFile = pgFile,
.pgFileIgnoreMissing = false,
.pgFileSize = 9,
.pgFileCopyExactSize = true,
.pgFileChecksum = STRDEF("1234567890123456789012345678901234567890"),
.pgFileChecksumPage = false,
.pgFileChecksumPageLsnLimit = 0,
.manifestFile = pgFile,
.manifestFileHasReference = true,
};
lstAdd(fileList, &file);
// File exists in repo and db, pg checksum mismatch, delta set, ignoreMissing false, hasReference - COPY
TEST_ASSIGN(
result,
backupFile(
pgFile, false, 9, true, STRDEF("1234567890123456789012345678901234567890"), false, 0, pgFile, true,
compressTypeNone, 1, backupLabel, true, cipherTypeNone, NULL),
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeNone, 1, true, cipherTypeNone, NULL, fileList), 0),
"file in db and repo, pg checksum not equal, no ignoreMissing, no pageChecksum, delta, hasReference");
TEST_RESULT_UINT(result.copySize, 9, "copy 9 bytes");
TEST_RESULT_UINT(result.repoSize, 9, "repo=copy size");
@ -696,12 +839,27 @@ testRun(void)
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("file exists in repo and pg, copy only exact file even if size passed is greater - COPY");
fileList = lstNewP(sizeof(BackupFile));
file = (BackupFile)
{
.pgFile = pgFile,
.pgFileIgnoreMissing = false,
.pgFileSize = 9999999,
.pgFileCopyExactSize = true,
.pgFileChecksum = STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"),
.pgFileChecksumPage = false,
.pgFileChecksumPageLsnLimit = 0,
.manifestFile = pgFile,
.manifestFileHasReference = true,
};
lstAdd(fileList, &file);
// File exists in repo and pg, pg checksum same, pg size passed is different, delta set, ignoreMissing false, hasReference
TEST_ASSIGN(
result,
backupFile(
pgFile, false, 9999999, true, STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 0, pgFile, true,
compressTypeNone, 1, backupLabel, true, cipherTypeNone, NULL),
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeNone, 1, true, cipherTypeNone, NULL, fileList), 0),
"db & repo file, pg checksum same, pg size different, no ignoreMissing, no pageChecksum, delta, hasReference");
TEST_RESULT_UINT(result.copySize, 12, "copy=pgFile size");
TEST_RESULT_UINT(result.repoSize, 12, "repo=pgFile size");
@ -713,13 +871,30 @@ testRun(void)
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("resumed file is missing in repo but present in resumed manifest, file same name in repo - RECOPY");
fileList = lstNewP(sizeof(BackupFile));
file = (BackupFile)
{
.pgFile = pgFile,
.pgFileIgnoreMissing = false,
.pgFileSize = 9,
.pgFileCopyExactSize = true,
.pgFileChecksum = STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"),
.pgFileChecksumPage = false,
.pgFileChecksumPageLsnLimit = 0,
.manifestFile = STRDEF(BOGUS_STR),
.manifestFileHasReference = false,
};
lstAdd(fileList, &file);
repoFile = strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(backupLabel), strZ(file.manifestFile));
TEST_STORAGE_LIST(
storageRepo(), STORAGE_REPO_BACKUP "/20190718-155825F", "testfile\n", .comment = "resumed file is missing in repo");
TEST_ASSIGN(
result,
backupFile(
pgFile, false, 9, true, STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 0, STRDEF(BOGUS_STR), false,
compressTypeNone, 1, backupLabel, true, cipherTypeNone, NULL),
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeNone, 1, true, cipherTypeNone, NULL, fileList), 0),
"backup 9 bytes of pgfile to file to resume in repo");
TEST_RESULT_UINT(result.copySize, 9, "copy 9 bytes");
TEST_RESULT_UINT(result.repoSize, 9, "repo=copy size");
@ -738,12 +913,29 @@ testRun(void)
storageRepoWrite(), strZ(backupPathFile), "adifferentfile",
.comment = "create different file (size and checksum) with same name in repo");
fileList = lstNewP(sizeof(BackupFile));
file = (BackupFile)
{
.pgFile = pgFile,
.pgFileIgnoreMissing = false,
.pgFileSize = 9,
.pgFileCopyExactSize = true,
.pgFileChecksum = STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"),
.pgFileChecksumPage = false,
.pgFileChecksumPageLsnLimit = 0,
.manifestFile = pgFile,
.manifestFileHasReference = false,
};
lstAdd(fileList, &file);
repoFile = strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(backupLabel), strZ(file.manifestFile));
// Delta set, ignoreMissing false, no hasReference
TEST_ASSIGN(
result,
backupFile(
pgFile, false, 9, true, STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 0, pgFile, false,
compressTypeNone, 1, backupLabel, true, cipherTypeNone, NULL),
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeNone, 1, true, cipherTypeNone, NULL, fileList), 0),
"db & repo file, pgFileMatch, repo checksum no match, no ignoreMissing, no pageChecksum, delta, no hasReference");
TEST_RESULT_UINT(result.copySize, 9, "copy 9 bytes");
TEST_RESULT_UINT(result.repoSize, 9, "repo=copy size");
@@ -755,11 +947,26 @@ testRun(void)
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("file exists in repo but missing from db, checksum same in repo - SKIP");
fileList = lstNewP(sizeof(BackupFile));
file = (BackupFile)
{
.pgFile = missingFile,
.pgFileIgnoreMissing = true,
.pgFileSize = 9,
.pgFileCopyExactSize = true,
.pgFileChecksum = STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"),
.pgFileChecksumPage = false,
.pgFileChecksumPageLsnLimit = 0,
.manifestFile = pgFile,
.manifestFileHasReference = false,
};
lstAdd(fileList, &file);
TEST_ASSIGN(
result,
backupFile(
missingFile, true, 9, true, STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 0, pgFile, false,
compressTypeNone, 1, backupLabel, true, cipherTypeNone, NULL),
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeNone, 1, true, cipherTypeNone, NULL, fileList), 0),
"file in repo only, checksum in repo equal, ignoreMissing=true, no pageChecksum, delta, no hasReference");
TEST_RESULT_UINT(result.copySize + result.repoSize, 0, "copy=repo=0 size");
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultSkip, "skip file");
@@ -771,10 +978,28 @@ testRun(void)
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("compression set, all other boolean parameters false - COPY");
fileList = lstNewP(sizeof(BackupFile));
file = (BackupFile)
{
.pgFile = pgFile,
.pgFileIgnoreMissing = false,
.pgFileSize = 9,
.pgFileCopyExactSize = true,
.pgFileChecksum = NULL,
.pgFileChecksumPage = false,
.pgFileChecksumPageLsnLimit = 0,
.manifestFile = pgFile,
.manifestFileHasReference = false,
};
lstAdd(fileList, &file);
repoFile = strNewFmt(STORAGE_REPO_BACKUP "/%s/%s.gz", strZ(backupLabel), strZ(file.manifestFile));
TEST_ASSIGN(
result,
backupFile(
pgFile, false, 9, true, NULL, false, 0, pgFile, false, compressTypeGz, 3, backupLabel, false, cipherTypeNone, NULL),
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeGz, 3, false, cipherTypeNone, NULL, fileList), 0),
"pg file exists, no checksum, no ignoreMissing, compression, no pageChecksum, no delta, no hasReference");
TEST_RESULT_UINT(result.copySize, 9, "copy=pgFile size");
TEST_RESULT_UINT(result.repoSize, 29, "repo compress size");
@@ -788,11 +1013,26 @@ testRun(void)
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("pg and repo file exist & match, prior checksum, compression - COPY CHECKSUM");
fileList = lstNewP(sizeof(BackupFile));
file = (BackupFile)
{
.pgFile = pgFile,
.pgFileIgnoreMissing = false,
.pgFileSize = 9,
.pgFileCopyExactSize = true,
.pgFileChecksum = STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"),
.pgFileChecksumPage = false,
.pgFileChecksumPageLsnLimit = 0,
.manifestFile = pgFile,
.manifestFileHasReference = false,
};
lstAdd(fileList, &file);
TEST_ASSIGN(
result,
backupFile(
pgFile, false, 9, true, STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 0, pgFile, false, compressTypeGz,
3, backupLabel, false, cipherTypeNone, NULL),
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeGz, 3, false, cipherTypeNone, NULL, fileList), 0),
"pg file & repo exists, match, checksum, no ignoreMissing, compression, no pageChecksum, no delta, no hasReference");
TEST_RESULT_UINT(result.copySize, 9, "copy=pgFile size");
TEST_RESULT_UINT(result.repoSize, 0, "repo size not calculated");
@@ -808,12 +1048,29 @@ testRun(void)
// Create zero sized file in pg
HRN_STORAGE_PUT_EMPTY(storagePgWrite(), "zerofile");
fileList = lstNewP(sizeof(BackupFile));
file = (BackupFile)
{
.pgFile = STRDEF("zerofile"),
.pgFileIgnoreMissing = false,
.pgFileSize = 0,
.pgFileCopyExactSize = true,
.pgFileChecksum = NULL,
.pgFileChecksumPage = false,
.pgFileChecksumPageLsnLimit = 0,
.manifestFile = STRDEF("zerofile"),
.manifestFileHasReference = false,
};
lstAdd(fileList, &file);
repoFile = strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(backupLabel), strZ(file.manifestFile));
// No prior checksum, no compression, no pageChecksum, no delta, no hasReference
TEST_ASSIGN(
result,
backupFile(
STRDEF("zerofile"), false, 0, true, NULL, false, 0, STRDEF("zerofile"), false, compressTypeNone, 1, backupLabel,
false, cipherTypeNone, NULL),
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeNone, 1, false, cipherTypeNone, NULL, fileList), 0),
"zero-sized pg file exists, no repo file, no ignoreMissing, no pageChecksum, no delta, no hasReference");
TEST_RESULT_UINT(result.copySize + result.repoSize, 0, "copy=repo=pgFile size 0");
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultCopy, "copy file");
@@ -843,12 +1100,30 @@ testRun(void)
// Create the pg path and pg file to backup
HRN_STORAGE_PUT_Z(storagePgWrite(), strZ(pgFile), "atestfile");
fileList = lstNewP(sizeof(BackupFile));
file = (BackupFile)
{
.pgFile = pgFile,
.pgFileIgnoreMissing = false,
.pgFileSize = 9,
.pgFileCopyExactSize = true,
.pgFileChecksum = NULL,
.pgFileChecksumPage = false,
.pgFileChecksumPageLsnLimit = 0,
.manifestFile = pgFile,
.manifestFileHasReference = false,
};
lstAdd(fileList, &file);
repoFile = strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(backupLabel), strZ(file.manifestFile));
// No prior checksum, no compression, no pageChecksum, no delta, no hasReference
TEST_ASSIGN(
result,
backupFile(
pgFile, false, 9, true, NULL, false, 0, pgFile, false, compressTypeNone, 1, backupLabel, false, cipherTypeAes256Cbc,
STRDEF(TEST_CIPHER_PASS)),
*(BackupFileResult *)lstGet(
backupFile(repoFile, compressTypeNone, 1, false, cipherTypeAes256Cbc, STRDEF(TEST_CIPHER_PASS), fileList), 0),
"pg file exists, no repo file, no ignoreMissing, no pageChecksum, no delta, no hasReference");
TEST_RESULT_UINT(result.copySize, 9, "copy size set");
TEST_RESULT_UINT(result.repoSize, 32, "repo size set");
@@ -862,12 +1137,28 @@ testRun(void)
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("delta, copy file (size mismatch) to encrypted repo");
fileList = lstNewP(sizeof(BackupFile));
file = (BackupFile)
{
.pgFile = pgFile,
.pgFileIgnoreMissing = false,
.pgFileSize = 8,
.pgFileCopyExactSize = true,
.pgFileChecksum = STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"),
.pgFileChecksumPage = false,
.pgFileChecksumPageLsnLimit = 0,
.manifestFile = pgFile,
.manifestFileHasReference = false,
};
lstAdd(fileList, &file);
// Delta but pgFile does not match size passed, prior checksum, no compression, no pageChecksum, delta, no hasReference
TEST_ASSIGN(
result,
backupFile(
pgFile, false, 8, true, STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 0, pgFile, false,
compressTypeNone, 1, backupLabel, true, cipherTypeAes256Cbc, STRDEF(TEST_CIPHER_PASS)),
*(BackupFileResult *)lstGet(
backupFile(repoFile, compressTypeNone, 1, true, cipherTypeAes256Cbc, STRDEF(TEST_CIPHER_PASS), fileList), 0),
"pg and repo file exists, pgFileMatch false, no ignoreMissing, no pageChecksum, delta, no hasReference");
TEST_RESULT_UINT(result.copySize, 8, "copy size set");
TEST_RESULT_UINT(result.repoSize, 32, "repo size set");
@@ -881,11 +1172,27 @@ testRun(void)
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("no delta, recopy (size mismatch) file to encrypted repo");
fileList = lstNewP(sizeof(BackupFile));
file = (BackupFile)
{
.pgFile = pgFile,
.pgFileIgnoreMissing = false,
.pgFileSize = 9,
.pgFileCopyExactSize = true,
.pgFileChecksum = STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"),
.pgFileChecksumPage = false,
.pgFileChecksumPageLsnLimit = 0,
.manifestFile = pgFile,
.manifestFileHasReference = false,
};
lstAdd(fileList, &file);
TEST_ASSIGN(
result,
backupFile(
pgFile, false, 9, true, STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 0, pgFile, false,
compressTypeNone, 0, backupLabel, false, cipherTypeAes256Cbc, STRDEF(TEST_CIPHER_PASS)),
*(BackupFileResult *)lstGet(
backupFile(repoFile, compressTypeNone, 0, false, cipherTypeAes256Cbc, STRDEF(TEST_CIPHER_PASS), fileList), 0),
"pg and repo file exists, checksum mismatch, no ignoreMissing, no pageChecksum, no delta, no hasReference");
TEST_RESULT_UINT(result.copySize, 9, "copy size set");
TEST_RESULT_UINT(result.repoSize, 32, "repo size set");
@@ -899,11 +1206,27 @@ testRun(void)
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("no delta, recopy (checksum mismatch), file to encrypted repo");
fileList = lstNewP(sizeof(BackupFile));
file = (BackupFile)
{
.pgFile = pgFile,
.pgFileIgnoreMissing = false,
.pgFileSize = 9,
.pgFileCopyExactSize = true,
.pgFileChecksum = STRDEF("1234567890123456789012345678901234567890"),
.pgFileChecksumPage = false,
.pgFileChecksumPageLsnLimit = 0,
.manifestFile = pgFile,
.manifestFileHasReference = false,
};
lstAdd(fileList, &file);
TEST_ASSIGN(
result,
backupFile(
pgFile, false, 9, true, STRDEF("1234567890123456789012345678901234567890"), false, 0, pgFile, false,
compressTypeNone, 0, backupLabel, false, cipherTypeAes256Cbc, STRDEF(TEST_CIPHER_PASS)),
*(BackupFileResult *)lstGet(
backupFile(repoFile, compressTypeNone, 0, false, cipherTypeAes256Cbc, STRDEF(TEST_CIPHER_PASS), fileList), 0),
"backup file");
TEST_RESULT_UINT(result.copySize, 9, "copy size set");
@@ -1247,7 +1570,7 @@ testRun(void)
TEST_RESULT_PTR(backupResumeFind((Manifest *)1, NULL), NULL, "find resumable backup");
TEST_RESULT_LOG("P00 WARN: backup '20191003-105320F' cannot be resumed: resume is disabled");
TEST_RESULT_LOG("P00 INFO: backup '20191003-105320F' cannot be resumed: resume is disabled");
TEST_STORAGE_LIST_EMPTY(storageRepo(), STORAGE_REPO_BACKUP, .comment = "check backup path removed");
@@ -1373,6 +1696,30 @@ testRun(void)
TEST_STORAGE_LIST_EMPTY(storageRepo(), STORAGE_REPO_BACKUP, .comment = "check backup path removed");
manifestResume->pub.data.backupOptionCompressType = compressTypeNone;
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("cannot resume when bundling");
argList = strLstNew();
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
hrnCfgArgRawZ(argList, cfgOptRepoPath, TEST_PATH "/repo");
hrnCfgArgRawZ(argList, cfgOptPgPath, "/pg");
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
hrnCfgArgRawStrId(argList, cfgOptType, backupTypeFull);
hrnCfgArgRawBool(argList, cfgOptBundle, true);
HRN_CFG_LOAD(cfgCmdBackup, argList);
manifestSave(
manifestResume,
storageWriteIo(
storageNewWriteP(
storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F/" BACKUP_MANIFEST_FILE INFO_COPY_EXT))));
TEST_RESULT_PTR(backupResumeFind(manifest, NULL), NULL, "find resumable backup");
TEST_RESULT_LOG("P00 INFO: backup '20191003-105320F' cannot be resumed: resume is disabled");
TEST_STORAGE_LIST_EMPTY(storageRepo(), STORAGE_REPO_BACKUP, .comment = "check backup path removed");
}
// *****************************************************************************************************************************
@@ -1387,7 +1734,8 @@ testRun(void)
ProtocolParallelJob *job = protocolParallelJobNew(VARSTRDEF("key"), protocolCommandNew(strIdFromZ("x")));
protocolParallelJobErrorSet(job, errorTypeCode(&AssertError), STRDEF("error message"));
TEST_ERROR(backupJobResult((Manifest *)1, NULL, STRDEF("log"), strLstNew(), job, 0, NULL), AssertError, "error message");
TEST_ERROR(
backupJobResult((Manifest *)1, NULL, storageTest, strLstNew(), job, false, 0, NULL), AssertError, "error message");
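    // backupJobResult() now takes a storage object in place of the log name (the logged path is derived from it, as the
    // DETAIL message change further down shows) plus an additional boolean, presumably a bundle flag (false here).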
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("report host/100% progress on noop result");
@ -1396,6 +1744,7 @@ testRun(void)
job = protocolParallelJobNew(VARSTRDEF("pg_data/test"), protocolCommandNew(strIdFromZ("x")));
PackWrite *const resultPack = protocolPackNew();
pckWriteStrP(resultPack, STRDEF("pg_data/test"));
pckWriteU32P(resultPack, backupCopyResultNoOp);
pckWriteU64P(resultPack, 0);
pckWriteU64P(resultPack, 0);
@@ -1418,9 +1767,9 @@ testRun(void)
uint64_t sizeProgress = 0;
TEST_RESULT_VOID(
backupJobResult(manifest, STRDEF("host"), STRDEF("log-test"), strLstNew(), job, 0, &sizeProgress), "log noop result");
backupJobResult(manifest, STRDEF("host"), storageTest, strLstNew(), job, false, 0, &sizeProgress), "log noop result");
TEST_RESULT_LOG("P00 DETAIL: match file from prior backup host:log-test (0B, 100%)");
TEST_RESULT_LOG("P00 DETAIL: match file from prior backup host:" TEST_PATH "/test (0B, 100%)");
}
// Offline tests should only be used to test offline functionality and errors easily tested in offline mode
@@ -1717,7 +2066,7 @@ testRun(void)
// Create a backup manifest that looks like a halted backup manifest
Manifest *manifestResume = manifestNewBuild(
storagePg(), PG_VERSION_95, hrnPgCatalogVersion(PG_VERSION_95), true, false, NULL, NULL);
storagePg(), PG_VERSION_95, hrnPgCatalogVersion(PG_VERSION_95), true, false, false, NULL, NULL);
ManifestData *manifestResumeData = (ManifestData *)manifestData(manifestResume);
manifestResumeData->backupType = backupTypeFull;
@@ -1808,7 +2157,7 @@ testRun(void)
// Create a backup manifest that looks like a halted backup manifest
Manifest *manifestResume = manifestNewBuild(
storagePg(), PG_VERSION_95, hrnPgCatalogVersion(PG_VERSION_95), true, false, NULL, NULL);
storagePg(), PG_VERSION_95, hrnPgCatalogVersion(PG_VERSION_95), true, false, false, NULL, NULL);
ManifestData *manifestResumeData = (ManifestData *)manifestData(manifestResume);
manifestResumeData->backupType = backupTypeFull;
@@ -1998,7 +2347,7 @@ testRun(void)
// Create a backup manifest that looks like a halted backup manifest
Manifest *manifestResume = manifestNewBuild(
storagePg(), PG_VERSION_95, hrnPgCatalogVersion(PG_VERSION_95), true, false, NULL, NULL);
storagePg(), PG_VERSION_95, hrnPgCatalogVersion(PG_VERSION_95), true, false, false, NULL, NULL);
ManifestData *manifestResumeData = (ManifestData *)manifestData(manifestResume);
manifestResumeData->backupType = backupTypeDiff;
@@ -2621,6 +2970,138 @@ testRun(void)
"pg_tblspc/32768/PG_11_201809051/1={}\n",
"compare file list");
}
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("online 11 full backup with tablespaces and bundles");
backupTimeStart = BACKUP_EPOCH + 2400000;
{
// Load options
StringList *argList = strLstNew();
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
hrnCfgArgRawStrId(argList, cfgOptType, backupTypeFull);
hrnCfgArgRawZ(argList, cfgOptManifestSaveThreshold, "1");
hrnCfgArgRawBool(argList, cfgOptArchiveCopy, true);
hrnCfgArgRawZ(argList, cfgOptBufferSize, "16K");
hrnCfgArgRawBool(argList, cfgOptBundle, true);
HRN_CFG_LOAD(cfgCmdBackup, argList);
            // Set to a smaller value than the defaults allow
cfgOptionSet(cfgOptBundleSize, cfgSourceParam, VARINT64(PG_PAGE_SIZE_DEFAULT));
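            // With bundle-size forced down to a single 8KiB page, each relation file at or above that size should occupy
            // a bundle of its own while the small config files can still share one (see the expected file list below).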
// Zeroed file which passes page checksums
Buffer *relation = bufNew(PG_PAGE_SIZE_DEFAULT * 3);
memset(bufPtr(relation), 0, bufSize(relation));
bufUsedSet(relation, bufSize(relation));
HRN_STORAGE_PUT(storagePgWrite(), PG_PATH_BASE "/1/2", relation, .timeModified = backupTimeStart);
// Old files
HRN_STORAGE_PUT_Z(storagePgWrite(), "postgresql.auto.conf", "CONFIGSTUFF2", .timeModified = 1500000000);
HRN_STORAGE_PUT_Z(storagePgWrite(), "stuff.conf", "CONFIGSTUFF3", .timeModified = 1500000000);
            // File that will be skipped while bundling smaller files and will end up in a bundle by itself
Buffer *bigish = bufNew(PG_PAGE_SIZE_DEFAULT - 1);
memset(bufPtr(bigish), 0, bufSize(bigish));
bufUsedSet(bigish, bufSize(bigish));
HRN_STORAGE_PUT(storagePgWrite(), "bigish.dat", bigish, .timeModified = 1500000001);
// Run backup
testBackupPqScriptP(PG_VERSION_11, backupTimeStart, .walCompressType = compressTypeGz, .walTotal = 2);
TEST_RESULT_VOID(cmdBackup(), "backup");
TEST_RESULT_LOG(
"P00 INFO: execute non-exclusive pg_start_backup(): backup begins after the next regular checkpoint completes\n"
"P00 INFO: backup start archive = 0000000105DB8EB000000000, lsn = 5db8eb0/0\n"
"P00 INFO: check archive for segment 0000000105DB8EB000000000\n"
"P00 DETAIL: store zero-length file " TEST_PATH "/pg1/pg_tblspc/32768/PG_11_201809051/1/5\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/base/1/2 (24KB, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/base/1/1 (8KB, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/global/pg_control (8KB, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/stuff.conf (12B, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/postgresql.auto.conf (12B, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/postgresql.conf (11B, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/PG_VERSION (2B, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/bigish.dat (8.0KB, [PCT]) checksum [SHA1]\n"
"P00 INFO: execute non-exclusive pg_stop_backup() and wait for all WAL segments to archive\n"
"P00 INFO: backup stop archive = 0000000105DB8EB000000001, lsn = 5db8eb0/180000\n"
"P00 DETAIL: wrote 'backup_label' file returned from pg_stop_backup()\n"
"P00 DETAIL: wrote 'tablespace_map' file returned from pg_stop_backup()\n"
"P00 INFO: check archive for segment(s) 0000000105DB8EB000000000:0000000105DB8EB000000001\n"
"P00 DETAIL: copy segment 0000000105DB8EB000000000 to backup\n"
"P00 DETAIL: copy segment 0000000105DB8EB000000001 to backup\n"
"P00 INFO: new backup label = 20191030-014640F\n"
"P00 INFO: full backup size = [SIZE], file total = 13");
TEST_RESULT_STR_Z(
testBackupValidate(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/latest")),
". {link, d=20191030-014640F}\n"
"bundle {path}\n"
"bundle/1/pg_data/base/1/2 {file, s=24576}\n"
"bundle/2/pg_data/base/1/1 {file, s=8192}\n"
"bundle/3/pg_data/global/pg_control {file, s=8192}\n"
"bundle/4/pg_data/PG_VERSION {file, s=2}\n"
"bundle/4/pg_data/postgresql.auto.conf {file, s=12}\n"
"bundle/4/pg_data/postgresql.conf {file, s=11}\n"
"bundle/4/pg_data/stuff.conf {file, s=12}\n"
"bundle/5/pg_data/bigish.dat {file, s=8191}\n"
"pg_data {path}\n"
"pg_data/backup_label.gz {file, s=17}\n"
"pg_data/pg_wal {path}\n"
"pg_data/pg_wal/0000000105DB8EB000000000.gz {file, s=1048576}\n"
"pg_data/pg_wal/0000000105DB8EB000000001.gz {file, s=1048576}\n"
"pg_data/tablespace_map.gz {file, s=19}\n"
"--------\n"
"[backup:target]\n"
"pg_data={\"path\":\"" TEST_PATH "/pg1\",\"type\":\"path\"}\n"
"pg_tblspc/32768={\"path\":\"../../pg1-tblspc/32768\",\"tablespace-id\":\"32768\""
",\"tablespace-name\":\"tblspc32768\",\"type\":\"link\"}\n"
"\n"
"[target:file]\n"
"pg_data/PG_VERSION={\"checksum\":\"17ba0791499db908433b80f37c5fbc89b870084b\",\"size\":2"
",\"timestamp\":1572200000}\n"
"pg_data/backup_label={\"checksum\":\"8e6f41ac87a7514be96260d65bacbffb11be77dc\",\"size\":17"
",\"timestamp\":1572400002}\n"
"pg_data/base/1/1={\"checksum\":\"0631457264ff7f8d5fb1edc2c0211992a67c73e6\",\"checksum-page\":true,\"size\":8192"
",\"timestamp\":1572200000}\n"
"pg_data/base/1/2={\"checksum\":\"ebdd38b69cd5b9f2d00d273c981e16960fbbb4f7\",\"checksum-page\":true,\"size\":24576"
",\"timestamp\":1572400000}\n"
"pg_data/bigish.dat={\"checksum\":\"3e5175386be683d2f231f3fa3eab892a799082f7\",\"size\":8191"
",\"timestamp\":1500000001}\n"
"pg_data/global/pg_control={\"size\":8192,\"timestamp\":1572400000}\n"
"pg_data/pg_wal/0000000105DB8EB000000000={\"size\":1048576,\"timestamp\":1572400002}\n"
"pg_data/pg_wal/0000000105DB8EB000000001={\"size\":1048576,\"timestamp\":1572400002}\n"
"pg_data/postgresql.auto.conf={\"checksum\":\"e873a5cb5a67e48761e7b619c531311404facdce\",\"size\":12"
",\"timestamp\":1500000000}\n"
"pg_data/postgresql.conf={\"checksum\":\"e3db315c260e79211b7b52587123b7aa060f30ab\",\"size\":11"
",\"timestamp\":1570000000}\n"
"pg_data/stuff.conf={\"checksum\":\"55a9d0d18b77789c7722abe72aa905e2dc85bb5d\",\"size\":12"
",\"timestamp\":1500000000}\n"
"pg_data/tablespace_map={\"checksum\":\"87fe624d7976c2144e10afcb7a9a49b071f35e9c\",\"size\":19"
",\"timestamp\":1572400002}\n"
"pg_tblspc/32768/PG_11_201809051/1/5={\"checksum-page\":true,\"size\":0,\"timestamp\":1572200000}\n"
"\n"
"[target:link]\n"
"pg_data/pg_tblspc/32768={\"destination\":\"../../pg1-tblspc/32768\"}\n"
"\n"
"[target:path]\n"
"pg_data={}\n"
"pg_data/base={}\n"
"pg_data/base/1={}\n"
"pg_data/global={}\n"
"pg_data/pg_tblspc={}\n"
"pg_data/pg_wal={}\n"
"pg_tblspc={}\n"
"pg_tblspc/32768={}\n"
"pg_tblspc/32768/PG_11_201809051={}\n"
"pg_tblspc/32768/PG_11_201809051/1={}\n",
"compare file list");
}
}
FUNCTION_HARNESS_RETURN_VOID();

View File

@@ -175,9 +175,9 @@ testRun(void)
TEST_RESULT_BOOL(
restoreFile(
repoFile1, repoIdx, repoFileReferenceFull, compressTypeNone, STRDEF("sparse-zero"),
STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), true, 0x10000000000UL, 1557432154, 0600, TEST_USER_STR,
TEST_GROUP_STR, 0, true, false, NULL),
strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(repoFileReferenceFull), strZ(repoFile1)), repoIdx, 0, NULL, compressTypeNone,
STRDEF("sparse-zero"), STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), true, 0x10000000000UL, 1557432154, 0600,
TEST_USER_STR, TEST_GROUP_STR, 0, true, false, NULL),
false, "zero sparse 1TB file");
TEST_RESULT_UINT(storageInfoP(storagePg(), STRDEF("sparse-zero")).size, 0x10000000000UL, "check size");
@@ -186,9 +186,9 @@ testRun(void)
TEST_RESULT_BOOL(
restoreFile(
repoFile1, repoIdx, repoFileReferenceFull, compressTypeNone, STRDEF("normal-zero"),
STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 0, 1557432154, 0600, TEST_USER_STR, TEST_GROUP_STR, 0,
false, false, NULL),
strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(repoFileReferenceFull), strZ(repoFile1)), repoIdx, 0, NULL, compressTypeNone,
STRDEF("normal-zero"), STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 0, 1557432154, 0600,
TEST_USER_STR, TEST_GROUP_STR, 0, false, false, NULL),
true, "zero-length file");
TEST_RESULT_UINT(storageInfoP(storagePg(), STRDEF("normal-zero")).size, 0, "check size");
@@ -202,9 +202,9 @@ testRun(void)
TEST_ERROR(
restoreFile(
repoFile1, repoIdx, repoFileReferenceFull, compressTypeGz, STRDEF("normal"),
STRDEF("ffffffffffffffffffffffffffffffffffffffff"), false, 7, 1557432154, 0600, TEST_USER_STR, TEST_GROUP_STR, 0,
false, false, STRDEF("badpass")),
strNewFmt(STORAGE_REPO_BACKUP "/%s/%s.gz", strZ(repoFileReferenceFull), strZ(repoFile1)), repoIdx, 0, NULL, compressTypeGz,
STRDEF("normal"), STRDEF("ffffffffffffffffffffffffffffffffffffffff"), false, 7, 1557432154, 0600, TEST_USER_STR,
TEST_GROUP_STR, 0, false, false, STRDEF("badpass")),
ChecksumError,
"error restoring 'normal': actual checksum 'd1cd8a7d11daa26814b93eb604e1d49ab4b43770' does not match expected checksum"
" 'ffffffffffffffffffffffffffffffffffffffff'");
@@ -218,9 +218,9 @@ testRun(void)
TEST_RESULT_BOOL(
restoreFile(
repoFile1, repoIdx, repoFileReferenceFull, compressTypeGz, STRDEF("normal"),
STRDEF("d1cd8a7d11daa26814b93eb604e1d49ab4b43770"), false, 7, 1557432154, 0600, TEST_USER_STR, TEST_GROUP_STR, 0,
false, false, STRDEF("badpass")),
strNewFmt(STORAGE_REPO_BACKUP "/%s/%s.gz", strZ(repoFileReferenceFull), strZ(repoFile1)), repoIdx, 0, NULL, compressTypeGz,
STRDEF("normal"), STRDEF("d1cd8a7d11daa26814b93eb604e1d49ab4b43770"), false, 7, 1557432154, 0600, TEST_USER_STR,
TEST_GROUP_STR, 0, false, false, STRDEF("badpass")),
true, "copy file");
StorageInfo info = storageInfoP(storagePg(), STRDEF("normal"));
@@ -242,9 +242,9 @@ testRun(void)
TEST_RESULT_BOOL(
restoreFile(
repoFile1, repoIdx, repoFileReferenceFull, compressTypeNone, STRDEF("delta"),
STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 9, 1557432154, 0600, TEST_USER_STR, TEST_GROUP_STR, 0,
true, false, NULL),
strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(repoFileReferenceFull), strZ(repoFile1)), repoIdx, 0, NULL, compressTypeNone,
STRDEF("delta"), STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 9, 1557432154, 0600, TEST_USER_STR,
TEST_GROUP_STR, 0, true, false, NULL),
true, "sha1 delta missing");
TEST_STORAGE_GET(storagePg(), "delta", "atestfile", .comment = "check contents");
@@ -256,18 +256,18 @@ testRun(void)
TEST_RESULT_BOOL(
restoreFile(
repoFile1, repoIdx, repoFileReferenceFull, compressTypeNone, STRDEF("delta"),
STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 9, 1557432154, 0600, TEST_USER_STR, TEST_GROUP_STR, 0,
true, false, NULL),
strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(repoFileReferenceFull), strZ(repoFile1)), repoIdx, 0, NULL, compressTypeNone,
STRDEF("delta"), STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 9, 1557432154, 0600, TEST_USER_STR,
TEST_GROUP_STR, 0, true, false, NULL),
false, "sha1 delta existing");
ioBufferSizeSet(oldBufferSize);
TEST_RESULT_BOOL(
restoreFile(
repoFile1, repoIdx, repoFileReferenceFull, compressTypeNone, STRDEF("delta"),
STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 9, 1557432154, 0600, TEST_USER_STR, TEST_GROUP_STR,
1557432155, true, true, NULL),
strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(repoFileReferenceFull), strZ(repoFile1)), repoIdx, 0, NULL, compressTypeNone,
STRDEF("delta"), STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 9, 1557432154, 0600, TEST_USER_STR,
TEST_GROUP_STR, 1557432155, true, true, NULL),
false, "sha1 delta force existing");
// -------------------------------------------------------------------------------------------------------------------------
@@ -278,9 +278,9 @@ testRun(void)
TEST_RESULT_BOOL(
restoreFile(
repoFile1, repoIdx, repoFileReferenceFull, compressTypeNone, STRDEF("delta"),
STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 9, 1557432154, 0600, TEST_USER_STR, TEST_GROUP_STR, 0,
true, false, NULL),
strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(repoFileReferenceFull), strZ(repoFile1)), repoIdx, 0, NULL, compressTypeNone,
STRDEF("delta"), STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 9, 1557432154, 0600, TEST_USER_STR,
TEST_GROUP_STR, 0, true, false, NULL),
true, "sha1 delta existing, size differs");
TEST_STORAGE_GET(storagePg(), "delta", "atestfile", .comment = "check contents");
@@ -288,9 +288,9 @@ testRun(void)
TEST_RESULT_BOOL(
restoreFile(
repoFile1, repoIdx, repoFileReferenceFull, compressTypeNone, STRDEF("delta"),
STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 9, 1557432154, 0600, TEST_USER_STR, TEST_GROUP_STR,
1557432155, true, true, NULL),
strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(repoFileReferenceFull), strZ(repoFile1)), repoIdx, 0, NULL, compressTypeNone,
STRDEF("delta"), STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 9, 1557432154, 0600, TEST_USER_STR,
TEST_GROUP_STR, 1557432155, true, true, NULL),
true, "delta force existing, size differs");
TEST_STORAGE_GET(storagePg(), "delta", "atestfile", .comment = "check contents");
@@ -302,9 +302,9 @@ testRun(void)
TEST_RESULT_BOOL(
restoreFile(
repoFile1, repoIdx, repoFileReferenceFull, compressTypeNone, STRDEF("delta"),
STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 9, 1557432154, 0600, TEST_USER_STR, TEST_GROUP_STR, 0,
true, false, NULL),
strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(repoFileReferenceFull), strZ(repoFile1)), repoIdx, 0, NULL, compressTypeNone,
STRDEF("delta"), STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 9, 1557432154, 0600, TEST_USER_STR,
TEST_GROUP_STR, 0, true, false, NULL),
true, "sha1 delta existing, content differs");
TEST_STORAGE_GET(storagePg(), "delta", "atestfile", .comment = "check contents");
@@ -312,16 +312,16 @@ testRun(void)
TEST_RESULT_BOOL(
restoreFile(
repoFile1, repoIdx, repoFileReferenceFull, compressTypeNone, STRDEF("delta"),
STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 9, 1557432154, 0600, TEST_USER_STR, TEST_GROUP_STR,
1557432155, true, true, NULL),
strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(repoFileReferenceFull), strZ(repoFile1)), repoIdx, 0, NULL, compressTypeNone,
STRDEF("delta"), STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 9, 1557432154, 0600, TEST_USER_STR,
TEST_GROUP_STR, 1557432155, true, true, NULL),
true, "delta force existing, timestamp differs");
TEST_RESULT_BOOL(
restoreFile(
repoFile1, repoIdx, repoFileReferenceFull, compressTypeNone, STRDEF("delta"),
STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 9, 1557432154, 0600, TEST_USER_STR, TEST_GROUP_STR,
1557432153, true, true, NULL),
strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(repoFileReferenceFull), strZ(repoFile1)), repoIdx, 0, NULL, compressTypeNone,
STRDEF("delta"), STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 9, 1557432154, 0600, TEST_USER_STR,
TEST_GROUP_STR, 1557432153, true, true, NULL),
true, "delta force existing, timestamp after copy time");
// Change the existing file to zero-length
@@ -329,9 +329,9 @@ testRun(void)
TEST_RESULT_BOOL(
restoreFile(
repoFile1, repoIdx, repoFileReferenceFull, compressTypeNone, STRDEF("delta"),
STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 0, 1557432154, 0600, TEST_USER_STR, TEST_GROUP_STR, 0,
true, false, NULL),
strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(repoFileReferenceFull), strZ(repoFile1)), repoIdx, 0, NULL, compressTypeNone,
STRDEF("delta"), STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 0, 1557432154, 0600, TEST_USER_STR,
TEST_GROUP_STR, 0, true, false, NULL),
false, "sha1 delta existing, content differs");
}
@@ -2412,17 +2412,18 @@ testRun(void)
(ManifestFile){
.name = STRDEF(TEST_PGDATA PG_PATH_GLOBAL "/999"), .size = 0, .timestamp = 1482182860,
.mode = 0600, .group = groupName(), .user = userName(),
.checksumSha1 = HASH_TYPE_SHA1_ZERO, .reference = STRDEF(TEST_LABEL)});
.checksumSha1 = HASH_TYPE_SHA1_ZERO});
HRN_STORAGE_PUT_EMPTY(storageRepoWrite(), TEST_REPO_PATH PG_PATH_GLOBAL "/999");
// PG_VERSION
manifestFileAdd(
manifest,
(ManifestFile){
.name = STRDEF(TEST_PGDATA PG_FILE_PGVERSION), .size = 4, .timestamp = 1482182860,
.mode = 0600, .group = groupName(), .user = userName(),
.checksumSha1 = "8dbabb96e032b8d9f1993c0e4b9141e71ade01a1"});
HRN_STORAGE_PUT_Z(storageRepoWrite(), TEST_REPO_PATH PG_FILE_PGVERSION, PG_VERSION_94_STR "\n");
.name = STRDEF(TEST_PGDATA PG_FILE_PGVERSION), .size = 4, .sizeRepo = 4, .timestamp = 1482182860,
.mode = 0600, .group = groupName(), .user = userName(), .bundleId = 1, .bundleOffset = 0,
.reference = STRDEF(TEST_LABEL), .checksumSha1 = "8dbabb96e032b8d9f1993c0e4b9141e71ade01a1"});
HRN_STORAGE_PUT_Z(
storageRepoWrite(), STORAGE_REPO_BACKUP "/" TEST_LABEL "/bundle/1", PG_VERSION_94_STR "\n" PG_VERSION_94_STR "\n");
// base directory
manifestPathAdd(
@@ -2436,14 +2437,13 @@ testRun(void)
&(ManifestPath){
.name = STRDEF(TEST_PGDATA PG_PATH_BASE "/1"), .mode = 0700, .group = groupName(), .user = userName()});
// base/1/PG_VERSION
// base/1/PG_VERSION. File was written as part of bundle 1 above
manifestFileAdd(
manifest,
(ManifestFile){
.name = STRDEF(TEST_PGDATA "base/1/" PG_FILE_PGVERSION), .size = 4, .timestamp = 1482182860,
.mode = 0600, .group = groupName(), .user = userName(),
.name = STRDEF(TEST_PGDATA "base/1/" PG_FILE_PGVERSION), .size = 4, .sizeRepo = 4, .timestamp = 1482182860,
.mode = 0600, .group = groupName(), .user = userName(), .bundleId = 1, .bundleOffset = 4,
.checksumSha1 = "8dbabb96e032b8d9f1993c0e4b9141e71ade01a1"});
HRN_STORAGE_PUT_Z(storageRepoWrite(), TEST_REPO_PATH "base/1/" PG_FILE_PGVERSION, PG_VERSION_94_STR "\n");
// base/1/2
fileBuffer = bufNew(8192);

View File

@@ -856,19 +856,22 @@ testRun(void)
String *filePathName = strNewZ(STORAGE_REPO_ARCHIVE "/testfile");
HRN_STORAGE_PUT_EMPTY(storageRepoWrite(), strZ(filePathName));
TEST_RESULT_UINT(verifyFile(filePathName, STRDEF(HASH_TYPE_SHA1_ZERO), 0, NULL), verifyOk, "file ok");
TEST_RESULT_UINT(
verifyFile(filePathName, 0, NULL, compressTypeNone, STRDEF(HASH_TYPE_SHA1_ZERO), 0, NULL), verifyOk, "file ok");
//--------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("file size invalid in archive");
HRN_STORAGE_PUT_Z(storageRepoWrite(), strZ(filePathName), fileContents);
TEST_RESULT_UINT(verifyFile(filePathName, fileChecksum, 0, NULL), verifySizeInvalid, "file size invalid");
TEST_RESULT_UINT(
verifyFile(filePathName, 0, NULL, compressTypeNone, fileChecksum, 0, NULL), verifySizeInvalid, "file size invalid");
//--------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("file missing in archive");
TEST_RESULT_UINT(
verifyFile(
strNewFmt(STORAGE_REPO_ARCHIVE "/missingFile"), fileChecksum, 0, NULL), verifyFileMissing, "file missing");
verifyFile(strNewFmt(STORAGE_REPO_ARCHIVE "/missingFile"), 0, NULL, compressTypeNone, fileChecksum, 0, NULL),
verifyFileMissing, "file missing");
//--------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("encrypted/compressed file in backup");
@@ -881,10 +884,11 @@ testRun(void)
strCatZ(filePathName, ".gz");
TEST_RESULT_UINT(
verifyFile(filePathName, fileChecksum, fileSize, STRDEF("pass")), verifyOk, "file encrypted compressed ok");
verifyFile(filePathName, 0, NULL, compressTypeGz, fileChecksum, fileSize, STRDEF("pass")),
verifyOk, "file encrypted compressed ok");
TEST_RESULT_UINT(
verifyFile(
filePathName, STRDEF("badchecksum"), fileSize, STRDEF("pass")), verifyChecksumMismatch,
filePathName, 0, NULL, compressTypeGz, STRDEF("badchecksum"), fileSize, STRDEF("pass")), verifyChecksumMismatch,
"file encrypted compressed checksum mismatch");
}
@@ -1383,6 +1387,7 @@ testRun(void)
// Create valid full backup and valid diff backup
manifestContent = strNewFmt(
TEST_MANIFEST_HEADER
"backup-bundle=true\n"
"\n"
"[backup:db]\n"
TEST_BACKUP_DB2_11
@@ -1391,7 +1396,8 @@ testRun(void)
TEST_MANIFEST_DB
"\n"
"[target:file]\n"
"pg_data/validfile={\"checksum\":\"%s\",\"size\":%u,\"timestamp\":1565282114}\n"
"pg_data/validfile={\"bni\":1,\"bno\":3,\"checksum\":\"%s\",\"size\":%u,\"timestamp\":1565282114}\n"
"pg_data/zerofile={\"size\":0,\"timestamp\":1565282114}\n"
TEST_MANIFEST_FILE_DEFAULT
TEST_MANIFEST_LINK
TEST_MANIFEST_LINK_DEFAULT
@@ -1407,7 +1413,8 @@ testRun(void)
.comment = "valid manifest copy - full");
HRN_STORAGE_PUT_Z(
storageRepoWrite(), STORAGE_REPO_BACKUP "/20201119-163000F/pg_data/validfile", fileContents, .comment = "valid file");
storageRepoWrite(), STORAGE_REPO_BACKUP "/20201119-163000F/bundle/1", strZ(strNewFmt("XXX%s", fileContents)),
.comment = "valid file");
// Create WAL file with just header info and small WAL size
Buffer *walBuffer = bufNew((size_t)(1024 * 1024));
@@ -1434,7 +1441,7 @@ testRun(void)
" missing: 1, checksum invalid: 1, size invalid: 1, other: 0\n"
" backup: 20181119-152900F_20181119-152909D, status: invalid, total files checked: 1, total valid files: 0\n"
" missing: 0, checksum invalid: 1, size invalid: 0, other: 0\n"
" backup: 20201119-163000F, status: valid, total files checked: 1, total valid files: 1\n"
" backup: 20201119-163000F, status: valid, total files checked: 2, total valid files: 2\n"
" missing: 0, checksum invalid: 0, size invalid: 0, other: 0");
}

View File

@@ -31,7 +31,7 @@ testRun(void)
// -------------------------------------------------------------------------------------------------------------------------
TEST_RESULT_UINT(sizeof(ManifestLoadFound), TEST_64BIT() ? 1 : 1, "check size of ManifestLoadFound");
TEST_RESULT_UINT(sizeof(ManifestPath), TEST_64BIT() ? 32 : 16, "check size of ManifestPath");
TEST_RESULT_UINT(sizeof(ManifestFile), TEST_64BIT() ? 120 : 92, "check size of ManifestFile");
TEST_RESULT_UINT(sizeof(ManifestFile), TEST_64BIT() ? 136 : 108, "check size of ManifestFile");
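        // The 16-byte growth on both architectures is consistent with the new 64-bit bundleId and bundleOffset fields.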
}
// *****************************************************************************************************************************
@@ -291,7 +291,7 @@ testRun(void)
// Test tablespace error
TEST_ERROR(
manifestNewBuild(
storagePg, PG_VERSION_90, hrnPgCatalogVersion(PG_VERSION_90), false, false, exclusionList, tablespaceList),
storagePg, PG_VERSION_90, hrnPgCatalogVersion(PG_VERSION_90), false, false, false, exclusionList, tablespaceList),
AssertError,
"tablespace with oid 1 not found in tablespace map\n"
"HINT: was a tablespace created or dropped during the backup?");
@@ -307,7 +307,8 @@ testRun(void)
TEST_ASSIGN(
manifest,
manifestNewBuild(storagePg, PG_VERSION_90, hrnPgCatalogVersion(PG_VERSION_90), false, false, NULL, tablespaceList),
manifestNewBuild(
storagePg, PG_VERSION_90, hrnPgCatalogVersion(PG_VERSION_90), false, false, false, NULL, tablespaceList),
"build manifest");
Buffer *contentSave = bufNew(0);
@@ -405,7 +406,8 @@ testRun(void)
// Test manifest - temp tables, unlogged tables, pg_serial and pg_xlog files ignored
TEST_ASSIGN(
manifest, manifestNewBuild(storagePg, PG_VERSION_91, hrnPgCatalogVersion(PG_VERSION_91), true, false, NULL, NULL),
manifest,
manifestNewBuild(storagePg, PG_VERSION_91, hrnPgCatalogVersion(PG_VERSION_91), true, false, false, NULL, NULL),
"build manifest");
contentSave = bufNew(0);
@@ -484,7 +486,8 @@ testRun(void)
// Test manifest - pg_snapshots files ignored
TEST_ASSIGN(
manifest, manifestNewBuild(storagePg, PG_VERSION_92, hrnPgCatalogVersion(PG_VERSION_92), false, false, NULL, NULL),
manifest,
manifestNewBuild(storagePg, PG_VERSION_92, hrnPgCatalogVersion(PG_VERSION_92), false, false, false, NULL, NULL),
"build manifest");
contentSave = bufNew(0);
@@ -546,7 +549,7 @@ testRun(void)
THROW_ON_SYS_ERROR(symlink(TEST_PATH "/wal", TEST_PATH "/wal/wal") == -1, FileOpenError, "unable to create symlink");
TEST_ERROR(
manifestNewBuild(storagePg, PG_VERSION_92, hrnPgCatalogVersion(PG_VERSION_92), false, false, NULL, NULL),
manifestNewBuild(storagePg, PG_VERSION_92, hrnPgCatalogVersion(PG_VERSION_92), false, false, false, NULL, NULL),
LinkDestinationError,
"link 'pg_xlog/wal' (" TEST_PATH "/wal) destination is the same directory as link 'pg_xlog' (" TEST_PATH "/wal)");
@@ -600,7 +603,8 @@ testRun(void)
// Test manifest - pg_dynshmem, pg_replslot and postgresql.auto.conf.tmp files ignored
TEST_ASSIGN(
manifest, manifestNewBuild(storagePg, PG_VERSION_94, hrnPgCatalogVersion(PG_VERSION_94), false, true, NULL, NULL),
manifest,
manifestNewBuild(storagePg, PG_VERSION_94, hrnPgCatalogVersion(PG_VERSION_94), false, true, false, NULL, NULL),
"build manifest");
contentSave = bufNew(0);
@@ -693,7 +697,7 @@ testRun(void)
        // Tablespace link errors when the correct version is not found
TEST_ERROR(
manifestNewBuild(storagePg, PG_VERSION_12, hrnPgCatalogVersion(PG_VERSION_12), false, false, NULL, NULL),
manifestNewBuild(storagePg, PG_VERSION_12, hrnPgCatalogVersion(PG_VERSION_12), false, false, false, NULL, NULL),
FileOpenError,
"unable to get info for missing path/file '" TEST_PATH "/pg/pg_tblspc/1/PG_12_201909212': [2] No such file or"
" directory");
@@ -712,7 +716,8 @@ testRun(void)
// and backup_label ignored. Old recovery files and pg_xlog are now just another file/directory and will not be ignored.
// pg_wal contents will be ignored online. pg_clog pgVersion > 10 primary:true, pg_xact pgVersion > 10 primary:false
TEST_ASSIGN(
manifest, manifestNewBuild(storagePg, PG_VERSION_12, hrnPgCatalogVersion(PG_VERSION_12), true, false, NULL, NULL),
manifest,
manifestNewBuild(storagePg, PG_VERSION_12, hrnPgCatalogVersion(PG_VERSION_12), true, false, false, NULL, NULL),
"build manifest");
contentSave = bufNew(0);
@@ -784,7 +789,8 @@ testRun(void)
// pg_wal not ignored
TEST_ASSIGN(
manifest, manifestNewBuild(storagePg, PG_VERSION_13, hrnPgCatalogVersion(PG_VERSION_13), false, false, NULL, NULL),
manifest,
manifestNewBuild(storagePg, PG_VERSION_13, hrnPgCatalogVersion(PG_VERSION_13), false, false, false, NULL, NULL),
"build manifest");
contentSave = bufNew(0);
@@ -854,7 +860,7 @@ testRun(void)
THROW_ON_SYS_ERROR(symlink(TEST_PATH "/pg/base", TEST_PATH "/pg/link") == -1, FileOpenError, "unable to create symlink");
TEST_ERROR(
manifestNewBuild(storagePg, PG_VERSION_94, hrnPgCatalogVersion(PG_VERSION_94), false, false, NULL, NULL),
manifestNewBuild(storagePg, PG_VERSION_94, hrnPgCatalogVersion(PG_VERSION_94), false, false, false, NULL, NULL),
LinkDestinationError, "link 'link' destination '" TEST_PATH "/pg/base' is in PGDATA");
THROW_ON_SYS_ERROR(unlink(TEST_PATH "/pg/link") == -1, FileRemoveError, "unable to remove symlink");
@@ -865,7 +871,7 @@ testRun(void)
HRN_STORAGE_PATH_CREATE(storagePgWrite, MANIFEST_TARGET_PGTBLSPC "/somedir", .mode = 0700);
TEST_ERROR(
manifestNewBuild(storagePg, PG_VERSION_94, hrnPgCatalogVersion(PG_VERSION_94), false, false, NULL, NULL),
manifestNewBuild(storagePg, PG_VERSION_94, hrnPgCatalogVersion(PG_VERSION_94), false, false, false, NULL, NULL),
LinkExpectedError, "'pg_data/pg_tblspc/somedir' is not a symlink - pg_tblspc should contain only symlinks");
HRN_STORAGE_PATH_REMOVE(storagePgWrite, MANIFEST_TARGET_PGTBLSPC "/somedir");
@@ -876,7 +882,7 @@ testRun(void)
HRN_STORAGE_PUT_EMPTY(storagePgWrite, MANIFEST_TARGET_PGTBLSPC "/somefile");
TEST_ERROR(
manifestNewBuild(storagePg, PG_VERSION_94, hrnPgCatalogVersion(PG_VERSION_94), false, false, NULL, NULL),
manifestNewBuild(storagePg, PG_VERSION_94, hrnPgCatalogVersion(PG_VERSION_94), false, false, false, NULL, NULL),
LinkExpectedError, "'pg_data/pg_tblspc/somefile' is not a symlink - pg_tblspc should contain only symlinks");
TEST_STORAGE_EXISTS(storagePgWrite, MANIFEST_TARGET_PGTBLSPC "/somefile", .remove = true);
@@ -887,7 +893,8 @@ testRun(void)
THROW_ON_SYS_ERROR(symlink("../bogus-link", TEST_PATH "/pg/link-to-link") == -1, FileOpenError, "unable to create symlink");
TEST_ERROR(
manifestNewBuild(storagePg, PG_VERSION_94, hrnPgCatalogVersion(PG_VERSION_94), false, true, NULL, NULL), FileOpenError,
manifestNewBuild(storagePg, PG_VERSION_94, hrnPgCatalogVersion(PG_VERSION_94), false, true, false, NULL, NULL),
FileOpenError,
"unable to get info for missing path/file '" TEST_PATH "/pg/link-to-link': [2] No such file or directory");
THROW_ON_SYS_ERROR(unlink(TEST_PATH "/pg/link-to-link") == -1, FileRemoveError, "unable to remove symlink");
@@ -902,7 +909,7 @@ testRun(void)
symlink(TEST_PATH "/linktest", TEST_PATH "/pg/linktolink") == -1, FileOpenError, "unable to create symlink");
TEST_ERROR(
manifestNewBuild(storagePg, PG_VERSION_94, hrnPgCatalogVersion(PG_VERSION_94), false, false, NULL, NULL),
manifestNewBuild(storagePg, PG_VERSION_94, hrnPgCatalogVersion(PG_VERSION_94), false, false, false, NULL, NULL),
LinkDestinationError, "link '" TEST_PATH "/pg/linktolink' cannot reference another link '" TEST_PATH "/linktest'");
#undef TEST_MANIFEST_HEADER
@@ -1413,6 +1420,7 @@ testRun(void)
"[backup]\n" \
"backup-archive-start=\"000000030000028500000089\"\n" \
"backup-archive-stop=\"000000030000028500000089\"\n" \
"backup-bundle=true\n" \
"backup-label=\"20190818-084502F_20190820-084502D\"\n" \
"backup-lsn-start=\"285/89000028\"\n" \
"backup-lsn-stop=\"285/89001F88\"\n" \
@@ -1472,10 +1480,10 @@ testRun(void)
"pg_data/=equal=more=={\"mode\":\"0640\",\"size\":0,\"timestamp\":1565282120}\n" \
"pg_data/PG_VERSION={\"checksum\":\"184473f470864e067ee3a22e64b47b0a1c356f29\"" \
",\"reference\":\"20190818-084502F_20190819-084506D\",\"size\":4,\"timestamp\":1565282114}\n" \
"pg_data/base/16384/17000={\"checksum\":\"e0101dd8ffb910c9c202ca35b5f828bcb9697bed\",\"checksum-page\":false" \
"pg_data/base/16384/17000={\"bni\":1,\"checksum\":\"e0101dd8ffb910c9c202ca35b5f828bcb9697bed\",\"checksum-page\":false"\
",\"checksum-page-error\":[1],\"repo-size\":4096,\"size\":8192,\"timestamp\":1565282114}\n" \
"pg_data/base/16384/PG_VERSION={\"checksum\":\"184473f470864e067ee3a22e64b47b0a1c356f29\",\"group\":\"group2\"" \
",\"size\":4,\"timestamp\":1565282115,\"user\":false}\n" \
"pg_data/base/16384/PG_VERSION={\"bni\":1,\"bno\":1,\"checksum\":\"184473f470864e067ee3a22e64b47b0a1c356f29\"" \
",\"group\":\"group2\",\"size\":4,\"timestamp\":1565282115,\"user\":false}\n" \
"pg_data/base/32768/33000={\"checksum\":\"7a16d165e4775f7c92e8cdf60c0af57313f0bf90\",\"checksum-page\":true" \
",\"reference\":\"20190818-084502F\",\"size\":1073741824,\"timestamp\":1565282116}\n" \
"pg_data/base/32768/33000.32767={\"checksum\":\"6e99b589e550e68e934fd235ccba59fe5b592a9e\",\"checksum-page\":true" \
@@ -1524,6 +1532,7 @@ testRun(void)
"[backup]\n"
"backup-archive-start=\"000000040000028500000089\"\n"
"backup-archive-stop=\"000000040000028500000089\"\n"
"backup-bundle=true\n"
"backup-label=\"20190818-084502F\"\n"
"backup-lsn-start=\"300/89000028\"\n"
"backup-lsn-stop=\"300/89001F88\"\n"
@@ -1575,8 +1584,8 @@ testRun(void)
TEST_TITLE("manifest validation");
// Munge files to produce errors
manifestFileUpdate(manifest, STRDEF("pg_data/postgresql.conf"), 4457, 0, NULL, NULL, false, false, NULL);
manifestFileUpdate(manifest, STRDEF("pg_data/base/32768/33000.32767"), 0, 0, NULL, NULL, true, false, NULL);
manifestFileUpdate(manifest, STRDEF("pg_data/postgresql.conf"), 4457, 0, NULL, NULL, false, false, NULL, 0, 0);
manifestFileUpdate(manifest, STRDEF("pg_data/base/32768/33000.32767"), 0, 0, NULL, NULL, true, false, NULL, 0, 0);
TEST_ERROR(
manifestValidate(manifest, false), FormatError,
@@ -1591,10 +1600,10 @@ testRun(void)
"repo size must be > 0 for file 'pg_data/postgresql.conf'");
// Undo changes made to files
manifestFileUpdate(manifest, STRDEF("pg_data/base/32768/33000.32767"), 32768, 32768, NULL, NULL, true, false, NULL);
manifestFileUpdate(manifest, STRDEF("pg_data/base/32768/33000.32767"), 32768, 32768, NULL, NULL, true, false, NULL, 0, 0);
manifestFileUpdate(
manifest, STRDEF("pg_data/postgresql.conf"), 4457, 4457, "184473f470864e067ee3a22e64b47b0a1c356f29", NULL, false,
false, NULL);
false, NULL, 0, 0);
TEST_RESULT_VOID(manifestValidate(manifest, true), "successful validate");
@@ -1739,10 +1748,11 @@ testRun(void)
TEST_RESULT_BOOL(manifestFileExists(manifest, STRDEF("bogus")), false, "manifest file does not exist");
TEST_RESULT_VOID(
manifestFileUpdate(manifest, STRDEF("pg_data/postgresql.conf"), 4457, 4457, "", NULL, false, false, NULL),
manifestFileUpdate(manifest, STRDEF("pg_data/postgresql.conf"), 4457, 4457, "", NULL, false, false, NULL, 0, 0),
"update file");
TEST_RESULT_VOID(
manifestFileUpdate(manifest, STRDEF("pg_data/postgresql.conf"), 4457, 4457, NULL, varNewStr(NULL), false, false, NULL),
manifestFileUpdate(
manifest, STRDEF("pg_data/postgresql.conf"), 4457, 4457, NULL, varNewStr(NULL), false, false, NULL, 0, 0),
"update file");
// ManifestDb getters

View File

@@ -271,7 +271,8 @@ testRun(void)
MEM_CONTEXT_BEGIN(testContext)
{
TEST_ASSIGN(manifest, manifestNewBuild(storagePg, PG_VERSION_91, 999999999, false, false, NULL, NULL), "build files");
TEST_ASSIGN(
manifest, manifestNewBuild(storagePg, PG_VERSION_91, 999999999, false, false, false, NULL, NULL), "build files");
}
MEM_CONTEXT_END();