
Block-level incremental backup.

The primary goal of the block incremental backup is to save space in the repository by only storing changed parts of a file rather than the entire file. This implementation is focused on restore performance more than saving space in the repository, though there may be substantial savings depending on the workload.

The repo-block option enables the feature (when repo-bundle is already enabled). The block size is determined based on the file size and age. Very old or very small files will not use block incremental.
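
A minimal configuration sketch, assuming a single repository (the repo1- prefix follows from the repo option group) and noting that the option is still internal in this commit:

[global]
repo1-bundle=y
repo1-block=y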
David Steele
2023-01-20 16:48:57 +07:00
committed by GitHub
parent 008a18555c
commit 912eec63bb
47 changed files with 3370 additions and 221 deletions

View File

@ -58,6 +58,22 @@
</release-improvement-list>
<release-development-list>
<release-item>
<github-issue id="959"/>
<github-issue id="1806"/>
<github-issue id="1913"/>
<github-pull-request id="1916"/>
<release-item-contributor-list>
<release-item-contributor id="david.steele"/>
<release-item-reviewer id="stephen.frost"/>
<release-item-reviewer id="stefan.fercot"/>
<release-item-reviewer id="john.morris"/>
</release-item-contributor-list>
<p>Block-level incremental backup.</p>
</release-item>
<release-item>
<commit subject="Rename EncodeType enum to EncodingType."/>
<commit subject="Add hex encode/decoding to decode module."/>

View File

@ -59,6 +59,8 @@ SRCS = \
command/archive/push/protocol.c \
command/archive/push/push.c \
command/backup/backup.c \
command/backup/blockIncr.c \
command/backup/blockMap.c \
command/backup/common.c \
command/backup/pageChecksum.c \
command/backup/protocol.c \
@ -80,6 +82,7 @@ SRCS = \
command/repo/ls.c \
command/repo/put.c \
command/repo/rm.c \
command/restore/deltaMap.c \
command/restore/file.c \
command/restore/protocol.c \
command/restore/restore.c \
@ -110,10 +113,12 @@ SRCS = \
common/exec.c \
common/fork.c \
common/ini.c \
common/io/chunkedRead.c \
common/io/client.c \
common/io/fd.c \
common/io/fdRead.c \
common/io/fdWrite.c \
common/io/filter/chunk.c \
common/io/filter/size.c \
common/io/http/client.c \
common/io/http/common.c \

View File

@ -1818,6 +1818,22 @@ option:
default: 2MiB
allow-range: [8KiB, 1PiB]
repo-block:
section: global
group: repo
type: boolean
internal: true
default: false
command:
backup: {}
command-role:
main: {}
depend:
option: repo-bundle
list:
- true
repo-cipher-pass:
section: global
type: string

View File

@ -506,6 +506,21 @@
<example>path</example>
</config-key>
<config-key id="repo-block" name="Block Incremental Backup">
<summary>Enable block incremental backup.</summary>
<text>
<p>Block incremental allows for more granular backups by splitting files into blocks that can be backed up independently. This saves space in the repository and can improve delta restore performance because individual blocks can be fetched without reading the entire file from the repository.</p>
<admonition type="note">The <br-option>repo-bundle</br-option> option must be enabled before <br-option>repo-block</br-option> can be enabled.</admonition>
<p>The block size for a file is determined based on the file size and age. Generally, older/larger files will get larger block sizes. If a file is old enough, it will not be backed up using block incremental.</p>
<p>Block incremental is most efficient when enabled for all backup types, including full. This makes the full a bit larger but subsequent differential and incremental backups can make use of the block maps generated by the full backup to save space.</p>
</text>
<example>y</example>
</config-key>
<config-key id="repo-bundle" name="Repository Bundles">
<summary>Bundle files in repository.</summary>

View File

@ -581,6 +581,7 @@ backupResumeClean(
{
ASSERT(file.copy);
ASSERT(file.bundleId == 0);
ASSERT(file.blockIncrMapSize == 0);
file.sizeRepo = fileResume.sizeRepo;
file.checksumSha1 = fileResume.checksumSha1;
@ -1191,6 +1192,7 @@ backupJobResult(
const BackupCopyResult copyResult = (BackupCopyResult)pckReadU32P(jobResult);
const uint64_t copySize = pckReadU64P(jobResult);
const uint64_t bundleOffset = pckReadU64P(jobResult);
const uint64_t blockIncrMapSize = pckReadU64P(jobResult);
const uint64_t repoSize = pckReadU64P(jobResult);
const Buffer *const copyChecksum = pckReadBinP(jobResult);
const Buffer *const repoChecksum = pckReadBinP(jobResult);
@ -1341,6 +1343,7 @@ backupJobResult(
jsonFromVar(varNewVarLst(checksumPageErrorList)) : NULL;
file.bundleId = bundleId;
file.bundleOffset = bundleOffset;
file.blockIncrMapSize = blockIncrMapSize;
manifestFileUpdate(manifest, &file);
}
@ -1446,6 +1449,7 @@ typedef struct BackupJobData
uint64_t bundleSize; // Target bundle size
uint64_t bundleLimit; // Limit on files to bundle
uint64_t bundleId; // Bundle id
const bool blockIncr; // Block incremental?
List *queueList; // List of processing queues
} BackupJobData;
@ -1714,6 +1718,9 @@ static ProtocolParallelJob *backupJobCallback(void *data, unsigned int clientIdx
continue;
}
// Is this file a block incremental?
const bool blockIncr = jobData->blockIncr && file.blockIncrSize > 0;
// Add common parameters before first file
if (param == NULL)
{
@ -1722,6 +1729,7 @@ static ProtocolParallelJob *backupJobCallback(void *data, unsigned int clientIdx
if (bundle && file.size <= jobData->bundleLimit)
{
pckWriteStrP(param, backupFileRepoPathP(jobData->backupLabel, .bundleId = jobData->bundleId));
pckWriteU64P(param, jobData->bundleId);
}
else
{
@ -1730,12 +1738,17 @@ static ProtocolParallelJob *backupJobCallback(void *data, unsigned int clientIdx
pckWriteStrP(
param,
backupFileRepoPathP(
jobData->backupLabel, .manifestName = file.name, .compressType = jobData->compressType));
jobData->backupLabel, .manifestName = file.name, .compressType = jobData->compressType,
.blockIncr = blockIncr));
pckWriteU64P(param, 0);
fileName = file.name;
bundle = false;
}
// Provide the backup reference
pckWriteU64P(param, strLstSize(manifestReferenceList(jobData->manifest)) - 1);
pckWriteU32P(param, jobData->compressType);
pckWriteI32P(param, jobData->compressLevel);
pckWriteU64P(param, jobData->cipherSubPass == NULL ? cipherTypeNone : cipherTypeAes256Cbc);
@ -1749,6 +1762,27 @@ static ProtocolParallelJob *backupJobCallback(void *data, unsigned int clientIdx
pckWriteBoolP(param, !backupProcessFilePrimary(jobData->standbyExp, file.name));
pckWriteBinP(param, file.checksumSha1 != NULL ? BUF(file.checksumSha1, HASH_TYPE_SHA1_SIZE) : NULL);
pckWriteBoolP(param, file.checksumPage);
// If block incremental then provide the location of the prior map when available
if (blockIncr)
{
pckWriteU64P(param, file.blockIncrSize);
if (file.blockIncrMapSize != 0)
{
pckWriteStrP(
param,
backupFileRepoPathP(
file.reference, .manifestName = file.name, .bundleId = file.bundleId, .blockIncr = true));
pckWriteU64P(param, file.bundleOffset + file.sizeRepo - file.blockIncrMapSize);
pckWriteU64P(param, file.blockIncrMapSize);
}
else
pckWriteNullP(param);
}
else
pckWriteU64P(param, 0);
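// Example of the prior map location written above (values assumed): the block map is stored at the tail
// of the repo file, so its offset is bundleOffset + sizeRepo - blockIncrMapSize. A file at bundle offset
// 1000 with repo size 300 and map size 40 yields map offset 1260 and map length 40.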
pckWriteStrP(param, file.name);
pckWriteBinP(param, file.checksumRepoSha1 != NULL ? BUF(file.checksumRepoSha1, HASH_TYPE_SHA1_SIZE) : NULL);
pckWriteU64P(param, file.sizeRepo);
@ -1829,6 +1863,7 @@ backupProcess(
.delta = cfgOptionBool(cfgOptDelta),
.bundle = cfgOptionBool(cfgOptRepoBundle),
.bundleId = 1,
.blockIncr = cfgOptionBool(cfgOptRepoBlock),
// Build expression to identify files that can be copied from the standby when standby backup is supported
.standbyExp = regExpNew(
@ -2112,8 +2147,8 @@ backupArchiveCheckCopy(const BackupData *const backupData, Manifest *const manif
storageNewWriteP(
storageRepoWrite(),
backupFileRepoPathP(
manifestData(manifest)->backupLabel, .manifestName = manifestName,
.compressType = compressTypeEnum(cfgOptionStrId(cfgOptCompressType)))));
manifestData(manifest)->backupLabel, manifestName, 0,
compressTypeEnum(cfgOptionStrId(cfgOptCompressType)), false)));
// Add to manifest
ManifestFile file =
@ -2270,9 +2305,9 @@ cmdBackup(void)
// Build the manifest
Manifest *manifest = manifestNewBuild(
backupData->storagePrimary, infoPg.version, infoPg.catalogVersion, cfgOptionBool(cfgOptOnline),
cfgOptionBool(cfgOptChecksumPage), cfgOptionBool(cfgOptRepoBundle), strLstNewVarLst(cfgOptionLst(cfgOptExclude)),
backupStartResult.tablespaceList);
backupData->storagePrimary, infoPg.version, infoPg.catalogVersion, timestampStart, cfgOptionBool(cfgOptOnline),
cfgOptionBool(cfgOptChecksumPage), cfgOptionBool(cfgOptRepoBundle), cfgOptionBool(cfgOptRepoBlock),
strLstNewVarLst(cfgOptionLst(cfgOptExclude)), backupStartResult.tablespaceList);
// Validate the manifest using the copy start time
manifestBuildValidate(
@ -2317,8 +2352,8 @@ cmdBackup(void)
// Complete manifest
manifestBuildComplete(
manifest, timestampStart, backupStartResult.lsn, backupStartResult.walSegmentName, backupStopResult.timestamp,
backupStopResult.lsn, backupStopResult.walSegmentName, infoPg.id, infoPg.systemId, backupStartResult.dbList,
manifest, backupStartResult.lsn, backupStartResult.walSegmentName, backupStopResult.timestamp, backupStopResult.lsn,
backupStopResult.walSegmentName, infoPg.id, infoPg.systemId, backupStartResult.dbList,
cfgOptionBool(cfgOptArchiveCheck), cfgOptionBool(cfgOptArchiveCopy), cfgOptionUInt(cfgOptBufferSize),
cfgOptionUInt(cfgOptCompressLevel), cfgOptionUInt(cfgOptCompressLevelNetwork), cfgOptionBool(cfgOptRepoHardlink),
cfgOptionUInt(cfgOptProcessMax), cfgOptionBool(cfgOptBackupStandby),

View File

@ -0,0 +1,452 @@
/***********************************************************************************************************************************
Block Incremental Filter
***********************************************************************************************************************************/
#include "build.auto.h"
#include "command/backup/blockIncr.h"
#include "command/backup/blockMap.h"
#include "common/compress/helper.h"
#include "common/crypto/cipherBlock.h"
#include "common/crypto/hash.h"
#include "common/debug.h"
#include "common/io/bufferRead.h"
#include "common/io/bufferWrite.h"
#include "common/io/filter/buffer.h"
#include "common/io/filter/chunk.h"
#include "common/io/filter/size.h"
#include "common/io/io.h"
#include "common/log.h"
#include "common/type/pack.h"
#include "common/type/object.h"
/***********************************************************************************************************************************
Object type
***********************************************************************************************************************************/
typedef struct BlockIncr
{
MemContext *memContext; // Mem context of filter
unsigned int reference; // Current backup reference
uint64_t bundleId; // Bundle id
StringId compressType; // Compress filter type
const Pack *compressParam; // Compress filter parameters
const Pack *encryptParam; // Encrypt filter parameters
unsigned int blockNo; // Block number
unsigned int blockNoLast; // Last block no
uint64_t blockOffset; // Block offset
size_t blockSize; // Block size
Buffer *block; // Block buffer
Buffer *blockOut; // Block output buffer
size_t blockOutOffset; // Block output offset (already copied to output buffer)
const BlockMap *blockMapPrior; // Prior block map
BlockMap *blockMapOut; // Output block map
uint64_t blockMapOutSize; // Output block map size (if any)
size_t inputOffset; // Input offset
bool inputSame; // Input the same data
bool done; // Is the filter done?
} BlockIncr;
/***********************************************************************************************************************************
Macros for function logging
***********************************************************************************************************************************/
static void
blockIncrToLog(const BlockIncr *const this, StringStatic *const debugLog)
{
strStcFmt(debugLog, "{blockSize: %zu}", this->blockSize);
}
#define FUNCTION_LOG_BLOCK_INCR_TYPE \
BlockIncr *
#define FUNCTION_LOG_BLOCK_INCR_FORMAT(value, buffer, bufferSize) \
FUNCTION_LOG_OBJECT_FORMAT(value, blockIncrToLog, buffer, bufferSize)
/***********************************************************************************************************************************
Process input into blocks, writing changed blocks and building the block map
***********************************************************************************************************************************/
static void
blockIncrProcess(THIS_VOID, const Buffer *const input, Buffer *const output)
{
THIS(BlockIncr);
FUNCTION_LOG_BEGIN(logLevelTrace);
FUNCTION_LOG_PARAM(BLOCK_INCR, this);
FUNCTION_LOG_PARAM(BUFFER, input);
FUNCTION_LOG_PARAM(BUFFER, output);
FUNCTION_LOG_END();
ASSERT(this != NULL);
ASSERT(output != NULL);
// Is the input done?
this->done = input == NULL;
// Loop until the input is consumed or there is output
do
{
// If still accumulating data in the buffer
if (!this->done && bufUsed(this->block) < this->blockSize)
{
// If all input can be copied
if (bufUsed(input) - this->inputOffset <= bufRemains(this->block))
{
bufCatSub(this->block, input, this->inputOffset, bufUsed(input) - this->inputOffset);
this->inputOffset = 0;
// Same input no longer required
this->inputSame = false;
}
// Else only part of the input can be copied
else
{
const size_t copySize = bufRemains(this->block);
bufCatSub(this->block, input, this->inputOffset, copySize);
this->inputOffset += copySize;
// The same input will be needed again to copy the rest
this->inputSame = true;
}
}
// If done or block is full
if (this->done || bufUsed(this->block) == this->blockSize)
{
// The output buffer must be empty before writing the new block (if not it will be flushed below)
if (bufUsed(this->blockOut) == 0)
{
// Store the block when size > 0
if (bufUsed(this->block) > 0)
{
MEM_CONTEXT_TEMP_BEGIN()
{
// Get block checksum
const Buffer *const checksum = cryptoHashOne(hashTypeSha1, this->block);
// Does the block exist in the input map?
const BlockMapItem *const blockMapItemIn =
this->blockMapPrior != NULL && this->blockNo < blockMapSize(this->blockMapPrior) ?
blockMapGet(this->blockMapPrior, this->blockNo) : NULL;
// If the block is new or has changed then write it
if (blockMapItemIn == NULL ||
memcmp(blockMapItemIn->checksum, bufPtrConst(checksum), bufUsed(checksum)) != 0)
{
IoWrite *const write = ioBufferWriteNew(this->blockOut);
bool bufferRequired = true;
// Add compress filter
if (this->compressParam != NULL)
{
ioFilterGroupAdd(
ioWriteFilterGroup(write), compressFilterPack(this->compressType, this->compressParam));
bufferRequired = false;
}
// Add encrypt filter
if (this->encryptParam != NULL)
{
ioFilterGroupAdd(ioWriteFilterGroup(write), cipherBlockNewPack(this->encryptParam));
bufferRequired = false;
}
// If no compress/encrypt then add a buffer so chunk sizes are as large as possible
if (bufferRequired)
ioFilterGroupAdd(ioWriteFilterGroup(write), ioBufferNew());
// Add chunk and size filters
ioFilterGroupAdd(ioWriteFilterGroup(write), ioChunkNew());
ioFilterGroupAdd(ioWriteFilterGroup(write), ioSizeNew());
ioWriteOpen(write);
// Write the block no as a delta of the prior block no
ioWriteVarIntU64(write, this->blockNo - this->blockNoLast);
// Copy block data through the filters and close
ioCopyP(ioBufferReadNewOpen(this->block), write);
ioWriteClose(write);
// Write to block map
BlockMapItem blockMapItem =
{
.reference = this->reference,
.bundleId = this->bundleId,
.offset = this->blockOffset,
.size = pckReadU64P(ioFilterGroupResultP(ioWriteFilterGroup(write), SIZE_FILTER_TYPE)),
};
memcpy(blockMapItem.checksum, bufPtrConst(checksum), bufUsed(checksum));
blockMapAdd(this->blockMapOut, &blockMapItem);
// Increment block offset and last block no
this->blockOffset += blockMapItem.size;
this->blockNoLast = this->blockNo;
}
// Else write a reference to the block in the prior backup
else
{
blockMapAdd(this->blockMapOut, blockMapItemIn);
bufUsedZero(this->block);
}
this->blockNo++;
}
MEM_CONTEXT_TEMP_END();
}
// Write the block map if done processing and at least one block was written
if (this->done && this->blockNo > 0)
{
MEM_CONTEXT_TEMP_BEGIN()
{
// Size of block output before starting to write the map
const size_t blockOutBegin = bufUsed(this->blockOut);
// Create the write object for the map
IoWrite *const write = ioBufferWriteNew(this->blockOut);
if (this->encryptParam != NULL)
ioFilterGroupAdd(ioWriteFilterGroup(write), cipherBlockNewPack(this->encryptParam));
// Write the map
ioWriteOpen(write);
blockMapWrite(this->blockMapOut, write);
ioWriteClose(write);
// Get total bytes written for the map
this->blockMapOutSize = bufUsed(this->blockOut) - blockOutBegin;
}
MEM_CONTEXT_TEMP_END();
}
}
// Copy to output buffer
const size_t blockOutSize = bufUsed(this->blockOut) - this->blockOutOffset;
if (blockOutSize > 0)
{
// Output the rest of the block if it will fit
if (bufRemains(output) >= blockOutSize)
{
bufCatSub(output, this->blockOut, this->blockOutOffset, blockOutSize);
bufUsedZero(this->blockOut);
bufUsedZero(this->block);
this->blockOutOffset = 0;
this->inputSame = this->inputOffset != 0;
}
// Else output as much of the block as possible
else
{
const size_t blockOutSize = bufRemains(output);
bufCatSub(output, this->blockOut, this->blockOutOffset, blockOutSize);
this->blockOutOffset += blockOutSize;
this->inputSame = true;
}
}
}
}
while (this->inputSame && bufEmpty(output));
FUNCTION_LOG_RETURN_VOID();
}
/***********************************************************************************************************************************
The result is the size of the block map
***********************************************************************************************************************************/
static Pack *
blockIncrResult(THIS_VOID)
{
THIS(BlockIncr);
FUNCTION_LOG_BEGIN(logLevelTrace);
FUNCTION_LOG_PARAM(BLOCK_INCR, this);
FUNCTION_LOG_END();
ASSERT(this != NULL);
Pack *result = NULL;
MEM_CONTEXT_TEMP_BEGIN()
{
PackWrite *const packWrite = pckWriteNewP();
pckWriteU64P(packWrite, this->blockMapOutSize);
pckWriteEndP(packWrite);
result = pckMove(pckWriteResult(packWrite), memContextPrior());
}
MEM_CONTEXT_TEMP_END();
FUNCTION_LOG_RETURN(PACK, result);
}
/***********************************************************************************************************************************
Is filter done?
***********************************************************************************************************************************/
static bool
blockIncrDone(const THIS_VOID)
{
THIS(const BlockIncr);
FUNCTION_TEST_BEGIN();
FUNCTION_TEST_PARAM(BLOCK_INCR, this);
FUNCTION_TEST_END();
ASSERT(this != NULL);
FUNCTION_TEST_RETURN(BOOL, this->done && !this->inputSame);
}
/***********************************************************************************************************************************
Should the same input be provided again?
***********************************************************************************************************************************/
static bool
blockIncrInputSame(const THIS_VOID)
{
THIS(const BlockIncr);
FUNCTION_TEST_BEGIN();
FUNCTION_TEST_PARAM(BLOCK_INCR, this);
FUNCTION_TEST_END();
ASSERT(this != NULL);
FUNCTION_TEST_RETURN(BOOL, this->inputSame);
}
/**********************************************************************************************************************************/
FN_EXTERN IoFilter *
blockIncrNew(
const size_t blockSize, const unsigned int reference, const uint64_t bundleId, const uint64_t bundleOffset,
const Buffer *const blockMapPrior, const IoFilter *const compress, const IoFilter *const encrypt)
{
FUNCTION_LOG_BEGIN(logLevelTrace);
FUNCTION_LOG_PARAM(SIZE, blockSize);
FUNCTION_LOG_PARAM(UINT, reference);
FUNCTION_LOG_PARAM(UINT64, bundleId);
FUNCTION_LOG_PARAM(UINT64, bundleOffset);
FUNCTION_LOG_PARAM(BUFFER, blockMapPrior);
FUNCTION_LOG_PARAM(IO_FILTER, compress);
FUNCTION_LOG_PARAM(IO_FILTER, encrypt);
FUNCTION_LOG_END();
IoFilter *this = NULL;
OBJ_NEW_BEGIN(BlockIncr, .childQty = MEM_CONTEXT_QTY_MAX, .allocQty = MEM_CONTEXT_QTY_MAX)
{
BlockIncr *const driver = OBJ_NAME(OBJ_NEW_ALLOC(), IoFilter::BlockIncr);
*driver = (BlockIncr)
{
.memContext = memContextCurrent(),
.blockSize = blockSize,
.reference = reference,
.bundleId = bundleId,
.blockOffset = bundleOffset,
.block = bufNew(blockSize),
.blockOut = bufNew(0),
.blockMapOut = blockMapNew(),
};
// Duplicate compress filter
if (compress != NULL)
{
driver->compressType = ioFilterType(compress);
driver->compressParam = pckDup(ioFilterParamList(compress));
}
// Duplicate encrypt filter
if (encrypt != NULL)
driver->encryptParam = pckDup(ioFilterParamList(encrypt));
// Load prior block map
if (blockMapPrior)
{
MEM_CONTEXT_TEMP_BEGIN()
{
IoRead *const read = ioBufferReadNewOpen(blockMapPrior);
MEM_CONTEXT_PRIOR_BEGIN()
{
driver->blockMapPrior = blockMapNewRead(read);
}
MEM_CONTEXT_PRIOR_END();
}
MEM_CONTEXT_TEMP_END();
}
// Create param list
Pack *paramList = NULL;
MEM_CONTEXT_TEMP_BEGIN()
{
PackWrite *const packWrite = pckWriteNewP();
pckWriteU64P(packWrite, blockSize);
pckWriteU32P(packWrite, reference);
pckWriteU64P(packWrite, bundleId);
pckWriteU64P(packWrite, bundleOffset);
pckWriteBinP(packWrite, blockMapPrior);
pckWritePackP(packWrite, driver->compressParam);
if (driver->compressParam != NULL)
pckWriteStrIdP(packWrite, driver->compressType);
pckWritePackP(packWrite, driver->encryptParam);
pckWriteEndP(packWrite);
paramList = pckMove(pckWriteResult(packWrite), memContextPrior());
}
MEM_CONTEXT_TEMP_END();
this = ioFilterNewP(
BLOCK_INCR_FILTER_TYPE, driver, paramList, .done = blockIncrDone, .inOut = blockIncrProcess,
.inputSame = blockIncrInputSame, .result = blockIncrResult);
}
OBJ_NEW_END();
FUNCTION_LOG_RETURN(IO_FILTER, this);
}
FN_EXTERN IoFilter *
blockIncrNewPack(const Pack *const paramList)
{
IoFilter *result = NULL;
MEM_CONTEXT_TEMP_BEGIN()
{
PackRead *const paramListPack = pckReadNew(paramList);
const size_t blockSize = (size_t)pckReadU64P(paramListPack);
const unsigned int reference = pckReadU32P(paramListPack);
const uint64_t bundleId = pckReadU64P(paramListPack);
const uint64_t bundleOffset = pckReadU64P(paramListPack);
const Buffer *blockMapPrior = pckReadBinP(paramListPack);
// Create compress filter
const Pack *const compressParam = pckReadPackP(paramListPack);
const IoFilter *compress = NULL;
if (compressParam != NULL)
compress = compressFilterPack(pckReadStrIdP(paramListPack), compressParam);
// Create encrypt filter
const Pack *const encryptParam = pckReadPackP(paramListPack);
const IoFilter *encrypt = NULL;
if (encryptParam != NULL)
encrypt = cipherBlockNewPack(encryptParam);
result = ioFilterMove(
blockIncrNew(blockSize, reference, bundleId, bundleOffset, blockMapPrior, compress, encrypt), memContextPrior());
}
MEM_CONTEXT_TEMP_END();
return result;
}

View File

@ -0,0 +1,42 @@
/***********************************************************************************************************************************
Block Incremental Filter
The block incremental filter builds a block list and map (see BlockMap object) either from a file (the first time) or from a file
and a prior map (each subsequent time). The block list is stored first and the block map second. The filter returns the size of the
block map so it can be extracted separately.
The map is duplicated in each backup where the file has changed so the block map only needs to be retrieved from the most recent
backup. However, the block map may reference block lists (or parts thereof) in any prior (or the current) backup.
The block list consists of a series of blocks stored as chunks (see IoChunk filter) to make the format more flexible. Each block
consists of the following, compressed and encrypted as required:
- A varint-128 encoded block number stored as a delta of the previous block. So, if the first block is 138 it would be stored as 138
and if the second block is 139 it would be stored as 1. Block numbers are only needed when the block list is being read
sequentially, e.g. during verification. If blocks are accessed from the map then the block number is already known and the delta
can be ignored.
- Block data.
The block list is followed by the block map, which is encrypted separately when required but not compressed. The return value of
the filter is the size of the stored block map. Combined with the repo size this allows the block map to be read separately.
***********************************************************************************************************************************/
#ifndef COMMAND_BACKUP_BLOCK_INCR_H
#define COMMAND_BACKUP_BLOCK_INCR_H
#include "common/io/filter/filter.h"
/***********************************************************************************************************************************
Filter type constant
***********************************************************************************************************************************/
#define BLOCK_INCR_FILTER_TYPE STRID5("blk-incr", 0x90dc9dad820)
/***********************************************************************************************************************************
Constructors
***********************************************************************************************************************************/
FN_EXTERN IoFilter *blockIncrNew(
size_t blockSize, unsigned int reference, uint64_t bundleId, uint64_t bundleOffset, const Buffer *blockMapPrior,
const IoFilter *compress, const IoFilter *encrypt);
FN_EXTERN IoFilter *blockIncrNewPack(const Pack *paramList);
#endif
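
A minimal sketch of the block number delta described above, assuming open IoWrite/IoRead objects named write and read (the varint helpers are the ones the filter itself uses):

// Writing: blocks 138 then 139 are stored as 138 and 1
uint64_t blockNoLast = 0;

ioWriteVarIntU64(write, 138 - blockNoLast);                     // stores 138
blockNoLast = 138;
ioWriteVarIntU64(write, 139 - blockNoLast);                     // stores 1
blockNoLast = 139;

// Sequential reading reverses it: blockNo = blockNoLast + delta
const uint64_t blockNo = blockNoLast + ioReadVarIntU64(read);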

View File

@ -0,0 +1,237 @@
/***********************************************************************************************************************************
Block Incremental Map
The block map is stored as a series of block info entries that are abbreviated when sequential blocks are from the same reference:
- Each block logically contains all fields in BlockMapItem but not all fields are encoded for each block and some are encoded as
deltas:
- Varint-128 encoded reference (which is an index into the reference list maintained in the manifest). If the prior block has the
same reference then this is omitted.
- If this is the first time the reference appears it will be followed by a bundle id (0 if no bundle). The bundle id is always the
same for a reference so it does not need to be encoded more than once.
- Offset where the block is located. For the first block after the reference, the offset is varint-128 encoded. After that it is a
delta of the prior offset for the reference. The offset is not encoded if the block is sequential to the prior block in the
reference. In this case the offset can be calculated by adding the prior size to the prior offset.
- Block size. For the first block this is the varint-128 encoded size. Afterwards it is a delta of the previous block using the
following formula: cvtInt64ToZigZag(blockSize - blockSizeLast) + 1. Adding one is required so the size delta is never zero,
which is used as the stop byte.
- SHA1 checksum of the block.
- If the next block is from a different reference then a varint-128 encoded zero stop byte is added.
The block map is terminated by a varint-128 encoded zero stop byte.
***********************************************************************************************************************************/
#include "build.auto.h"
#include "common/debug.h"
#include "common/log.h"
#include "command/backup/blockMap.h"
/**********************************************************************************************************************************/
typedef struct BlockMapRef
{
unsigned int reference;
uint64_t bundleId;
uint64_t offset;
} BlockMapRef;
static int
lstComparatorBlockMapRef(const void *const blockMapRef1, const void *const blockMapRef2)
{
FUNCTION_TEST_BEGIN();
FUNCTION_TEST_PARAM_P(VOID, blockMapRef1);
FUNCTION_TEST_PARAM_P(VOID, blockMapRef2);
FUNCTION_TEST_END();
ASSERT(blockMapRef1 != NULL);
ASSERT(blockMapRef2 != NULL);
if (((BlockMapRef *)blockMapRef1)->reference < ((BlockMapRef *)blockMapRef2)->reference)
FUNCTION_TEST_RETURN(INT, -1);
else if (((BlockMapRef *)blockMapRef1)->reference > ((BlockMapRef *)blockMapRef2)->reference)
FUNCTION_TEST_RETURN(INT, 1);
FUNCTION_TEST_RETURN(INT, 0);
}
FN_EXTERN BlockMap *
blockMapNewRead(IoRead *const map)
{
FUNCTION_LOG_BEGIN(logLevelTrace);
FUNCTION_LOG_PARAM(IO_READ, map);
FUNCTION_LOG_END();
BlockMap *const this = blockMapNew();
List *const refList = lstNewP(sizeof(BlockMapRef), .comparator = lstComparatorBlockMapRef);
Buffer *const checksum = bufNew(HASH_TYPE_SHA1_SIZE);
BlockMapRef *blockMapRef = NULL;
uint64_t sizeLast = 0;
do
{
// If no reference is currently being processed
if (blockMapRef == NULL)
{
// Get reference and subtract one (a zero reference indicates the end of the block map)
unsigned int reference = (unsigned int)ioReadVarIntU64(map);
if (reference == 0)
break;
reference--;
// If the reference is not found get bundle id and offset and add to the reference list
blockMapRef = lstFind(refList, &(BlockMapRef){.reference = reference});
if (blockMapRef == NULL)
{
BlockMapRef blockMapRefAdd = {.reference = reference, .bundleId = ioReadVarIntU64(map)};
blockMapRefAdd.offset = ioReadVarIntU64(map);
// Add reference to list
blockMapRef = lstAdd(refList, &blockMapRefAdd);
}
// Else increment the offset
else
blockMapRef->offset += ioReadVarIntU64(map);
}
// Construct block map item
BlockMapItem blockMapItem =
{
.reference = blockMapRef->reference,
.bundleId = blockMapRef->bundleId,
.offset = blockMapRef->offset,
.size = ioReadVarIntU64(map),
};
// If size is zero then this is the last block in the reference so expect a new reference next
if (blockMapItem.size == 0)
{
blockMapRef = NULL;
}
else
{
// The first size is read directly and then each subsequent size is a delta of the previous size. Subtract one from the
// delta which is required to distinguish it from the stop byte.
if (sizeLast != 0)
blockMapItem.size = (uint64_t)(cvtInt64FromZigZag(blockMapItem.size - 1) + (int64_t)sizeLast);
sizeLast = blockMapItem.size;
// Add size to offset
blockMapRef->offset += blockMapItem.size;
bufUsedZero(checksum);
ioRead(map, checksum);
memcpy(blockMapItem.checksum, bufPtr(checksum), bufUsed(checksum));
// Add to list
lstAdd((List *)this, &blockMapItem);
}
}
while (true);
lstFree(refList);
bufFree(checksum);
FUNCTION_LOG_RETURN(BLOCK_MAP, this);
}
/**********************************************************************************************************************************/
FN_EXTERN void
blockMapWrite(const BlockMap *const this, IoWrite *const output)
{
FUNCTION_LOG_BEGIN(logLevelTrace);
FUNCTION_LOG_PARAM(BLOCK_MAP, this);
FUNCTION_LOG_PARAM(IO_WRITE, output);
FUNCTION_LOG_END();
ASSERT(this != NULL);
ASSERT(blockMapSize(this) > 0);
ASSERT(output != NULL);
List *const refList = lstNewP(sizeof(BlockMapRef), .comparator = lstComparatorBlockMapRef);
BlockMapRef *blockMapRef = NULL;
// Write all block items into a packed format
unsigned int referenceLast = UINT_MAX;
uint64_t sizeLast = 0;
for (unsigned int blockMapIdx = 0; blockMapIdx < blockMapSize(this); blockMapIdx++)
{
const BlockMapItem *const blockMapItem = blockMapGet(this, blockMapIdx);
if (referenceLast != blockMapItem->reference)
{
// Terminate last reference
if (referenceLast != UINT_MAX)
ioWriteVarIntU64(output, 0);
// Add reference
ioWriteVarIntU64(output, blockMapItem->reference + 1);
// Check if the reference is already in the list
blockMapRef = lstFind(refList, &(BlockMapRef){.reference = blockMapItem->reference});
if (blockMapRef == NULL)
{
// Add bundle id and offset
ioWriteVarIntU64(output, blockMapItem->bundleId);
ioWriteVarIntU64(output, blockMapItem->offset);
// Add reference to list
blockMapRef = lstAdd(
refList,
&(BlockMapRef){
.reference = blockMapItem->reference, .bundleId = blockMapItem->bundleId, .offset = blockMapItem->offset});
}
else
{
ASSERT(blockMapItem->reference == blockMapRef->reference);
ASSERT(blockMapItem->bundleId == blockMapRef->bundleId);
ASSERT(blockMapItem->offset >= blockMapRef->offset);
// Add offset delta
ioWriteVarIntU64(output, blockMapItem->offset - blockMapRef->offset);
// Update the offset
blockMapRef->offset = blockMapItem->offset;
}
referenceLast = blockMapItem->reference;
}
// The first size is stored directly and then each subsequent size is a delta of the previous size. Add one to the delta
// so it can be distinguished from the stop byte.
if (sizeLast == 0)
ioWriteVarIntU64(output, blockMapItem->size);
else
ioWriteVarIntU64(output, cvtInt64ToZigZag((int64_t)blockMapItem->size - (int64_t)sizeLast) + 1);
sizeLast = blockMapItem->size;
// Add size to offset
blockMapRef->offset += blockMapItem->size;
// Add checksum
ioWrite(output, BUF(blockMapItem->checksum, HASH_TYPE_SHA1_SIZE));
}
// Write reference end
ioWriteVarIntU64(output, 0);
// Write map end
ioWriteVarIntU64(output, 0);
lstFree(refList);
FUNCTION_LOG_RETURN_VOID();
}
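
A worked sketch of the size encoding implemented above, using the same zigzag converters: sizes 8192, 8192, 4096 encode as 8192, 1, 8192.

// Encode: the first size is written directly, then cvtInt64ToZigZag(size - sizeLast) + 1
//   8192 -> 8192 (first size)
//   8192 -> cvtInt64ToZigZag(8192 - 8192) + 1 = 0 + 1 = 1
//   4096 -> cvtInt64ToZigZag(4096 - 8192) + 1 = 8191 + 1 = 8192
// Decode reverses it: size = cvtInt64FromZigZag(value - 1) + sizeLast, and a value of 0 is the stop byte.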

View File

@ -0,0 +1,92 @@
/***********************************************************************************************************************************
Block Incremental Map
The block incremental map stores the location of blocks of data that have been backed up incrementally. When a file changes, instead
of copying the entire file, just the blocks that have been changed can be stored. This map does not store the blocks themselves,
just the location where they can be found. It must be combined with a block list to be useful (see BlockIncr filter).
***********************************************************************************************************************************/
#ifndef COMMAND_BACKUP_BLOCKMAP_H
#define COMMAND_BACKUP_BLOCKMAP_H
/***********************************************************************************************************************************
Object type
***********************************************************************************************************************************/
typedef struct BlockMap BlockMap;
#include "common/crypto/hash.h"
#include "common/type/list.h"
#include "common/type/object.h"
typedef struct BlockMapItem
{
unsigned int reference; // Reference to backup where the block is stored
unsigned char checksum[HASH_TYPE_SHA1_SIZE]; // Checksum of the block
uint64_t bundleId; // Bundle where the block is stored (0 if not bundled)
uint64_t offset; // Offset into the bundle
uint64_t size; // Size of the block (including compression, etc.)
} BlockMapItem;
/***********************************************************************************************************************************
Constructors
***********************************************************************************************************************************/
// Create empty block map
FN_INLINE_ALWAYS BlockMap *
blockMapNew(void)
{
return (BlockMap *)OBJ_NAME(lstNewP(sizeof(BlockMapItem)), BlockMap::List);
}
// New block map from IO
FN_EXTERN BlockMap *blockMapNewRead(IoRead *map);
/***********************************************************************************************************************************
Functions
***********************************************************************************************************************************/
// Add a block map item
FN_INLINE_ALWAYS const BlockMapItem *
blockMapAdd(BlockMap *const this, const BlockMapItem *const item)
{
ASSERT_INLINE(item != NULL);
ASSERT_INLINE(item->size != 0);
return (BlockMapItem *)lstAdd((List *const)this, item);
}
// Write map to IO
FN_EXTERN void blockMapWrite(const BlockMap *this, IoWrite *output);
/***********************************************************************************************************************************
Getters/Setters
***********************************************************************************************************************************/
// Get a block map item
FN_INLINE_ALWAYS const BlockMapItem *
blockMapGet(const BlockMap *const this, const unsigned int mapIdx)
{
return (BlockMapItem *)lstGet((List *const)this, mapIdx);
}
// Block map size
FN_INLINE_ALWAYS unsigned int
blockMapSize(const BlockMap *const this)
{
return lstSize((List *const)this);
}
/***********************************************************************************************************************************
Destructor
***********************************************************************************************************************************/
FN_INLINE_ALWAYS void
blockMapFree(BlockMap *const this)
{
objFree(this);
}
/***********************************************************************************************************************************
Macros for function logging
***********************************************************************************************************************************/
#define FUNCTION_LOG_BLOCK_MAP_TYPE \
BlockMap *
#define FUNCTION_LOG_BLOCK_MAP_FORMAT(value, buffer, bufferSize) \
objNameToLog(value, "BlockMap", buffer, bufferSize)
#endif
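
A usage sketch built from the API above (the checksum source and the write/read objects are assumed):

BlockMap *const map = blockMapNew();

// One block stored in the current backup (reference 0), unbundled, at offset 0
BlockMapItem item = {.reference = 0, .bundleId = 0, .offset = 0, .size = 8192};
memcpy(item.checksum, blockSha1, HASH_TYPE_SHA1_SIZE);          // blockSha1 is an assumed SHA1 buffer
blockMapAdd(map, &item);

blockMapWrite(map, write);                                      // write is an assumed open IoWrite
blockMapFree(map);

// Read it back
BlockMap *const mapRead = blockMapNewRead(read);                // read is an assumed open IoRead
ASSERT(blockMapSize(mapRead) == 1);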

View File

@ -25,6 +25,7 @@ backupFileRepoPath(const String *const backupLabel, const BackupFileRepoPathPara
FUNCTION_TEST_PARAM(STRING, param.manifestName);
FUNCTION_TEST_PARAM(UINT64, param.bundleId);
FUNCTION_TEST_PARAM(ENUM, param.compressType);
FUNCTION_TEST_PARAM(BOOL, param.blockIncr);
FUNCTION_TEST_END();
ASSERT(backupLabel != NULL);
@ -35,7 +36,11 @@ backupFileRepoPath(const String *const backupLabel, const BackupFileRepoPathPara
if (param.bundleId != 0)
strCatFmt(result, MANIFEST_PATH_BUNDLE "/%" PRIu64, param.bundleId);
else
strCatFmt(result, "%s%s", strZ(param.manifestName), strZ(compressExtStr(param.compressType)));
{
strCatFmt(
result, "%s%s", strZ(param.manifestName),
param.blockIncr ? BACKUP_BLOCK_INCR_EXT : strZ(compressExtStr(param.compressType)));
}
FUNCTION_TEST_RETURN(STRING, result);
}

View File

@ -15,6 +15,7 @@ Common Functions and Definitions for Backup and Expire Commands
Backup constants
***********************************************************************************************************************************/
#define BACKUP_PATH_HISTORY "backup.history"
#define BACKUP_BLOCK_INCR_EXT ".pgbi"
/***********************************************************************************************************************************
Functions
@ -25,6 +26,7 @@ typedef struct BackupFileRepoPathParam
const String *manifestName; // File name in manifest
uint64_t bundleId; // Is the file bundled?
CompressType compressType; // Is the file compressed?
bool blockIncr; // Is the file a block incremental?
} BackupFileRepoPathParam;
#define backupFileRepoPathP(backupLabel, ...) \
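
A sketch of the resulting repo paths (label and file name assumed): block incremental files take the .pgbi extension in place of the compression extension, while bundled files resolve to the bundle path regardless.

// -> <backup path>/20230120-000000F/pg_data/base/1/1.pgbi
const String *const path = backupFileRepoPathP(
    STRDEF("20230120-000000F"), .manifestName = STRDEF("pg_data/base/1/1"), .blockIncr = true);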

View File

@ -5,6 +5,7 @@ Backup File
#include <string.h>
#include "command/backup/blockIncr.h"
#include "command/backup/file.h"
#include "command/backup/pageChecksum.h"
#include "common/crypto/cipherBlock.h"
@ -38,11 +39,14 @@ segmentNumber(const String *pgFile)
/**********************************************************************************************************************************/
FN_EXTERN List *
backupFile(
const String *const repoFile, const CompressType repoFileCompressType, const int repoFileCompressLevel,
const CipherType cipherType, const String *const cipherPass, const List *const fileList)
const String *const repoFile, const uint64_t bundleId, const unsigned int blockIncrReference,
const CompressType repoFileCompressType, const int repoFileCompressLevel, const CipherType cipherType,
const String *const cipherPass, const List *const fileList)
{
FUNCTION_LOG_BEGIN(logLevelDebug);
FUNCTION_LOG_PARAM(STRING, repoFile); // Repo file
FUNCTION_LOG_PARAM(UINT64, bundleId); // Bundle id (0 if none)
FUNCTION_LOG_PARAM(UINT, blockIncrReference); // Block incremental reference to use in map
FUNCTION_LOG_PARAM(ENUM, repoFileCompressType); // Compress type for repo file
FUNCTION_LOG_PARAM(INT, repoFileCompressLevel); // Compression level for repo file
FUNCTION_LOG_PARAM(STRING_ID, cipherType); // Encryption type
@ -208,23 +212,63 @@ backupFile(
segmentNumber(file->pgFile), PG_SEGMENT_PAGE_DEFAULT, storagePathP(storagePg(), file->pgFile)));
}
// Add compression
if (repoFileCompressType != compressTypeNone)
// Compress filter
IoFilter *const compress = repoFileCompressType != compressTypeNone ?
compressFilter(repoFileCompressType, repoFileCompressLevel) : NULL;
// Encrypt filter
IoFilter *const encrypt = cipherType != cipherTypeNone ?
cipherBlockNewP(cipherModeEncrypt, cipherType, BUFSTR(cipherPass), .raw = file->blockIncrSize != 0) : NULL;
// If block incremental then add the filter and pass compress/encrypt filters to it since each block is
// compressed/encrypted separately
if (file->blockIncrSize != 0)
{
// Read prior block map
const Buffer *blockMap = NULL;
if (file->blockIncrMapPriorFile != NULL)
{
StorageRead *const blockMapRead = storageNewReadP(
storageRepo(), file->blockIncrMapPriorFile, .offset = file->blockIncrMapPriorOffset,
.limit = VARUINT64(file->blockIncrMapPriorSize));
if (cipherType != cipherTypeNone)
{
ioFilterGroupAdd(
ioReadFilterGroup(storageReadIo(blockMapRead)),
cipherBlockNewP(cipherModeDecrypt, cipherType, BUFSTR(cipherPass), .raw = true));
}
blockMap = storageGetP(blockMapRead);
}
// Add block incremental filter
ioFilterGroupAdd(
ioReadFilterGroup(storageReadIo(read)), compressFilter(repoFileCompressType, repoFileCompressLevel));
ioReadFilterGroup(
storageReadIo(read)),
blockIncrNew(
(size_t)file->blockIncrSize, blockIncrReference, bundleId, bundleOffset, blockMap, compress,
encrypt));
repoChecksum = true;
}
// If there is a cipher then add the encrypt filter
if (cipherType != cipherTypeNone)
// Else apply compress/encrypt filters to the entire file
else
{
ioFilterGroupAdd(
ioReadFilterGroup(storageReadIo(read)),
cipherBlockNewP(cipherModeEncrypt, cipherType, BUFSTR(cipherPass)));
// Add compress filter
if (compress != NULL)
{
ioFilterGroupAdd(ioReadFilterGroup(storageReadIo(read)), compress);
repoChecksum = true;
}
repoChecksum = true;
// Add encrypt filter
if (encrypt != NULL)
{
ioFilterGroupAdd(ioReadFilterGroup(storageReadIo(read)), encrypt);
repoChecksum = true;
}
}
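// Sketch of the two filter chains built above (page checksum present only for data files):
//
//   block incremental: pg read -> [pageChecksum] -> blockIncr(compress/encrypt applied per block) -> repo
//   regular copy:      pg read -> [pageChecksum] -> compress -> encrypt -> repo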
// Capture checksum of file stored in the repo if filters that modify the output have been applied
@ -277,6 +321,13 @@ backupFile(
ioFilterGroupResultPackP(ioReadFilterGroup(storageReadIo(read)), PAGE_CHECKSUM_FILTER_TYPE));
}
// Get results of block incremental
if (file->blockIncrSize != 0)
{
fileResult->blockIncrMapSize = pckReadU64P(
ioFilterGroupResultP(ioReadFilterGroup(storageReadIo(read)), BLOCK_INCR_FILTER_TYPE));
}
// Get repo checksum
if (repoChecksum)
{

View File

@ -33,6 +33,10 @@ typedef struct BackupFile
bool pgFileCopyExactSize; // Copy only pg expected size
const Buffer *pgFileChecksum; // Expected pg file checksum
bool pgFileChecksumPage; // Validate page checksums?
uint64_t blockIncrSize; // Block size if block incremental (0 if not)
const String *blockIncrMapPriorFile; // File containing prior block incremental map (NULL if none)
uint64_t blockIncrMapPriorOffset; // Offset of prior block incremental map
uint64_t blockIncrMapPriorSize; // Size of prior block incremental map
const String *manifestFile; // Repo file
const Buffer *repoFileChecksum; // Expected repo file checksum
uint64_t repoFileSize; // Expected repo file size
@ -49,11 +53,12 @@ typedef struct BackupFileResult
Buffer *repoChecksum; // Checksum repo file (including compression, etc.)
uint64_t bundleOffset; // Offset in bundle if any
uint64_t repoSize;
uint64_t blockIncrMapSize; // Size of block incremental map (0 if no map)
Pack *pageChecksumResult;
} BackupFileResult;
FN_EXTERN List *backupFile(
const String *repoFile, CompressType repoFileCompressType, int repoFileCompressLevel, CipherType cipherType,
const String *cipherPass, const List *fileList);
const String *repoFile, uint64_t bundleId, unsigned int blockIncrReference, CompressType repoFileCompressType,
int repoFileCompressLevel, CipherType cipherType, const String *cipherPass, const List *fileList);
#endif

View File

@ -29,6 +29,8 @@ backupFileProtocol(PackRead *const param, ProtocolServer *const server)
{
// Backup options that apply to all files
const String *const repoFile = pckReadStrP(param);
uint64_t bundleId = pckReadU64P(param);
const unsigned int blockIncrReference = (unsigned int)pckReadU64P(param);
const CompressType repoFileCompressType = (CompressType)pckReadU32P(param);
const int repoFileCompressLevel = pckReadI32P(param);
const CipherType cipherType = (CipherType)pckReadU64P(param);
@ -46,6 +48,19 @@ backupFileProtocol(PackRead *const param, ProtocolServer *const server)
file.pgFileCopyExactSize = pckReadBoolP(param);
file.pgFileChecksum = pckReadBinP(param);
file.pgFileChecksumPage = pckReadBoolP(param);
file.blockIncrSize = pckReadU64P(param);
if (file.blockIncrSize > 0)
{
file.blockIncrMapPriorFile = pckReadStrP(param);
if (file.blockIncrMapPriorFile != NULL)
{
file.blockIncrMapPriorOffset = pckReadU64P(param);
file.blockIncrMapPriorSize = pckReadU64P(param);
}
}
file.manifestFile = pckReadStrP(param);
file.repoFileChecksum = pckReadBinP(param);
file.repoFileSize = pckReadU64P(param);
@ -57,7 +72,7 @@ backupFileProtocol(PackRead *const param, ProtocolServer *const server)
// Backup file
const List *const result = backupFile(
repoFile, repoFileCompressType, repoFileCompressLevel, cipherType, cipherPass, fileList);
repoFile, bundleId, blockIncrReference, repoFileCompressType, repoFileCompressLevel, cipherType, cipherPass, fileList);
// Return result
PackWrite *const resultPack = protocolPackNew();
@ -70,6 +85,7 @@ backupFileProtocol(PackRead *const param, ProtocolServer *const server)
pckWriteU32P(resultPack, fileResult->backupCopyResult);
pckWriteU64P(resultPack, fileResult->copySize);
pckWriteU64P(resultPack, fileResult->bundleOffset);
pckWriteU64P(resultPack, fileResult->blockIncrMapSize);
pckWriteU64P(resultPack, fileResult->repoSize);
pckWriteBinP(resultPack, fileResult->copyChecksum);
pckWriteBinP(resultPack, fileResult->repoChecksum);

View File

@ -56,11 +56,13 @@ VARIANT_STRDEF_STATIC(KEY_ARCHIVE_VAR, "archive");
VARIANT_STRDEF_STATIC(KEY_CIPHER_VAR, "cipher");
VARIANT_STRDEF_STATIC(KEY_DATABASE_VAR, "database");
VARIANT_STRDEF_STATIC(KEY_DELTA_VAR, "delta");
VARIANT_STRDEF_STATIC(KEY_DELTA_MAP_VAR, "delta-map");
VARIANT_STRDEF_STATIC(KEY_DESTINATION_VAR, "destination");
VARIANT_STRDEF_STATIC(KEY_NAME_VAR, "name");
VARIANT_STRDEF_STATIC(KEY_OID_VAR, "oid");
VARIANT_STRDEF_STATIC(KEY_REPO_KEY_VAR, "repo-key");
VARIANT_STRDEF_STATIC(KEY_SIZE_VAR, "size");
VARIANT_STRDEF_STATIC(KEY_SIZE_MAP_VAR, "size-map");
VARIANT_STRDEF_STATIC(KEY_START_VAR, "start");
VARIANT_STRDEF_STATIC(KEY_STOP_VAR, "stop");
VARIANT_STRDEF_STATIC(REPO_KEY_KEY_VAR, "key");
@ -454,6 +456,12 @@ backupListAdd(
kvPut(repoInfo, KEY_SIZE_VAR, VARUINT64(backupData->backupInfoRepoSize));
kvPut(repoInfo, KEY_DELTA_VAR, VARUINT64(backupData->backupInfoRepoSizeDelta));
if (outputJson && backupData->backupInfoRepoSizeMap != NULL)
{
kvPut(repoInfo, KEY_SIZE_MAP_VAR, backupData->backupInfoRepoSizeMap);
kvPut(repoInfo, KEY_DELTA_MAP_VAR, backupData->backupInfoRepoSizeMapDelta);
}
// timestamp section
KeyValue *timeInfo = kvPutKv(varKv(backupInfo), BACKUP_KEY_TIMESTAMP_VAR);

View File

@ -5,6 +5,7 @@ Remote Command
#include <string.h>
#include "command/backup/blockIncr.h"
#include "command/backup/pageChecksum.h"
#include "command/control/common.h"
#include "common/crypto/cipherBlock.h"
@ -35,6 +36,7 @@ Filter handlers
***********************************************************************************************************************************/
static const StorageRemoteFilterHandler storageRemoteFilterHandlerList[] =
{
{.type = BLOCK_INCR_FILTER_TYPE, .handlerParam = blockIncrNewPack},
{.type = CIPHER_BLOCK_FILTER_TYPE, .handlerParam = cipherBlockNewPack},
{.type = CRYPTO_HASH_FILTER_TYPE, .handlerParam = cryptoHashNewPack},
{.type = PAGE_CHECKSUM_FILTER_TYPE, .handlerParam = pageChecksumNewPack},

View File

@ -0,0 +1,172 @@
/***********************************************************************************************************************************
Restore Delta Map
***********************************************************************************************************************************/
#include "build.auto.h"
#include "command/restore/deltaMap.h"
#include "common/crypto/common.h"
#include "common/crypto/hash.h"
#include "common/debug.h"
#include "common/log.h"
#include "common/type/object.h"
/***********************************************************************************************************************************
Object type
***********************************************************************************************************************************/
typedef struct DeltaMap
{
MemContext *memContext; // Mem context of filter
size_t blockSize; // Block size for checksums
size_t blockCurrent; // Size of current block
IoFilter *hash; // Hash of current block
List *list; // List of hashes
} DeltaMap;
/***********************************************************************************************************************************
Macros for function logging
***********************************************************************************************************************************/
#define FUNCTION_LOG_DELTA_MAP_TYPE \
DeltaMap *
#define FUNCTION_LOG_DELTA_MAP_FORMAT(value, buffer, bufferSize) \
objNameToLog(value, "DeltaMap", buffer, bufferSize)
/***********************************************************************************************************************************
Process delta
***********************************************************************************************************************************/
static void
deltaMapProcess(THIS_VOID, const Buffer *const input)
{
THIS(DeltaMap);
FUNCTION_LOG_BEGIN(logLevelTrace);
FUNCTION_LOG_PARAM(DELTA_MAP, this);
FUNCTION_LOG_PARAM(BUFFER, input);
FUNCTION_LOG_END();
ASSERT(this != NULL);
ASSERT(input != NULL);
size_t inputOffset = 0;
// Loop until input is consumed
while (inputOffset != bufUsed(input))
{
// Create hash object if needed
if (this->hash == NULL)
{
MEM_CONTEXT_BEGIN(this->memContext)
{
this->hash = cryptoHashNew(hashTypeSha1);
this->blockCurrent = 0;
}
MEM_CONTEXT_END();
}
// Calculate how much data to hash and perform hash
const size_t blockRemains = this->blockSize - this->blockCurrent;
const size_t inputRemains = bufUsed(input) - inputOffset;
const size_t blockHash = blockRemains < inputRemains ? blockRemains : inputRemains;
ioFilterProcessIn(this->hash, BUF(bufPtrConst(input) + inputOffset, blockHash));
// Update amount of data hashed
inputOffset += blockHash;
this->blockCurrent += blockHash;
// If the block size has been reached then output the hash
if (this->blockCurrent == this->blockSize)
{
MEM_CONTEXT_TEMP_BEGIN()
{
lstAdd(this->list, bufPtrConst(pckReadBinP(pckReadNew(ioFilterResult(this->hash)))));
ioFilterFree(this->hash);
this->hash = NULL;
}
MEM_CONTEXT_TEMP_END();
}
}
FUNCTION_LOG_RETURN_VOID();
}
/***********************************************************************************************************************************
Get a binary representation of the hash list
***********************************************************************************************************************************/
static Pack *
deltaMapResult(THIS_VOID)
{
THIS(DeltaMap);
FUNCTION_LOG_BEGIN(logLevelTrace);
FUNCTION_LOG_PARAM(DELTA_MAP, this);
FUNCTION_LOG_END();
ASSERT(this != NULL);
Pack *result = NULL;
MEM_CONTEXT_TEMP_BEGIN()
{
PackWrite *const packWrite = pckWriteNewP();
// If there is a remainder in the hash
if (this->hash)
lstAdd(this->list, bufPtrConst(pckReadBinP(pckReadNew(ioFilterResult(this->hash)))));
pckWriteBinP(packWrite, BUF(lstGet(this->list, 0), lstSize(this->list) * HASH_TYPE_SHA1_SIZE));
pckWriteEndP(packWrite);
result = pckMove(pckWriteResult(packWrite), memContextPrior());
}
MEM_CONTEXT_TEMP_END();
FUNCTION_LOG_RETURN(PACK, result);
}
/**********************************************************************************************************************************/
FN_EXTERN IoFilter *
deltaMapNew(const size_t blockSize)
{
FUNCTION_LOG_BEGIN(logLevelTrace);
FUNCTION_LOG_PARAM(SIZE, blockSize);
FUNCTION_LOG_END();
ASSERT(blockSize != 0);
// Allocate memory to hold process state
IoFilter *this = NULL;
OBJ_NEW_BEGIN(DeltaMap, .childQty = MEM_CONTEXT_QTY_MAX, .allocQty = MEM_CONTEXT_QTY_MAX, .callbackQty = 1)
{
DeltaMap *const driver = OBJ_NAME(OBJ_NEW_ALLOC(), IoFilter::DeltaMap);
*driver = (DeltaMap)
{
.memContext = memContextCurrent(),
.blockSize = blockSize,
.list = lstNewP(HASH_TYPE_SHA1_SIZE),
};
// Create param list
Pack *paramList = NULL;
MEM_CONTEXT_TEMP_BEGIN()
{
PackWrite *const packWrite = pckWriteNewP();
pckWriteU64P(packWrite, blockSize);
pckWriteEndP(packWrite);
paramList = pckMove(pckWriteResult(packWrite), memContextPrior());
}
MEM_CONTEXT_TEMP_END();
this = ioFilterNewP(
DELTA_MAP_FILTER_TYPE, driver, paramList, .in = deltaMapProcess, .result = deltaMapResult);
}
OBJ_NEW_END();
FUNCTION_LOG_RETURN(IO_FILTER, this);
}

View File

@ -0,0 +1,23 @@
/***********************************************************************************************************************************
Restore Delta Map
Build a list of hashes based on a block size. This is used to compare the contents of a file to a block map to determine which
blocks need to be updated.
***********************************************************************************************************************************/
#ifndef COMMAND_RESTORE_DELTA_MAP_H
#define COMMAND_RESTORE_DELTA_MAP_H
#include "common/io/filter/filter.h"
#include "common/type/stringId.h"
/***********************************************************************************************************************************
Filter type constant
***********************************************************************************************************************************/
#define DELTA_MAP_FILTER_TYPE STRID5("dlt-map", 0x402ddd1840)
/***********************************************************************************************************************************
Constructors
***********************************************************************************************************************************/
FN_EXTERN IoFilter *deltaMapNew(size_t blockSize);
#endif
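
A usage sketch mirroring how restore drives this filter (file name and block size assumed): drain the existing pg file through the filter, then unpack the result, which is one SHA1 per block.

IoRead *const read = storageReadIo(storageNewReadP(storagePg(), STRDEF("base/1/1")));
ioFilterGroupAdd(ioReadFilterGroup(read), deltaMapNew(8192));
ioReadDrain(read);

// Concatenated SHA1 hashes, one per 8KiB block (the final block may be partial)
PackRead *const result = ioFilterGroupResultP(ioReadFilterGroup(read), DELTA_MAP_FILTER_TYPE);
const Buffer *const deltaMap = pckReadBinP(result);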

View File

@ -7,10 +7,14 @@ Restore File
#include <unistd.h>
#include <utime.h>
#include "command/backup/blockMap.h"
#include "command/restore/deltaMap.h"
#include "command/restore/file.h"
#include "common/crypto/cipherBlock.h"
#include "common/crypto/hash.h"
#include "common/debug.h"
#include "common/io/chunkedRead.h"
#include "common/io/fdWrite.h"
#include "common/io/filter/group.h"
#include "common/io/filter/size.h"
#include "common/io/io.h"
@ -23,7 +27,8 @@ Restore File
FN_EXTERN List *
restoreFile(
const String *const repoFile, const unsigned int repoIdx, const CompressType repoFileCompressType, const time_t copyTimeBegin,
const bool delta, const bool deltaForce, const String *const cipherPass, const List *const fileList)
const bool delta, const bool deltaForce, const String *const cipherPass, const StringList *const referenceList,
List *const fileList)
{
FUNCTION_LOG_BEGIN(logLevelDebug);
FUNCTION_LOG_PARAM(STRING, repoFile);
@ -33,6 +38,7 @@ restoreFile(
FUNCTION_LOG_PARAM(BOOL, delta);
FUNCTION_LOG_PARAM(BOOL, deltaForce);
FUNCTION_TEST_PARAM(STRING, cipherPass);
FUNCTION_LOG_PARAM(STRING_LIST, referenceList); // List of references (for block incremental)
FUNCTION_LOG_PARAM(LIST, fileList); // List of files to restore
FUNCTION_LOG_END();
@ -51,7 +57,7 @@ restoreFile(
// Use a per-file mem context to reduce memory usage
MEM_CONTEXT_TEMP_BEGIN()
{
const RestoreFile *const file = lstGet(fileList, fileIdx);
RestoreFile *const file = lstGet(fileList, fileIdx);
ASSERT(file->name != NULL);
ASSERT(file->limit == NULL || varType(file->limit) == varTypeUInt64);
@ -115,6 +121,11 @@ restoreFile(
{
read = storageReadIo(storageNewReadP(storagePg(), file->name));
ioFilterGroupAdd(ioReadFilterGroup(read), cryptoHashNew(hashTypeSha1));
// Generate delta map if block incremental
if (file->blockIncrMapSize != 0)
ioFilterGroupAdd(ioReadFilterGroup(read), deltaMapNew((size_t)file->blockIncrSize));
ioReadDrain(read);
}
@ -139,6 +150,20 @@ restoreFile(
fileResult->result = restoreResultPreserve;
}
// If block incremental and not preserving the file, store the delta map for later use in
// reconstructing the pg file
if (file->blockIncrMapSize != 0 && fileResult->result != restoreResultPreserve)
{
PackRead *const deltaMapResult = ioFilterGroupResultP(
ioReadFilterGroup(read), DELTA_MAP_FILTER_TYPE);
MEM_CONTEXT_OBJ_BEGIN(fileList)
{
file->deltaMap = pckReadBinP(deltaMapResult);
}
MEM_CONTEXT_OBJ_END();
}
}
}
}
@ -196,24 +221,29 @@ restoreFile(
ASSERT(varUInt64(file->limit) != 0);
repoFileLimit = varUInt64(file->limit);
// Determine how many files can be copied with one read
for (unsigned int fileNextIdx = fileIdx + 1; fileNextIdx < lstSize(fileList); fileNextIdx++)
// Multiple files cannot be copied with one read when the file is a block incremental. This is because the
// remote protocol does not support multiple open files at once.
if (file->blockIncrMapSize == 0)
{
// Only files that are being copied are considered
if (((const RestoreFileResult *)lstGet(result, fileNextIdx))->result == restoreResultCopy)
// Determine how many files can be copied with one read
for (unsigned int fileNextIdx = fileIdx + 1; fileNextIdx < lstSize(fileList); fileNextIdx++)
{
const RestoreFile *const fileNext = lstGet(fileList, fileNextIdx);
ASSERT(fileNext->limit != NULL && varUInt64(fileNext->limit) != 0);
// Only files that are being copied are considered
if (((const RestoreFileResult *)lstGet(result, fileNextIdx))->result == restoreResultCopy)
{
const RestoreFile *const fileNext = lstGet(fileList, fileNextIdx);
ASSERT(fileNext->limit != NULL && varUInt64(fileNext->limit) != 0);
// Break if the offset is not the first file's offset + the limit of all additional files so far
if (fileNext->offset != file->offset + repoFileLimit)
// Break if the offset is not the first file's offset + limit of all additional files so far
if (fileNext->offset != file->offset + repoFileLimit)
break;
repoFileLimit += varUInt64(fileNext->limit);
}
// Else if the file was not copied then there is a gap so break
else
break;
repoFileLimit += varUInt64(fileNext->limit);
}
// Else if the file was not copied then there is a gap so break
else
break;
}
}
@ -225,6 +255,16 @@ restoreFile(
storageRepoIdx(repoIdx), repoFile,
.compressible = repoFileCompressType == compressTypeNone && cipherPass == NULL,
.offset = file->offset, .limit = repoFileLimit != 0 ? VARUINT64(repoFileLimit) : NULL);
// Add decryption filter for block incremental map
if (cipherPass != NULL && file->blockIncrMapSize != 0)
{
ioFilterGroupAdd(
ioReadFilterGroup(storageReadIo(repoFileRead)),
cipherBlockNewP(
cipherModeDecrypt, cipherTypeAes256Cbc, BUFSTR(cipherPass), .raw = true));
}
ioReadOpen(storageReadIo(repoFileRead));
}
MEM_CONTEXT_PRIOR_END();
@ -233,31 +273,200 @@ restoreFile(
// Create pg file
StorageWrite *pgFileWrite = storageNewWriteP(
storagePgWrite(), file->name, .modeFile = file->mode, .user = file->user, .group = file->group,
.timeModified = file->timeModified, .noAtomic = true, .noCreatePath = true, .noSyncPath = true);
.timeModified = file->timeModified, .noAtomic = true, .noCreatePath = true, .noSyncPath = true,
.noTruncate = file->deltaMap != NULL);
IoFilterGroup *filterGroup = ioWriteFilterGroup(storageWriteIo(pgFileWrite));
// If block incremental file
const Buffer *checksum = NULL;
// Add decryption filter
if (cipherPass != NULL)
if (file->blockIncrMapSize != 0)
{
ioFilterGroupAdd(
filterGroup, cipherBlockNewP(cipherModeDecrypt, cipherTypeAes256Cbc, BUFSTR(cipherPass)));
ASSERT(referenceList != NULL);
// Read block map. This will be compared to the delta map already created to determine which blocks need to
// be fetched from the repository. If we got here there must be at least one block to fetch.
const BlockMap *const blockMap = blockMapNewRead(storageReadIo(repoFileRead));
// The repo file needs to be closed so that block lists can be read from the remote protocol
ioReadClose(storageReadIo(repoFileRead));
// Size of delta map. If there is no delta map because the pg file does not exist then set to zero, which
// will force all blocks to be updated.
const unsigned int deltaMapSize = file->deltaMap == NULL ?
0 : (unsigned int)(bufUsed(file->deltaMap) / HASH_TYPE_SHA1_SIZE);
// Find and write updated blocks
bool updateFound = false; // Is there a block list to be updated?
unsigned int blockMapMinIdx = 0; // Min block in the list
unsigned int blockMapMaxIdx = 0; // Max block in the list
uint64_t blockListOffset = 0; // Offset to start of block list
uint64_t blockListSize = 0; // Size of all blocks in list
ioWriteOpen(storageWriteIo(pgFileWrite));
for (unsigned int blockMapIdx = 0; blockMapIdx < blockMapSize(blockMap); blockMapIdx++)
{
const BlockMapItem *const blockMapItem = blockMapGet(blockMap, blockMapIdx);
// The block must be updated if it is beyond the blocks that exist in the delta map or when the checksum
// stored in the repository differs from the checksum in the delta map
if (blockMapIdx >= deltaMapSize ||
!bufEq(
BUF(blockMapItem->checksum, HASH_TYPE_SHA1_SIZE),
BUF(bufPtrConst(file->deltaMap) + blockMapIdx * HASH_TYPE_SHA1_SIZE, HASH_TYPE_SHA1_SIZE)))
{
// If no block list is currently being built then start a new one
if (!updateFound)
{
updateFound = true;
blockMapMinIdx = blockMapIdx;
blockMapMaxIdx = blockMapIdx;
blockListOffset = blockMapItem->offset;
blockListSize = blockMapItem->size;
}
// Else add to the current block list
else
{
blockMapMaxIdx = blockMapIdx;
blockListSize += blockMapItem->size;
}
// Check if the next block should be part of this list. If so, continue so the block will be added
// to the list on the next iteration. Otherwise, write out the current block list below.
if (blockMapIdx < blockMapSize(blockMap) - 1)
{
const BlockMapItem *const blockMapItemNext = blockMapGet(blockMap, blockMapIdx + 1);
// Similar to the check above, but also make sure the reference is the same. For blocks to be
// in a common list they must be contiguous and from the same reference.
if (blockMapItem->reference == blockMapItemNext->reference &&
(blockMapIdx + 1 >= deltaMapSize ||
!bufEq(
BUF(blockMapItemNext->checksum, HASH_TYPE_SHA1_SIZE),
BUF(
bufPtrConst(file->deltaMap) + (blockMapIdx + 1) * HASH_TYPE_SHA1_SIZE,
HASH_TYPE_SHA1_SIZE))))
{
continue;
}
}
}
// Update blocks in the list when found
if (updateFound)
{
// Use a per-block-list mem context to reduce memory usage
MEM_CONTEXT_TEMP_BEGIN()
{
// Seek to the min block offset. It is possible we are already at the correct position but it
// is easier and safer to let lseek() figure this out.
THROW_ON_SYS_ERROR_FMT(
lseek(
ioWriteFd(storageWriteIo(pgFileWrite)), (off_t)(blockMapMinIdx * file->blockIncrSize),
SEEK_SET) == -1,
FileOpenError, STORAGE_ERROR_READ_SEEK, (uint64_t)(blockMapMinIdx * file->blockIncrSize),
strZ(storagePathP(storagePg(), file->name)));
// Open the block list for read. Using one read for all blocks is cheaper than reading from the
// file multiple times, which is especially noticeable on object stores. Use the last block in
// the list to construct the name of the repo file where the blocks are stored since it is
// available and must have the same reference and bundle id as the other blocks.
StorageRead *const blockRead = storageNewReadP(
storageRepo(),
backupFileRepoPathP(
strLstGet(referenceList, blockMapItem->reference), .manifestName = file->manifestFile,
.bundleId = blockMapItem->bundleId, .blockIncr = true),
.offset = blockListOffset, .limit = VARUINT64(blockListSize));
ioReadOpen(storageReadIo(blockRead));
for (unsigned int blockMapIdx = blockMapMinIdx; blockMapIdx <= blockMapMaxIdx; blockMapIdx++)
{
// Use a per-block mem context to reduce memory usage
MEM_CONTEXT_TEMP_BEGIN()
{
// Read the block in chunked format
IoRead *const chunkedRead = ioChunkedReadNew(storageReadIo(blockRead));
// Add decryption filter
if (cipherPass != NULL)
{
ioFilterGroupAdd(
ioReadFilterGroup(chunkedRead),
cipherBlockNewP(
cipherModeDecrypt, cipherTypeAes256Cbc, BUFSTR(cipherPass), .raw = true));
}
// Add decompression filter
if (repoFileCompressType != compressTypeNone)
{
ioFilterGroupAdd(
ioReadFilterGroup(chunkedRead), decompressFilter(repoFileCompressType));
}
// Open chunked read
ioReadOpen(chunkedRead);
// Read and discard the block number since we already know it
ioReadVarIntU64(chunkedRead);
// Copy chunked block
ioCopyP(chunkedRead, storageWriteIo(pgFileWrite));
// Flush writes since we may seek to a new location for the next block list
ioWriteFlush(storageWriteIo(pgFileWrite));
}
MEM_CONTEXT_TEMP_END();
}
}
MEM_CONTEXT_TEMP_END();
updateFound = false;
}
}
// Close the file to complete the update
ioWriteClose(storageWriteIo(pgFileWrite));
// Calculate checksum. In theory this is not needed because the file should always be reconstructed
// correctly. However, it seems better to check, and since the pages should still be buffered the
// operation will be very fast.
IoRead *const read = storageReadIo(storageNewReadP(storagePg(), file->name));
ioFilterGroupAdd(ioReadFilterGroup(read), cryptoHashNew(hashTypeSha1));
ioReadDrain(read);
checksum = pckReadBinP(ioFilterGroupResultP(ioReadFilterGroup(read), CRYPTO_HASH_FILTER_TYPE));
}
// Else normal file
else
{
IoFilterGroup *filterGroup = ioWriteFilterGroup(storageWriteIo(pgFileWrite));
// Add decompression filter
if (repoFileCompressType != compressTypeNone)
ioFilterGroupAdd(filterGroup, decompressFilter(repoFileCompressType));
// Add decryption filter
if (cipherPass != NULL)
{
ioFilterGroupAdd(
filterGroup, cipherBlockNewP(cipherModeDecrypt, cipherTypeAes256Cbc, BUFSTR(cipherPass)));
}
// Add sha1 filter
ioFilterGroupAdd(filterGroup, cryptoHashNew(hashTypeSha1));
// Add decompression filter
if (repoFileCompressType != compressTypeNone)
ioFilterGroupAdd(filterGroup, decompressFilter(repoFileCompressType));
// Add size filter
ioFilterGroupAdd(filterGroup, ioSizeNew());
// Add sha1 filter
ioFilterGroupAdd(filterGroup, cryptoHashNew(hashTypeSha1));
// Copy file
ioWriteOpen(storageWriteIo(pgFileWrite));
ioCopyP(storageReadIo(repoFileRead), storageWriteIo(pgFileWrite), .limit = file->limit);
ioWriteClose(storageWriteIo(pgFileWrite));
// Add size filter
ioFilterGroupAdd(filterGroup, ioSizeNew());
// Copy file
ioWriteOpen(storageWriteIo(pgFileWrite));
ioCopyP(storageReadIo(repoFileRead), storageWriteIo(pgFileWrite), .limit = file->limit);
ioWriteClose(storageWriteIo(pgFileWrite));
// Get checksum result
checksum = pckReadBinP(ioFilterGroupResultP(filterGroup, CRYPTO_HASH_FILTER_TYPE));
}
// If more than one file is being copied from a single read then decrement the limit
if (repoFileLimit != 0)
@ -268,14 +477,12 @@ restoreFile(
storageReadFree(repoFileRead);
// Validate checksum
if (!bufEq(file->checksum, pckReadBinP(ioFilterGroupResultP(filterGroup, CRYPTO_HASH_FILTER_TYPE))))
if (!bufEq(file->checksum, checksum))
{
THROW_FMT(
ChecksumError,
"error restoring '%s': actual checksum '%s' does not match expected checksum '%s'", strZ(file->name),
strZ(
strNewEncode(encodingHex, pckReadBinP(ioFilterGroupResultP(filterGroup, CRYPTO_HASH_FILTER_TYPE)))),
strZ(strNewEncode(encodingHex, file->checksum)));
strZ(strNewEncode(encodingHex, checksum)), strZ(strNewEncode(encodingHex, file->checksum)));
}
}
}
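
The grouping logic above can be hard to follow inside the filter plumbing. Below is a simplified, self-contained sketch of the same idea: walk the repo block map, mark a block for update when it is beyond the delta map or its checksum differs, and merge contiguous updates into ranges. The real code additionally requires a common reference for blocks in a range and tracks repo offsets, which are omitted here.

#include <stdio.h>
#include <string.h>

#define HASH_SIZE 20                            // SHA1 size, matching HASH_TYPE_SHA1_SIZE

// Simplified sketch of the block-list grouping in restoreFile(): a block needs
// updating when it is beyond the delta map or its checksum differs, and
// contiguous updates are merged into one range (one repo read in the real code)
static void
blockRangeList(
    const unsigned char *const repoMap, const unsigned int repoSize,
    const unsigned char *const deltaMap, const unsigned int deltaSize)
{
    unsigned int minIdx = 0;
    int updateFound = 0;

    for (unsigned int idx = 0; idx < repoSize; idx++)
    {
        const int update =
            idx >= deltaSize ||
            memcmp(repoMap + idx * HASH_SIZE, deltaMap + idx * HASH_SIZE, HASH_SIZE) != 0;

        // Start a new range on the first mismatched block
        if (update && !updateFound)
        {
            updateFound = 1;
            minIdx = idx;
        }

        // Close the range when a matching block or the end of the map is reached
        if (updateFound && (!update || idx == repoSize - 1))
        {
            printf("fetch blocks %u..%u\n", minIdx, update ? idx : idx - 1);
            updateFound = 0;
        }
    }
}

int main(void)
{
    unsigned char repoMap[4 * HASH_SIZE] = {0};
    unsigned char deltaMap[3 * HASH_SIZE] = {0};

    repoMap[1 * HASH_SIZE] = 1;                 // block 1 differs from the local file
                                                // block 3 is beyond the delta map

    blockRangeList(repoMap, 4, deltaMap, 3);    // prints "fetch blocks 1..1" and "3..3"
    return 0;
}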


@ -33,7 +33,10 @@ typedef struct RestoreFile
const String *group; // Original group
uint64_t offset; // Offset into repo file where pg file is located
const Variant *limit; // Limit for read in the repo file
uint64_t blockIncrMapSize; // Block incremental map size (0 if not incremental)
uint64_t blockIncrSize; // Block incremental size (when map size > 0)
const String *manifestFile; // Manifest file
const Buffer *deltaMap; // Delta for block incremental restore, set in restoreFile()
} RestoreFile;
typedef struct RestoreFileResult
@ -44,6 +47,6 @@ typedef struct RestoreFileResult
FN_EXTERN List *restoreFile(
const String *repoFile, unsigned int repoIdx, CompressType repoFileCompressType, time_t copyTimeBegin, bool delta,
bool deltaForce, const String *cipherPass, const List *fileList);
bool deltaForce, const String *cipherPass, const StringList *referenceList, List *fileList);
#endif


@ -34,6 +34,7 @@ restoreFileProtocol(PackRead *const param, ProtocolServer *const server)
const bool delta = pckReadBoolP(param);
const bool deltaForce = pckReadBoolP(param);
const String *const cipherPass = pckReadStrP(param);
const StringList *const referenceList = pckReadStrLstP(param);
// Build the file list
List *fileList = lstNewP(sizeof(RestoreFile));
@ -55,6 +56,12 @@ restoreFileProtocol(PackRead *const param, ProtocolServer *const server)
file.limit = varNewUInt64(pckReadU64P(param));
}
// Block incremental
file.blockIncrMapSize = pckReadU64P(param);
if (file.blockIncrMapSize != 0)
file.blockIncrSize = pckReadU64P(param);
file.manifestFile = pckReadStrP(param);
lstAdd(fileList, &file);
@ -62,7 +69,7 @@ restoreFileProtocol(PackRead *const param, ProtocolServer *const server)
// Restore files
const List *const result = restoreFile(
repoFile, repoIdx, repoFileCompressType, copyTimeBegin, delta, deltaForce, cipherPass, fileList);
repoFile, repoIdx, repoFileCompressType, copyTimeBegin, delta, deltaForce, cipherPass, referenceList, fileList);
// Return result
PackWrite *const resultPack = protocolPackNew();


@ -2326,13 +2326,15 @@ static ProtocolParallelJob *restoreJobCallback(void *data, unsigned int clientId
backupFileRepoPathP(
file.reference != NULL ? file.reference : manifestData(jobData->manifest)->backupLabel,
.manifestName = file.name, .bundleId = file.bundleId,
.compressType = manifestData(jobData->manifest)->backupOptionCompressType));
.compressType = manifestData(jobData->manifest)->backupOptionCompressType,
.blockIncr = file.blockIncrMapSize != 0));
pckWriteU32P(param, jobData->repoIdx);
pckWriteU32P(param, manifestData(jobData->manifest)->backupOptionCompressType);
pckWriteTimeP(param, manifestData(jobData->manifest)->backupTimestampCopyStart);
pckWriteBoolP(param, cfgOptionBool(cfgOptDelta));
pckWriteBoolP(param, cfgOptionBool(cfgOptDelta) && cfgOptionBool(cfgOptForce));
pckWriteStrP(param, jobData->cipherSubPass);
pckWriteStrLstP(param, manifestReferenceList(jobData->manifest));
fileAdded = true;
}
@ -2346,15 +2348,30 @@ static ProtocolParallelJob *restoreJobCallback(void *data, unsigned int clientId
pckWriteStrP(param, restoreManifestOwnerReplace(file.user, jobData->rootReplaceUser));
pckWriteStrP(param, restoreManifestOwnerReplace(file.group, jobData->rootReplaceGroup));
if (file.bundleId != 0)
// If block incremental then modify offset and size to where the map is stored since we need to read that first.
if (file.blockIncrMapSize != 0)
{
pckWriteBoolP(param, true);
pckWriteU64P(param, file.bundleOffset + file.sizeRepo - file.blockIncrMapSize);
pckWriteU64P(param, file.blockIncrMapSize);
}
// Else write bundle offset/size
else if (file.bundleId != 0)
{
pckWriteBoolP(param, true);
pckWriteU64P(param, file.bundleOffset);
pckWriteU64P(param, file.sizeRepo);
}
// Else restore as a whole file
else
pckWriteBoolP(param, false);
// Block incremental
pckWriteU64P(param, file.blockIncrMapSize);
if (file.blockIncrMapSize != 0)
pckWriteU64P(param, file.blockIncrSize);
pckWriteStrP(param, file.name);
// Remove job from the queue

src/common/io/chunkedRead.c (new file)

@ -0,0 +1,139 @@
/***********************************************************************************************************************************
Read Chunked I/O
***********************************************************************************************************************************/
#include "build.auto.h"
#include "common/debug.h"
#include "common/io/bufferRead.h"
#include "common/io/read.h"
#include "common/log.h"
#include "common/type/object.h"
/***********************************************************************************************************************************
Object type
***********************************************************************************************************************************/
typedef struct IoChunkedRead
{
IoRead *read; // IoRead to read chunked data from
bool eof; // Has the end of the chunked data been reached?
size_t chunkLast; // Size of the last chunk
size_t chunkRemains; // Remaining data in the current chunk
} IoChunkedRead;
/***********************************************************************************************************************************
Macros for function logging
***********************************************************************************************************************************/
#define FUNCTION_LOG_IO_CHUNKED_READ_TYPE \
IoChunkedRead *
#define FUNCTION_LOG_IO_CHUNKED_READ_FORMAT(value, buffer, bufferSize) \
objNameToLog(value, "IoChunkedRead", buffer, bufferSize)
/***********************************************************************************************************************************
Read data from the chunked stream
***********************************************************************************************************************************/
static size_t
ioChunkedRead(THIS_VOID, Buffer *const buffer, const bool block)
{
THIS(IoChunkedRead);
FUNCTION_LOG_BEGIN(logLevelTrace);
FUNCTION_LOG_PARAM(IO_CHUNKED_READ, this);
FUNCTION_LOG_PARAM(BUFFER, buffer);
FUNCTION_LOG_PARAM(BOOL, block);
FUNCTION_LOG_END();
ASSERT(this != NULL);
ASSERT(buffer != NULL);
size_t actualBytes = 0;
// Keep reading until the output buffer is full
while (!bufFull(buffer))
{
// If no data remaining in chunk then read the next chunk header
if (this->chunkRemains == 0)
{
const uint64_t chunkDelta = ioReadVarIntU64(this->read);
// Stop when chunk delta is zero, which indicates the end of the chunk list
if (chunkDelta == 0)
{
this->eof = true;
break;
}
// Calculate next chunk size from delta
if (this->chunkLast == 0)
this->chunkRemains = (size_t)chunkDelta;
else
this->chunkRemains = (size_t)(cvtInt64FromZigZag(chunkDelta - 1) + (int64_t)this->chunkLast);
this->chunkLast = this->chunkRemains;
}
// If the entire chunk will fit in the output buffer
if (this->chunkRemains < bufRemains(buffer))
{
bufLimitSet(buffer, bufUsed(buffer) + this->chunkRemains);
ioRead(this->read, buffer);
actualBytes += this->chunkRemains;
this->chunkRemains = 0;
}
// Else only part of the chunk will fit in the output
else
{
actualBytes += bufRemains(buffer);
this->chunkRemains -= bufRemains(buffer);
ioRead(this->read, buffer);
}
}
FUNCTION_LOG_RETURN(SIZE, actualBytes);
}
/***********************************************************************************************************************************
Has the end of the chunked data been reached?
***********************************************************************************************************************************/
static bool
ioChunkedReadEof(THIS_VOID)
{
THIS(IoChunkedRead);
FUNCTION_LOG_BEGIN(logLevelTrace);
FUNCTION_LOG_PARAM(IO_CHUNKED_READ, this);
FUNCTION_LOG_END();
ASSERT(this != NULL);
FUNCTION_LOG_RETURN(BOOL, this->eof);
}
/**********************************************************************************************************************************/
FN_EXTERN IoRead *
ioChunkedReadNew(IoRead *const read)
{
FUNCTION_LOG_BEGIN(logLevelTrace);
FUNCTION_LOG_PARAM(IO_READ, read);
FUNCTION_LOG_END();
ASSERT(read != NULL);
IoRead *this = NULL;
OBJ_NEW_BEGIN(IoChunkedRead, .childQty = MEM_CONTEXT_QTY_MAX, .allocQty = MEM_CONTEXT_QTY_MAX)
{
IoChunkedRead *const driver = OBJ_NAME(OBJ_NEW_ALLOC(), IoRead::IoChunkedRead);
*driver = (IoChunkedRead)
{
.read = read,
};
this = ioReadNewP(driver, .eof = ioChunkedReadEof, .read = ioChunkedRead);
}
OBJ_NEW_END();
FUNCTION_LOG_RETURN(IO_READ, this);
}


@ -0,0 +1,16 @@
/***********************************************************************************************************************************
Read Chunked I/O
Read data that has been chunked with the IoChunk filter.
***********************************************************************************************************************************/
#ifndef COMMON_IO_CHUNKEDREAD_H
#define COMMON_IO_CHUNKEDREAD_H
#include "common/io/read.h"
/***********************************************************************************************************************************
Constructors
***********************************************************************************************************************************/
FN_EXTERN IoRead *ioChunkedReadNew(IoRead *read);
#endif


@ -0,0 +1,172 @@
/***********************************************************************************************************************************
Chunk Filter
***********************************************************************************************************************************/
#include "build.auto.h"
#include "common/debug.h"
#include "common/io/filter/chunk.h"
#include "common/log.h"
#include "common/type/object.h"
/***********************************************************************************************************************************
Object type
***********************************************************************************************************************************/
typedef struct IoChunk
{
MemContext *memContext; // Mem context of filter
const uint8_t *buffer; // Internal buffer
size_t bufferSize; // Buffer size
size_t bufferOffset; // Buffer offset
size_t sizeLast; // Size of last chunk
bool done; // Is the filter done?
uint8_t header[CVT_VARINT128_BUFFER_SIZE]; // Chunk header
} IoChunk;
/***********************************************************************************************************************************
Macros for function logging
***********************************************************************************************************************************/
#define FUNCTION_LOG_IO_CHUNK_TYPE \
IoChunk *
#define FUNCTION_LOG_IO_CHUNK_FORMAT(value, buffer, bufferSize) \
objNameToLog(value, "IoChunk", buffer, bufferSize)
/***********************************************************************************************************************************
Should the same input be provided again?
***********************************************************************************************************************************/
static bool
ioChunkInputSame(const THIS_VOID)
{
THIS(const IoChunk);
FUNCTION_TEST_BEGIN();
FUNCTION_TEST_PARAM(IO_CHUNK, this);
FUNCTION_TEST_END();
ASSERT(this != NULL);
FUNCTION_TEST_RETURN(BOOL, this->buffer != NULL);
}
/***********************************************************************************************************************************
Is filter done?
***********************************************************************************************************************************/
static bool
ioChunkDone(const THIS_VOID)
{
THIS(const IoChunk);
FUNCTION_TEST_BEGIN();
FUNCTION_TEST_PARAM(IO_CHUNK, this);
FUNCTION_TEST_END();
ASSERT(this != NULL);
FUNCTION_TEST_RETURN(BOOL, this->done && !ioChunkInputSame(this));
}
/***********************************************************************************************************************************
Split the input into chunks
***********************************************************************************************************************************/
static void
ioChunkProcess(THIS_VOID, const Buffer *const input, Buffer *const output)
{
THIS(IoChunk);
FUNCTION_LOG_BEGIN(logLevelTrace);
FUNCTION_LOG_PARAM(IO_CHUNK, this);
FUNCTION_LOG_PARAM(BUFFER, input);
FUNCTION_LOG_PARAM(BUFFER, output);
FUNCTION_LOG_END();
ASSERT(this != NULL);
ASSERT(output != NULL);
// If there is input to process
if (input != NULL)
{
// Write the chunk size
if (this->buffer == NULL)
{
// Initialize the header with the chunk size
this->buffer = this->header;
this->bufferSize = 0;
this->bufferOffset = 0;
cvtUInt64ToVarInt128(
this->sizeLast == 0 ? bufUsed(input) : cvtInt64ToZigZag((int64_t)bufUsed(input) - (int64_t)this->sizeLast) + 1,
this->header, &this->bufferSize, SIZE_OF_STRUCT_MEMBER(IoChunk, header));
this->sizeLast = bufUsed(input);
}
// Output the chunk
do
{
// Output the entire buffer if possible
if (bufRemains(output) >= this->bufferSize - this->bufferOffset)
{
bufCatC(output, this->buffer, this->bufferOffset, this->bufferSize - this->bufferOffset);
// If the header was written then switch to the chunk
if (this->buffer == this->header)
{
this->buffer = bufPtrConst(input);
this->bufferSize = bufUsed(input);
this->bufferOffset = 0;
}
// Else done writing the chunk
else
this->buffer = NULL;
}
// Else output part of the buffer
else
{
const size_t outputSize = bufRemains(output);
bufCatC(output, this->buffer, this->bufferOffset, outputSize);
this->bufferOffset += outputSize;
}
}
while (ioChunkInputSame(this) && !bufFull(output));
}
// Else processing is complete
else
{
ASSERT(bufRemains(output) > 0);
// Write the terminating zero byte
*(bufPtr(output) + bufUsed(output)) = '\0';
bufUsedInc(output, 1);
this->done = true;
}
FUNCTION_LOG_RETURN_VOID();
}
/**********************************************************************************************************************************/
FN_EXTERN IoFilter *
ioChunkNew(void)
{
FUNCTION_LOG_VOID(logLevelTrace);
IoFilter *this = NULL;
OBJ_NEW_BEGIN(IoChunk, .childQty = MEM_CONTEXT_QTY_MAX, .allocQty = MEM_CONTEXT_QTY_MAX)
{
IoChunk *const driver = OBJ_NAME(OBJ_NEW_ALLOC(), IoFilter::IoChunk);
*driver = (IoChunk)
{
.memContext = memContextCurrent(),
};
this = ioFilterNewP(
CHUNK_FILTER_TYPE, driver, NULL, .done = ioChunkDone, .inOut = ioChunkProcess,
.inputSame = ioChunkInputSame);
}
OBJ_NEW_END();
FUNCTION_LOG_RETURN(IO_FILTER, this);
}


@ -0,0 +1,21 @@
/***********************************************************************************************************************************
Chunk Filter
Split data up into chunks so it can be written (and later read) without knowing the eventual size of the data.
***********************************************************************************************************************************/
#ifndef COMMON_IO_FILTER_CHUNK_H
#define COMMON_IO_FILTER_CHUNK_H
#include "common/io/filter/filter.h"
/***********************************************************************************************************************************
Filter type constant
***********************************************************************************************************************************/
#define CHUNK_FILTER_TYPE STRID5("chunk", 0xb755030)
/***********************************************************************************************************************************
Constructors
***********************************************************************************************************************************/
FN_EXTERN IoFilter *ioChunkNew(void);
#endif
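
A usage sketch of the pairing between this filter and ioChunkedReadNew() (assuming an in-memory stream via the existing bufferRead/bufferWrite constructors; not taken verbatim from this commit, and assuming each write call becomes its own chunk): the first chunk header is the raw chunk size as a varint-128, each later header is the zigzag-encoded delta from the prior size plus one, and a zero byte terminates the stream.

// Sketch only: write two chunks through the filter, then read them back
Buffer *const storage = bufNew(256);

IoWrite *const write = ioBufferWriteNew(storage);
ioFilterGroupAdd(ioWriteFilterGroup(write), ioChunkNew());
ioWriteOpen(write);
ioWrite(write, BUFSTRDEF("chunk one"));         // header: raw size 9
ioWrite(write, BUFSTRDEF("two"));               // header: zigzag(3 - 9) + 1
ioWriteClose(write);                            // flush writes the terminating zero

IoRead *const read = ioChunkedReadNew(ioBufferReadNew(storage));
ioReadOpen(read);

Buffer *const result = bufNew(64);
ioRead(read, result);                           // result holds "chunk onetwo"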


@ -196,9 +196,6 @@ ioWriteStrLine(IoWrite *this, const String *string)
}
/**********************************************************************************************************************************/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-function"
FN_EXTERN void
ioWriteVarIntU64(IoWrite *const this, const uint64_t value)
{
@ -218,8 +215,6 @@ ioWriteVarIntU64(IoWrite *const this, const uint64_t value)
FUNCTION_LOG_RETURN_VOID();
}
#pragma GCC diagnostic pop
/**********************************************************************************************************************************/
FN_EXTERN void
ioWriteFlush(IoWrite *this)


@ -130,7 +130,7 @@ Option constants
#define CFGOPT_TYPE "type"
#define CFGOPT_VERBOSE "verbose"
#define CFG_OPTION_TOTAL 157
#define CFG_OPTION_TOTAL 158
/***********************************************************************************************************************************
Option value constants
@ -443,6 +443,7 @@ typedef enum
cfgOptRepoAzureKey,
cfgOptRepoAzureKeyType,
cfgOptRepoAzureUriStyle,
cfgOptRepoBlock,
cfgOptRepoBundle,
cfgOptRepoBundleLimit,
cfgOptRepoBundleSize,


@ -4853,6 +4853,41 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] =
), // opt/repo-azure-uri-style
), // opt/repo-azure-uri-style
// -----------------------------------------------------------------------------------------------------------------------------
PARSE_RULE_OPTION // opt/repo-block
( // opt/repo-block
PARSE_RULE_OPTION_NAME("repo-block"), // opt/repo-block
PARSE_RULE_OPTION_TYPE(cfgOptTypeBoolean), // opt/repo-block
PARSE_RULE_OPTION_NEGATE(true), // opt/repo-block
PARSE_RULE_OPTION_RESET(true), // opt/repo-block
PARSE_RULE_OPTION_REQUIRED(true), // opt/repo-block
PARSE_RULE_OPTION_SECTION(cfgSectionGlobal), // opt/repo-block
PARSE_RULE_OPTION_GROUP_MEMBER(true), // opt/repo-block
PARSE_RULE_OPTION_GROUP_ID(cfgOptGrpRepo), // opt/repo-block
// opt/repo-block
PARSE_RULE_OPTION_COMMAND_ROLE_MAIN_VALID_LIST // opt/repo-block
( // opt/repo-block
PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) // opt/repo-block
), // opt/repo-block
// opt/repo-block
PARSE_RULE_OPTIONAL // opt/repo-block
( // opt/repo-block
PARSE_RULE_OPTIONAL_GROUP // opt/repo-block
( // opt/repo-block
PARSE_RULE_OPTIONAL_DEPEND // opt/repo-block
( // opt/repo-block
PARSE_RULE_OPTIONAL_DEPEND_DEFAULT(PARSE_RULE_VAL_BOOL_FALSE), // opt/repo-block
PARSE_RULE_VAL_OPT(cfgOptRepoBundle), // opt/repo-block
PARSE_RULE_VAL_BOOL_TRUE, // opt/repo-block
), // opt/repo-block
// opt/repo-block
PARSE_RULE_OPTIONAL_DEFAULT // opt/repo-block
( // opt/repo-block
PARSE_RULE_VAL_BOOL_FALSE, // opt/repo-block
), // opt/repo-block
), // opt/repo-block
), // opt/repo-block
), // opt/repo-block
// -----------------------------------------------------------------------------------------------------------------------------
PARSE_RULE_OPTION // opt/repo-bundle
( // opt/repo-bundle
PARSE_RULE_OPTION_NAME("repo-bundle"), // opt/repo-bundle
@ -9427,6 +9462,7 @@ static const uint8_t optionResolveOrder[] =
cfgOptRepoAzureKey, // opt-resolve-order
cfgOptRepoAzureKeyType, // opt-resolve-order
cfgOptRepoAzureUriStyle, // opt-resolve-order
cfgOptRepoBlock, // opt-resolve-order
cfgOptRepoCipherPass, // opt-resolve-order
cfgOptRepoGcsKeyType, // opt-resolve-order
cfgOptRepoHost, // opt-resolve-order

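In configuration terms, the dependency encoded in the repo-block parse rule above means block incremental is requested like this (a hypothetical minimal stanza; repo1-block without repo1-bundle would fail the depend check):

[global]
repo1-path=/var/lib/pgbackrest
repo1-bundle=y
repo1-block=y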

@ -2078,9 +2078,15 @@ configParse(const Storage *storage, unsigned int argListSize, const char *argLis
if (!dependResult.valid && optionSet && parseOptionValue->source == cfgSourceParam)
{
PackRead *filter = pckReadNewC(optionalRules.valid, optionalRules.validSize);
ConfigOption dependId = pckReadU32P(filter);
// Get depend option name
// If there is a boolean default value just consume it since it is not needed here
pckReadNext(filter);
if (pckReadType(filter) == pckTypeBool)
pckReadBoolP(filter);
// Get depend option id and name
ConfigOption dependId = pckReadU32P(filter);
const String *dependOptionName = STR(cfgParseOptionKeyIdxName(dependId, optionKeyIdx));
// If depend value is not set


@ -102,6 +102,8 @@ Create new object and load contents from a file
#define INFO_BACKUP_KEY_BACKUP_ARCHIVE_STOP "backup-archive-stop"
#define INFO_BACKUP_KEY_BACKUP_INFO_REPO_SIZE "backup-info-repo-size"
#define INFO_BACKUP_KEY_BACKUP_INFO_REPO_SIZE_DELTA "backup-info-repo-size-delta"
#define INFO_BACKUP_KEY_BACKUP_INFO_REPO_SIZE_MAP "backup-info-repo-size-map"
#define INFO_BACKUP_KEY_BACKUP_INFO_REPO_SIZE_MAP_DELTA "backup-info-repo-size-map-delta"
#define INFO_BACKUP_KEY_BACKUP_INFO_SIZE "backup-info-size"
#define INFO_BACKUP_KEY_BACKUP_INFO_SIZE_DELTA "backup-info-size-delta"
#define INFO_BACKUP_KEY_BACKUP_LSN_START "backup-lsn-start"
@ -173,6 +175,14 @@ infoBackupLoadCallback(void *data, const String *section, const String *key, con
// Size info
info.backupInfoRepoSize = jsonReadUInt64(jsonReadKeyRequireZ(json, INFO_BACKUP_KEY_BACKUP_INFO_REPO_SIZE));
info.backupInfoRepoSizeDelta = jsonReadUInt64(jsonReadKeyRequireZ(json, INFO_BACKUP_KEY_BACKUP_INFO_REPO_SIZE_DELTA));
if (jsonReadKeyExpectZ(json, INFO_BACKUP_KEY_BACKUP_INFO_REPO_SIZE_MAP))
{
info.backupInfoRepoSizeMap = varNewUInt64(jsonReadUInt64(json));
info.backupInfoRepoSizeMapDelta = varNewUInt64(
jsonReadUInt64(jsonReadKeyRequireZ(json, INFO_BACKUP_KEY_BACKUP_INFO_REPO_SIZE_MAP_DELTA)));
}
info.backupInfoSize = jsonReadUInt64(jsonReadKeyRequireZ(json, INFO_BACKUP_KEY_BACKUP_INFO_SIZE));
info.backupInfoSizeDelta = jsonReadUInt64(jsonReadKeyRequireZ(json, INFO_BACKUP_KEY_BACKUP_INFO_SIZE_DELTA));
@ -285,6 +295,20 @@ infoBackupSaveCallback(void *const data, const String *const sectionNext, InfoSa
jsonWriteUInt64(jsonWriteKeyZ(json, INFO_BACKUP_KEY_BACKUP_INFO_REPO_SIZE), backupData.backupInfoRepoSize);
jsonWriteUInt64(jsonWriteKeyZ(json, INFO_BACKUP_KEY_BACKUP_INFO_REPO_SIZE_DELTA), backupData.backupInfoRepoSizeDelta);
ASSERT(
(backupData.backupInfoRepoSizeMap != NULL && backupData.backupInfoRepoSizeMapDelta != NULL) ||
(backupData.backupInfoRepoSizeMap == NULL && backupData.backupInfoRepoSizeMapDelta == NULL));
if (backupData.backupInfoRepoSizeMap != NULL)
{
jsonWriteUInt64(
jsonWriteKeyZ(json, INFO_BACKUP_KEY_BACKUP_INFO_REPO_SIZE_MAP), varUInt64(backupData.backupInfoRepoSizeMap));
jsonWriteUInt64(
jsonWriteKeyZ(json, INFO_BACKUP_KEY_BACKUP_INFO_REPO_SIZE_MAP_DELTA),
varUInt64(backupData.backupInfoRepoSizeMapDelta));
}
jsonWriteUInt64(jsonWriteKeyZ(json, INFO_BACKUP_KEY_BACKUP_INFO_SIZE), backupData.backupInfoSize);
jsonWriteUInt64(jsonWriteKeyZ(json, INFO_BACKUP_KEY_BACKUP_INFO_SIZE_DELTA), backupData.backupInfoSizeDelta);
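
For illustration, the map keys sit alongside the existing size keys in backup.info; the updated info unit test later in this commit expects output of the form "backup-info-repo-size":2369186,"backup-info-repo-size-delta":346,"backup-info-repo-size-map":100,"backup-info-repo-size-map-delta":12 (test data, not representative sizes).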
@ -395,6 +419,8 @@ infoBackupDataAdd(const InfoBackup *this, const Manifest *manifest)
uint64_t backupSizeDelta = 0;
uint64_t backupRepoSize = 0;
uint64_t backupRepoSizeDelta = 0;
uint64_t backupRepoSizeMap = 0;
uint64_t backupRepoSizeMapDelta = 0;
bool backupError = false;
for (unsigned int fileIdx = 0; fileIdx < manifestFileTotal(manifest); fileIdx++)
@ -403,12 +429,14 @@ infoBackupDataAdd(const InfoBackup *this, const Manifest *manifest)
backupSize += file.size;
backupRepoSize += file.sizeRepo > 0 ? file.sizeRepo : file.size;
backupRepoSizeMap += file.blockIncrMapSize;
// If a reference to a file exists, then it is in a previous backup and the delta calculation was already done
if (file.reference == NULL)
{
backupSizeDelta += file.size;
backupRepoSizeDelta += file.sizeRepo > 0 ? file.sizeRepo : file.size;
backupRepoSizeMapDelta += file.blockIncrMapSize;
}
// Is there an error in the file?
@ -449,6 +477,13 @@ infoBackupDataAdd(const InfoBackup *this, const Manifest *manifest)
.optionOnline = manData->backupOptionOnline,
};
// Add map sizes when block incr
if (manData->blockIncr)
{
infoBackupData.backupInfoRepoSizeMap = varNewUInt64(backupRepoSizeMap);
infoBackupData.backupInfoRepoSizeMapDelta = varNewUInt64(backupRepoSizeMapDelta);
}
if (manData->backupType != backupTypeFull)
{
// This list may not be sorted for manifests created before the reference list was added. Remove the last reference


@ -51,6 +51,8 @@ typedef struct InfoBackupData
const String *backupArchiveStop;
uint64_t backupInfoRepoSize;
uint64_t backupInfoRepoSizeDelta;
const Variant *backupInfoRepoSizeMap;
const Variant *backupInfoRepoSizeMapDelta;
uint64_t backupInfoSize;
uint64_t backupInfoSizeDelta;
const String *backupLsnStart;


@ -27,6 +27,9 @@ STRING_EXTERN(BACKUP_MANIFEST_FILE_STR, BACKUP_MANIF
STRING_EXTERN(MANIFEST_TARGET_PGDATA_STR, MANIFEST_TARGET_PGDATA);
STRING_EXTERN(MANIFEST_TARGET_PGTBLSPC_STR, MANIFEST_TARGET_PGTBLSPC);
// All block incremental sizes must be divisible by this factor
#define BLOCK_INCR_SIZE_FACTOR 8192
/***********************************************************************************************************************************
Object type
***********************************************************************************************************************************/
@ -98,6 +101,7 @@ typedef enum
manifestFilePackFlagChecksumRepo,
manifestFilePackFlagReference,
manifestFilePackFlagBundle,
manifestFilePackFlagBlockIncr,
manifestFilePackFlagCopy,
manifestFilePackFlagDelta,
manifestFilePackFlagResume,
@ -159,6 +163,9 @@ manifestFilePack(const Manifest *const manifest, const ManifestFile *const file)
if (file->bundleId != 0)
flag |= 1 << manifestFilePackFlagBundle;
if (file->blockIncrSize != 0)
flag |= 1 << manifestFilePackFlagBlockIncr;
if (file->mode != manifest->fileModeDefault)
flag |= 1 << manifestFilePackFlagMode;
@ -227,6 +234,15 @@ manifestFilePack(const Manifest *const manifest, const ManifestFile *const file)
cvtUInt64ToVarInt128(file->bundleOffset, buffer, &bufferPos, sizeof(buffer));
}
// Block incremental
if (flag & (1 << manifestFilePackFlagBlockIncr))
{
ASSERT(file->blockIncrSize % BLOCK_INCR_SIZE_FACTOR == 0);
cvtUInt64ToVarInt128(file->blockIncrSize / BLOCK_INCR_SIZE_FACTOR, buffer, &bufferPos, sizeof(buffer));
cvtUInt64ToVarInt128(file->blockIncrMapSize, buffer, &bufferPos, sizeof(buffer));
}
// Allocate memory for the file pack
const size_t nameSize = strSize(file->name) + 1;
@ -342,6 +358,13 @@ manifestFileUnpack(const Manifest *const manifest, const ManifestFilePack *const
result.bundleOffset = cvtUInt64FromVarInt128((const uint8_t *)filePack, &bufferPos, UINT_MAX);
}
// Block incremental
if (flag & (1 << manifestFilePackFlagBlockIncr))
{
result.blockIncrSize = cvtUInt64FromVarInt128((const uint8_t *)filePack, &bufferPos, UINT_MAX) * BLOCK_INCR_SIZE_FACTOR;
result.blockIncrMapSize = cvtUInt64FromVarInt128((const uint8_t *)filePack, &bufferPos, UINT_MAX);
}
// Checksum page error
result.checksumPageError = flag & (1 << manifestFilePackFlagChecksumPageError) ? true : false;
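
As a worked example of the factor encoding above: a 128KiB block size (131072 bytes) is stored as the varint 16 (131072 / 8192) and a 768KiB block size as 96, so typical block sizes fit in a single varint byte in the manifest.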
@ -726,6 +749,80 @@ manifestLinkCheck(const Manifest *this)
FUNCTION_LOG_RETURN_VOID();
}
/***********************************************************************************************************************************
Calculate block incremental size for a file. The block size is based on the size and age of the file. Larger files get larger block
sizes to reduce the cost of the map and individual block compression. Older files also get larger block sizes under the assumption
that they are unlikely to be modified if they have not been modified in a while. Very old and very small files skip block
incremental entirely.
The minimum practical block size is 128k. Below that, the loss of compression efficiency becomes too expensive in terms of space.
***********************************************************************************************************************************/
// File size to block size map
static struct ManifestBuildBlockIncrSizeMap
{
uint32_t fileSize;
uint32_t blockSize;
} manifestBuildBlockIncrSizeMap[] =
{
{.fileSize = 1024 * 1024 * 1024, .blockSize = 1024 * 1024},
{.fileSize = 256 * 1024 * 1024, .blockSize = 768 * 1024},
{.fileSize = 64 * 1024 * 1024, .blockSize = 512 * 1024},
{.fileSize = 16 * 1024 * 1024, .blockSize = 384 * 1024},
{.fileSize = 4 * 1024 * 1024, .blockSize = 256 * 1024},
{.fileSize = 2 * 1024 * 1024, .blockSize = 192 * 1024},
{.fileSize = 128 * 1024, .blockSize = 128 * 1024},
};
// File age to block multiplier map
static struct ManifestBuildBlockIncrTimeMap
{
uint32_t fileAge;
uint32_t blockMultiplier;
} manifestBuildBlockIncrTimeMap[] =
{
{.fileAge = 4 * 7 * 86400, .blockMultiplier = 0},
{.fileAge = 2 * 7 * 86400, .blockMultiplier = 4},
{.fileAge = 7 * 86400, .blockMultiplier = 2},
};
static uint64_t
manifestBuildBlockIncrSize(const time_t timeStart, const ManifestFile *const file)
{
FUNCTION_TEST_BEGIN();
FUNCTION_TEST_PARAM(TIME, timeStart);
FUNCTION_TEST_PARAM(MANIFEST_FILE, file);
FUNCTION_TEST_END();
uint64_t result = 0;
// Search size map for the appropriate block size
for (unsigned int sizeIdx = 0; sizeIdx < LENGTH_OF(manifestBuildBlockIncrSizeMap); sizeIdx++)
{
if (file->size >= manifestBuildBlockIncrSizeMap[sizeIdx].fileSize)
{
result = manifestBuildBlockIncrSizeMap[sizeIdx].blockSize;
break;
}
}
// If block size > 0 then search age map for a multiplier
if (result != 0)
{
const time_t fileAge = timeStart - file->timestamp;
for (unsigned int timeIdx = 0; timeIdx < LENGTH_OF(manifestBuildBlockIncrTimeMap); timeIdx++)
{
if (fileAge >= (time_t)manifestBuildBlockIncrTimeMap[timeIdx].fileAge)
{
result *= manifestBuildBlockIncrTimeMap[timeIdx].blockMultiplier;
break;
}
}
}
FUNCTION_TEST_RETURN(UINT64, result);
}
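
As a worked example: a 20MiB file falls in the 16MiB tier, giving a 384KiB block size. If the file was last modified 10 days before the backup started, the multiplier is 2 and the block size becomes 768KiB; if it has not been modified in four weeks, the multiplier is 0 and block incremental is skipped entirely.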
/**********************************************************************************************************************************/
typedef struct ManifestBuildData
{
@ -962,6 +1059,10 @@ manifestBuildInfo(
file.checksumSha1 = bufPtrConst(HASH_TYPE_SHA1_ZERO_BUF);
}
// Get block incremental size
if (info->size != 0 && buildData->manifest->pub.data.blockIncr)
file.blockIncrSize = manifestBuildBlockIncrSize(buildData->manifest->pub.data.backupTimestampStart, &file);
// Determine if this file should be page checksummed
if (dbPath && buildData->checksumPage)
{
@ -1158,16 +1259,19 @@ manifestBuildInfo(
FN_EXTERN Manifest *
manifestNewBuild(
const Storage *const storagePg, const unsigned int pgVersion, const unsigned int pgCatalogVersion, const bool online,
const bool checksumPage, const bool bundle, const StringList *const excludeList, const Pack *const tablespaceList)
const Storage *const storagePg, const unsigned int pgVersion, const unsigned int pgCatalogVersion, const time_t timestampStart,
const bool online, const bool checksumPage, const bool bundle, const bool blockIncr, const StringList *const excludeList,
const Pack *const tablespaceList)
{
FUNCTION_LOG_BEGIN(logLevelDebug);
FUNCTION_LOG_PARAM(STORAGE, storagePg);
FUNCTION_LOG_PARAM(UINT, pgVersion);
FUNCTION_LOG_PARAM(UINT, pgCatalogVersion);
FUNCTION_LOG_PARAM(TIME, timestampStart);
FUNCTION_LOG_PARAM(BOOL, online);
FUNCTION_LOG_PARAM(BOOL, checksumPage);
FUNCTION_LOG_PARAM(BOOL, bundle);
FUNCTION_LOG_PARAM(BOOL, blockIncr);
FUNCTION_LOG_PARAM(STRING_LIST, excludeList);
FUNCTION_LOG_PARAM(PACK, tablespaceList);
FUNCTION_LOG_END();
@ -1184,10 +1288,12 @@ manifestNewBuild(
this->pub.data.backrestVersion = strNewZ(PROJECT_VERSION);
this->pub.data.pgVersion = pgVersion;
this->pub.data.pgCatalogVersion = pgCatalogVersion;
this->pub.data.backupTimestampStart = timestampStart;
this->pub.data.backupType = backupTypeFull;
this->pub.data.backupOptionOnline = online;
this->pub.data.backupOptionChecksumPage = varNewBool(checksumPage);
this->pub.data.bundle = bundle;
this->pub.data.blockIncr = blockIncr;
MEM_CONTEXT_TEMP_BEGIN()
{
@ -1542,7 +1648,9 @@ manifestBuildIncr(Manifest *this, const Manifest *manifestPrior, BackupType type
{
const ManifestFile filePrior = manifestFileFind(manifestPrior, file.name);
if (file.copy && file.size == filePrior.size && (delta || file.size == 0 || file.timestamp == filePrior.timestamp))
if (file.copy &&
((filePrior.blockIncrMapSize > 0 && file.blockIncrSize > 0) ||
(file.size == filePrior.size && (delta || file.size == 0 || file.timestamp == filePrior.timestamp))))
{
file.sizeRepo = filePrior.sizeRepo;
file.checksumSha1 = filePrior.checksumSha1;
@ -1554,6 +1662,15 @@ manifestBuildIncr(Manifest *this, const Manifest *manifestPrior, BackupType type
file.bundleId = filePrior.bundleId;
file.bundleOffset = filePrior.bundleOffset;
// Copy block incr info if the file has a block incr size. It is possible for a file to shrink below the limit
// for block incr and lose the block incr map, or for block incr to be disabled. The block incr size needs to
// be copied from the prior file because it cannot change within a backup set without invalidating all prior maps.
if (file.blockIncrSize > 0)
{
file.blockIncrSize = filePrior.blockIncrSize;
file.blockIncrMapSize = filePrior.blockIncrMapSize;
}
// Perform delta if the file size is not zero
file.delta = delta && file.size != 0;
@ -1573,15 +1690,14 @@ manifestBuildIncr(Manifest *this, const Manifest *manifestPrior, BackupType type
/**********************************************************************************************************************************/
FN_EXTERN void
manifestBuildComplete(
Manifest *const this, const time_t timestampStart, const String *const lsnStart, const String *const archiveStart,
const time_t timestampStop, const String *const lsnStop, const String *const archiveStop, const unsigned int pgId,
const uint64_t pgSystemId, const Pack *const dbList, const bool optionArchiveCheck, const bool optionArchiveCopy,
const size_t optionBufferSize, const unsigned int optionCompressLevel, const unsigned int optionCompressLevelNetwork,
const bool optionHardLink, const unsigned int optionProcessMax, const bool optionStandby, const KeyValue *const annotation)
Manifest *const this, const String *const lsnStart, const String *const archiveStart, const time_t timestampStop,
const String *const lsnStop, const String *const archiveStop, const unsigned int pgId, const uint64_t pgSystemId,
const Pack *const dbList, const bool optionArchiveCheck, const bool optionArchiveCopy, const size_t optionBufferSize,
const unsigned int optionCompressLevel, const unsigned int optionCompressLevelNetwork, const bool optionHardLink,
const unsigned int optionProcessMax, const bool optionStandby, const KeyValue *const annotation)
{
FUNCTION_LOG_BEGIN(logLevelDebug);
FUNCTION_LOG_PARAM(MANIFEST, this);
FUNCTION_LOG_PARAM(TIME, timestampStart);
FUNCTION_LOG_PARAM(STRING, lsnStart);
FUNCTION_LOG_PARAM(STRING, archiveStart);
FUNCTION_LOG_PARAM(TIME, timestampStop);
@ -1604,7 +1720,6 @@ manifestBuildComplete(
MEM_CONTEXT_BEGIN(this->pub.memContext)
{
// Save info
this->pub.data.backupTimestampStart = timestampStart;
this->pub.data.lsnStart = strDup(lsnStart);
this->pub.data.archiveStart = strDup(archiveStart);
this->pub.data.backupTimestampStop = timestampStop;
@ -1695,6 +1810,7 @@ manifestBuildComplete(
#define MANIFEST_KEY_ANNOTATION "annotation"
#define MANIFEST_KEY_BACKUP_ARCHIVE_START "backup-archive-start"
#define MANIFEST_KEY_BACKUP_ARCHIVE_STOP "backup-archive-stop"
#define MANIFEST_KEY_BACKUP_BLOCK_INCR "backup-block-incr"
#define MANIFEST_KEY_BACKUP_BUNDLE "backup-bundle"
#define MANIFEST_KEY_BACKUP_LABEL "backup-label"
#define MANIFEST_KEY_BACKUP_LSN_START "backup-lsn-start"
@ -1705,6 +1821,8 @@ manifestBuildComplete(
#define MANIFEST_KEY_BACKUP_TIMESTAMP_START "backup-timestamp-start"
#define MANIFEST_KEY_BACKUP_TIMESTAMP_STOP "backup-timestamp-stop"
#define MANIFEST_KEY_BACKUP_TYPE "backup-type"
#define MANIFEST_KEY_BLOCK_INCR_SIZE STRID5("bis", 0x4d220)
#define MANIFEST_KEY_BLOCK_INCR_MAP_SIZE STRID5("bims", 0x9b5220)
#define MANIFEST_KEY_BUNDLE_ID STRID5("bni", 0x25c20)
#define MANIFEST_KEY_BUNDLE_OFFSET STRID5("bno", 0x3dc20)
#define MANIFEST_KEY_CHECKSUM STRID5("checksum", 0x6d66b195030)
@ -1839,6 +1957,13 @@ manifestLoadCallback(void *callbackData, const String *const section, const Stri
JsonRead *const json = jsonReadNew(value);
jsonReadObjectBegin(json);
// Block incremental info
if (jsonReadKeyExpectStrId(json, MANIFEST_KEY_BLOCK_INCR_MAP_SIZE))
file.blockIncrMapSize = jsonReadUInt64(json);
if (jsonReadKeyExpectStrId(json, MANIFEST_KEY_BLOCK_INCR_SIZE))
file.blockIncrSize = jsonReadUInt64(json) * BLOCK_INCR_SIZE_FACTOR;
// Bundle info
if (jsonReadKeyExpectStrId(json, MANIFEST_KEY_BUNDLE_ID))
{
@ -2099,6 +2224,8 @@ manifestLoadCallback(void *callbackData, const String *const section, const Stri
manifest->pub.data.archiveStart = varStr(jsonToVar(value));
else if (strEqZ(key, MANIFEST_KEY_BACKUP_ARCHIVE_STOP))
manifest->pub.data.archiveStop = varStr(jsonToVar(value));
else if (strEqZ(key, MANIFEST_KEY_BACKUP_BLOCK_INCR))
manifest->pub.data.blockIncr = varBool(jsonToVar(value));
else if (strEqZ(key, MANIFEST_KEY_BACKUP_BUNDLE))
manifest->pub.data.bundle = varBool(jsonToVar(value));
else if (strEqZ(key, MANIFEST_KEY_BACKUP_LABEL))
@ -2339,6 +2466,13 @@ manifestSaveCallback(void *const callbackData, const String *const sectionNext,
jsonFromVar(VARSTR(manifest->pub.data.archiveStop)));
}
if (manifest->pub.data.blockIncr)
{
infoSaveValue(
infoSaveData, MANIFEST_SECTION_BACKUP, MANIFEST_KEY_BACKUP_BLOCK_INCR,
jsonFromVar(VARBOOL(manifest->pub.data.blockIncr)));
}
if (manifest->pub.data.bundle)
{
infoSaveValue(
@ -2557,6 +2691,16 @@ manifestSaveCallback(void *const callbackData, const String *const sectionNext,
const ManifestFile file = manifestFile(manifest, fileIdx);
JsonWrite *const json = jsonWriteObjectBegin(jsonWriteNewP());
// Block incremental info
if (file.blockIncrMapSize != 0)
jsonWriteUInt64(jsonWriteKeyStrId(json, MANIFEST_KEY_BLOCK_INCR_MAP_SIZE), file.blockIncrMapSize);
if (file.blockIncrSize != 0)
{
jsonWriteUInt64(
jsonWriteKeyStrId(json, MANIFEST_KEY_BLOCK_INCR_SIZE), file.blockIncrSize / BLOCK_INCR_SIZE_FACTOR);
}
// Bundle info
if (file.bundleId != 0)
{


@ -54,6 +54,7 @@ typedef struct ManifestData
time_t backupTimestampStop; // When did the backup stop?
BackupType backupType; // Type of backup: full, diff, incr
bool bundle; // Does the backup bundle files?
bool blockIncr; // Does the backup perform block incremental?
// ??? Note that these fields are redundant and verbose since storing the start/stop lsn as a uint64 would be sufficient.
// However, we currently lack the functions to transform these values back and forth so this will do for now.
@ -113,6 +114,8 @@ typedef struct ManifestFile
const String *reference; // Reference to a prior backup
uint64_t bundleId; // Bundle id
uint64_t bundleOffset; // Bundle offset
uint64_t blockIncrSize; // Size of incremental blocks
uint64_t blockIncrMapSize; // Block incremental map size
uint64_t size; // Original size
uint64_t sizeRepo; // Size in repo
time_t timestamp; // Original timestamp
@ -164,8 +167,8 @@ Constructors
***********************************************************************************************************************************/
// Build a new manifest for a PostgreSQL data directory
FN_EXTERN Manifest *manifestNewBuild(
const Storage *storagePg, unsigned int pgVersion, unsigned int pgCatalogVersion, bool online, bool checksumPage, bool bundle,
const StringList *excludeList, const Pack *tablespaceList);
const Storage *storagePg, unsigned int pgVersion, unsigned int pgCatalogVersion, time_t timestampStart, bool online,
bool checksumPage, bool bundle, bool blockIncr, const StringList *excludeList, const Pack *tablespaceList);
// Load a manifest from IO
FN_EXTERN Manifest *manifestNewLoad(IoRead *read);
@ -227,11 +230,10 @@ FN_EXTERN void manifestBuildIncr(Manifest *this, const Manifest *prior, BackupTy
// Set remaining values before the final save
FN_EXTERN void manifestBuildComplete(
Manifest *this, time_t timestampStart, const String *lsnStart, const String *archiveStart, time_t timestampStop,
const String *lsnStop, const String *archiveStop, unsigned int pgId, uint64_t pgSystemId, const Pack *dbList,
bool optionArchiveCheck, bool optionArchiveCopy, size_t optionBufferSize, unsigned int optionCompressLevel,
unsigned int optionCompressLevelNetwork, bool optionHardLink, unsigned int optionProcessMax, bool optionStandby,
const KeyValue *annotation);
Manifest *this, const String *lsnStart, const String *archiveStart, time_t timestampStop, const String *lsnStop,
const String *archiveStop, unsigned int pgId, uint64_t pgSystemId, const Pack *dbList, bool optionArchiveCheck,
bool optionArchiveCopy, size_t optionBufferSize, unsigned int optionCompressLevel, unsigned int optionCompressLevelNetwork,
bool optionHardLink, unsigned int optionProcessMax, bool optionStandby, const KeyValue *annotation);
/***********************************************************************************************************************************
Functions


@ -123,6 +123,8 @@ src_pgbackrest = [
'command/archive/push/protocol.c',
'command/archive/push/push.c',
'command/backup/backup.c',
'command/backup/blockIncr.c',
'command/backup/blockMap.c',
'command/backup/common.c',
'command/backup/pageChecksum.c',
'command/backup/protocol.c',
@ -144,6 +146,7 @@ src_pgbackrest = [
'command/repo/ls.c',
'command/repo/put.c',
'command/repo/rm.c',
'command/restore/deltaMap.c',
'command/restore/file.c',
'command/restore/protocol.c',
'command/restore/restore.c',
@ -176,10 +179,12 @@ src_pgbackrest = [
'common/exec.c',
'common/fork.c',
'common/ini.c',
'common/io/chunkedRead.c',
'common/io/client.c',
'common/io/fd.c',
'common/io/fdRead.c',
'common/io/fdWrite.c',
'common/io/filter/chunk.c',
'common/io/filter/size.c',
'common/io/http/client.c',
'common/io/http/common.c',


@ -269,17 +269,19 @@ unit:
# ----------------------------------------------------------------------------------------------------------------------------
- name: io
total: 5
total: 6
feature: IO
harness: pack
coverage:
- common/io/bufferRead
- common/io/bufferWrite
- common/io/chunkedRead
- common/io/fd
- common/io/fdRead
- common/io/fdWrite
- common/io/filter/buffer
- common/io/filter/chunk
- common/io/filter/filter
- common/io/filter/group
- common/io/filter/sink
@ -815,11 +817,13 @@ unit:
# ----------------------------------------------------------------------------------------------------------------------------
- name: backup
total: 11
total: 13
harness: backup
coverage:
- command/backup/backup
- command/backup/blockIncr
- command/backup/blockMap
- command/backup/common
- command/backup/file
- command/backup/pageChecksum
@ -832,9 +836,10 @@ unit:
# ----------------------------------------------------------------------------------------------------------------------------
- name: restore
total: 12
total: 14
coverage:
- command/restore/deltaMap
- command/restore/file
- command/restore/protocol
- command/restore/restore


@ -1108,6 +1108,11 @@ sub configCreate
$oParamHash{&CFGDEF_SECTION_GLOBAL}{'repo1-bundle-limit'} = '64KiB';
}
if ($oParam->{bBlockIncr})
{
$oParamHash{&CFGDEF_SECTION_GLOBAL}{'repo1-block'} = 'y';
}
$oParamHash{&CFGDEF_SECTION_GLOBAL}{'log-path'} = $self->logPath();
$oParamHash{&CFGDEF_SECTION_GLOBAL}{'lock-path'} = $self->lockPath();
@ -2006,13 +2011,17 @@ sub restoreCompare
${$oExpectedManifestRef}{&MANIFEST_SECTION_TARGET_FILE}{$strName}{size});
}
# Remove repo-size, bno, bni from the manifest
# Remove repo-size, bno, bni, bims, bis from the manifest
$oActualManifest->remove(MANIFEST_SECTION_TARGET_FILE, $strName, MANIFEST_SUBKEY_REPO_SIZE);
delete($oExpectedManifestRef->{&MANIFEST_SECTION_TARGET_FILE}{$strName}{&MANIFEST_SUBKEY_REPO_SIZE});
$oActualManifest->remove(MANIFEST_SECTION_TARGET_FILE, $strName, "bni");
delete($oExpectedManifestRef->{&MANIFEST_SECTION_TARGET_FILE}{$strName}{"bni"});
$oActualManifest->remove(MANIFEST_SECTION_TARGET_FILE, $strName, "bno");
delete($oExpectedManifestRef->{&MANIFEST_SECTION_TARGET_FILE}{$strName}{"bno"});
$oActualManifest->remove(MANIFEST_SECTION_TARGET_FILE, $strName, "bims");
delete($oExpectedManifestRef->{&MANIFEST_SECTION_TARGET_FILE}{$strName}{"bims"});
$oActualManifest->remove(MANIFEST_SECTION_TARGET_FILE, $strName, "bis");
delete($oExpectedManifestRef->{&MANIFEST_SECTION_TARGET_FILE}{$strName}{"bis"});
if ($oActualManifest->get(MANIFEST_SECTION_TARGET_FILE, $strName, MANIFEST_SUBKEY_SIZE) != 0)
{
@ -2153,6 +2162,10 @@ sub restoreCompare
$oExpectedManifestRef->{&MANIFEST_SECTION_BACKUP}{'backup-bundle'});
}
# Delete block incr headers since old Perl manifest code will not generate them
delete($oExpectedManifestRef->{&MANIFEST_SECTION_BACKUP}{'backup-block-incr'});
delete($oExpectedManifestRef->{&MANIFEST_SECTION_BACKUP}{'backup-block-incr-size'});
$oActualManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_LSN_START, undef,
${$oExpectedManifestRef}{&MANIFEST_SECTION_BACKUP}{&MANIFEST_KEY_LSN_START});
$oActualManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_LSN_STOP, undef,


@ -146,7 +146,8 @@ sub setup
bArchiveAsync => $$oConfigParam{bArchiveAsync},
strStorage => $oConfigParam->{strStorage},
iRepoTotal => $oConfigParam->{iRepoTotal},
bBundle => $oConfigParam->{bBundle}});
bBundle => $oConfigParam->{bBundle},
bBlockIncr => $oConfigParam->{bBlockIncr}});
# Create backup config if backup host exists
if (defined($oHostBackup))
@ -157,7 +158,8 @@ sub setup
bHardlink => $$oConfigParam{bHardLink},
strStorage => $oConfigParam->{strStorage},
iRepoTotal => $oConfigParam->{iRepoTotal},
bBundle => $oConfigParam->{bBundle}});
bBundle => $oConfigParam->{bBundle},
bBlockIncr => $oConfigParam->{bBlockIncr}});
}
# If backup host is not defined set it to db-primary
else
@ -186,7 +188,8 @@ sub setup
bArchiveAsync => $$oConfigParam{bArchiveAsync},
strStorage => $oConfigParam->{strStorage},
iRepoTotal => $oConfigParam->{iRepoTotal},
bBundle => $oConfigParam->{bBundle}});
bBundle => $oConfigParam->{bBundle},
bBlockIncr => $oConfigParam->{bBlockIncr}});
}
# Create object storage


@ -52,16 +52,16 @@ sub run
foreach my $rhRun
(
{pg => '9.3', dst => 'backup', tls => 0, stg => AZURE, enc => 0, cmp => NONE, rt => 2, bnd => 0},
{pg => '9.4', dst => 'db-standby', tls => 0, stg => POSIX, enc => 1, cmp => LZ4, rt => 1, bnd => 1},
{pg => '9.5', dst => 'backup', tls => 1, stg => S3, enc => 0, cmp => BZ2, rt => 1, bnd => 0},
{pg => '9.6', dst => 'backup', tls => 0, stg => POSIX, enc => 0, cmp => NONE, rt => 2, bnd => 1},
{pg => '10', dst => 'db-standby', tls => 1, stg => GCS, enc => 1, cmp => GZ, rt => 2, bnd => 0},
{pg => '11', dst => 'backup', tls => 1, stg => AZURE, enc => 0, cmp => ZST, rt => 2, bnd => 1},
{pg => '12', dst => 'backup', tls => 0, stg => S3, enc => 1, cmp => LZ4, rt => 1, bnd => 0},
{pg => '13', dst => 'db-standby', tls => 1, stg => GCS, enc => 0, cmp => ZST, rt => 1, bnd => 1},
{pg => '14', dst => 'backup', tls => 0, stg => POSIX, enc => 1, cmp => LZ4, rt => 2, bnd => 0},
{pg => '15', dst => 'db-standby', tls => 0, stg => AZURE, enc => 0, cmp => NONE, rt => 2, bnd => 1},
{pg => '9.3', dst => 'backup', tls => 0, stg => AZURE, enc => 0, cmp => NONE, rt => 2, bnd => 0, bi => 0},
{pg => '9.4', dst => 'db-standby', tls => 0, stg => POSIX, enc => 1, cmp => LZ4, rt => 1, bnd => 1, bi => 0},
{pg => '9.5', dst => 'backup', tls => 1, stg => S3, enc => 0, cmp => BZ2, rt => 1, bnd => 0, bi => 1},
{pg => '9.6', dst => 'backup', tls => 0, stg => POSIX, enc => 0, cmp => NONE, rt => 2, bnd => 1, bi => 1},
{pg => '10', dst => 'db-standby', tls => 1, stg => GCS, enc => 1, cmp => GZ, rt => 2, bnd => 0, bi => 0},
{pg => '11', dst => 'backup', tls => 1, stg => AZURE, enc => 0, cmp => ZST, rt => 2, bnd => 1, bi => 0},
{pg => '12', dst => 'backup', tls => 0, stg => S3, enc => 1, cmp => LZ4, rt => 1, bnd => 0, bi => 1},
{pg => '13', dst => 'db-standby', tls => 1, stg => GCS, enc => 0, cmp => ZST, rt => 1, bnd => 1, bi => 1},
{pg => '14', dst => 'backup', tls => 0, stg => POSIX, enc => 1, cmp => LZ4, rt => 2, bnd => 0, bi => 0},
{pg => '15', dst => 'db-standby', tls => 0, stg => AZURE, enc => 0, cmp => NONE, rt => 2, bnd => 1, bi => 1},
)
{
# Only run tests for this pg version
@ -76,6 +76,7 @@ sub run
my $strCompressType = $rhRun->{cmp};
my $iRepoTotal = $rhRun->{rt};
my $bBundle = $rhRun->{bnd};
my $bBlockIncr = $rhRun->{bi};
# Some tests are not version specific so only run them on a single version of PostgreSQL
my $bNonVersionSpecific = $self->pgVersion() eq PG_VERSION_96;
@ -90,7 +91,7 @@ sub run
false,
{bHostBackup => $bHostBackup, bStandby => true, bTls => $bTls, strBackupDestination => $strBackupDestination,
strCompressType => $strCompressType, bArchiveAsync => false, strStorage => $strStorage,
bRepoEncrypt => $bRepoEncrypt, iRepoTotal => $iRepoTotal, bBundle => $bBundle});
bRepoEncrypt => $bRepoEncrypt, iRepoTotal => $iRepoTotal, bBundle => $bBundle, bBlockIncr => $bBlockIncr});
# Some commands will fail because of the bogus host created when a standby is present. These options reset the bogus host
# so it won't interfere with commands that won't tolerate a connection failure.

File diff suppressed because it is too large.


@ -675,8 +675,8 @@ testRun(void)
"\"backup-annotation\":{\"extra key\":\"this is an annotation\",\"source\":\"this is another annotation\"},"
"\"backup-archive-start\":\"000000010000000000000005\",\"backup-archive-stop\":\"000000010000000000000005\","
"\"backup-error\":false,\"backup-info-repo-size\":2369186,"
"\"backup-info-repo-size-delta\":346,\"backup-info-size\":20162900,\"backup-info-size-delta\":8428,"
"\"backup-lsn-start\":\"285/89000028\","
"\"backup-info-repo-size-delta\":346,\"backup-info-repo-size-map\":100,\"backup-info-repo-size-map-delta\":12"
",\"backup-info-size\":20162900,\"backup-info-size-delta\":8428,\"backup-lsn-start\":\"285/89000028\","
"\"backup-prior\":\"20201116-155000F\",\"backup-reference\":[\"20201116-155000F\"],"
"\"backup-timestamp-start\":1605799260,\"backup-timestamp-stop\":1605799263,\"backup-type\":\"incr\","
"\"db-id\":2,\"option-archive-check\":true,\"option-archive-copy\":false,\"option-backup-standby\":false,"
@ -1251,7 +1251,9 @@ testRun(void)
"\"delta\":8428,"
"\"repository\":{"
"\"delta\":346,"
"\"size\":2369186"
"\"delta-map\":12,"
"\"size\":2369186,"
"\"size-map\":100"
"},"
"\"size\":20162900"
"},"


@ -1,12 +1,17 @@
/***********************************************************************************************************************************
Test Restore Command
***********************************************************************************************************************************/
#include "command/stanza/create.h"
#include "command/backup/backup.h"
#include "command/backup/blockIncr.h"
#include "command/backup/protocol.h"
#include "common/compress/helper.h"
#include "common/crypto/cipherBlock.h"
#include "postgres/version.h"
#include "storage/posix/storage.h"
#include "storage/helper.h"
#include "common/harnessBackup.h"
#include "common/harnessConfig.h"
#include "common/harnessInfo.h"
#include "common/harnessPostgres.h"
@ -141,12 +146,59 @@ testRun(void)
FUNCTION_HARNESS_VOID();
// Install local command handler shim
static const ProtocolServerHandler testLocalHandlerList[] = {PROTOCOL_SERVER_HANDLER_RESTORE_LIST};
static const ProtocolServerHandler testLocalHandlerList[] =
{PROTOCOL_SERVER_HANDLER_BACKUP_LIST PROTOCOL_SERVER_HANDLER_RESTORE_LIST};
hrnProtocolLocalShimInstall(testLocalHandlerList, LENGTH_OF(testLocalHandlerList));
// Create default storage object for testing
Storage *storageTest = storagePosixNewP(TEST_PATH_STR, .write = true);
// *****************************************************************************************************************************
if (testBegin("DeltaMap"))
{
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("too large for one buffer");
Buffer *output = bufNew(0);
IoWrite *write = ioBufferWriteNew(output);
ioFilterGroupAdd(ioWriteFilterGroup(write), deltaMapNew(3));
ioWriteOpen(write);
TEST_RESULT_VOID(ioWrite(write, BUFSTRDEF("ABCDEF")), "write");
TEST_RESULT_VOID(ioWrite(write, BUFSTRDEF("ABC")), "write");
TEST_RESULT_VOID(ioWriteClose(write), "close");
TEST_RESULT_STR_Z(
strNewEncode(encodingHex, pckReadBinP(ioFilterGroupResultP(ioWriteFilterGroup(write), DELTA_MAP_FILTER_TYPE))),
"3c01bdbb26f358bab27f267924aa2c9a03fcfdb8"
"6dae29c06c5f04601445c493156d10fe1be23b6d"
"3c01bdbb26f358bab27f267924aa2c9a03fcfdb8",
"delta map");
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("buffer smaller than block and remainder");
output = bufNew(0);
write = ioBufferWriteNew(output);
ioFilterGroupAdd(ioWriteFilterGroup(write), deltaMapNew(3));
ioWriteOpen(write);
TEST_RESULT_VOID(ioWrite(write, BUFSTRDEF("DE")), "write");
TEST_RESULT_VOID(ioWrite(write, BUFSTRDEF("FA")), "write");
TEST_RESULT_VOID(ioWrite(write, BUFSTRDEF("BC")), "write");
TEST_RESULT_VOID(ioWrite(write, BUFSTRDEF("AB")), "write");
TEST_RESULT_VOID(ioWrite(write, BUFSTRDEF("CX")), "write");
TEST_RESULT_VOID(ioWriteClose(write), "close");
TEST_RESULT_STR_Z(
strNewEncode(encodingHex, pckReadBinP(ioFilterGroupResultP(ioWriteFilterGroup(write), DELTA_MAP_FILTER_TYPE))),
"6dae29c06c5f04601445c493156d10fe1be23b6d"
"3c01bdbb26f358bab27f267924aa2c9a03fcfdb8"
"3c01bdbb26f358bab27f267924aa2c9a03fcfdb8"
"c032adc1ff629c9b66f22749ad667e6beadf144b",
"delta map");
}
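
The expected strings above can be verified by hand: a delta map is just the SHA-1 digest of each block, concatenated in block order, with the trailing partial block ("X" in the second test) hashed as-is. That is why the repeated "ABC" blocks yield the repeated digest 3c01bdbb26f358bab27f267924aa2c9a03fcfdb8 no matter how the writes were split. A minimal standalone sketch of the computation, assuming OpenSSL's SHA1() in place of pgbackrest's internal crypto wrappers (build with -lcrypto):

/* Compute a delta map for "ABCDEFABC" with a 3-byte block size: the output
   is sha1("ABC"), sha1("DEF"), sha1("ABC"), matching the first test above. */
#include <stdio.h>
#include <string.h>
#include <openssl/sha.h>

int main(void)
{
    const unsigned char input[] = "ABCDEFABC";
    const size_t blockSize = 3;
    const size_t inputSize = strlen((const char *)input);

    for (size_t offset = 0; offset < inputSize; offset += blockSize)
    {
        const size_t chunkSize = inputSize - offset < blockSize ? inputSize - offset : blockSize;
        unsigned char digest[SHA_DIGEST_LENGTH];

        SHA1(input + offset, chunkSize, digest);

        /* Each block contributes one digest to the map */
        for (size_t digestIdx = 0; digestIdx < SHA_DIGEST_LENGTH; digestIdx++)
            printf("%02x", digest[digestIdx]);

        printf("\n");
    }

    return 0;
}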
// *****************************************************************************************************************************
if (testBegin("restoreFile()"))
{
@ -191,7 +243,7 @@ testRun(void)
TEST_ERROR(
restoreFile(
strNewFmt(STORAGE_REPO_BACKUP "/%s/%s.gz", strZ(repoFileReferenceFull), strZ(repoFile1)), repoIdx, compressTypeGz,
0, false, false, STRDEF("badpass"), fileList),
0, false, false, STRDEF("badpass"), NULL, fileList),
ChecksumError,
"error restoring 'normal': actual checksum 'd1cd8a7d11daa26814b93eb604e1d49ab4b43770' does not match expected checksum"
" 'ffffffffffffffffffffffffffffffffffffffff'");
@ -2452,12 +2504,14 @@ testRun(void)
manifest->pub.data.pgVersion = PG_VERSION_10;
manifest->pub.data.pgCatalogVersion = hrnPgCatalogVersion(PG_VERSION_10);
manifest->pub.data.backupType = backupTypeIncr;
manifest->pub.data.blockIncr = true;
manifest->pub.data.backupTimestampCopyStart = 1482182861; // So file timestamps should be less than this
manifest->pub.referenceList = strLstNew();
strLstAddZ(manifest->pub.referenceList, TEST_LABEL_FULL);
strLstAddZ(manifest->pub.referenceList, TEST_LABEL_DIFF);
strLstAddZ(manifest->pub.referenceList, TEST_LABEL_INCR);
strLstAddZ(manifest->pub.referenceList, TEST_LABEL);
// Data directory
manifestTargetAdd(manifest, &(ManifestTarget){.name = MANIFEST_TARGET_PGDATA_STR, .path = pgPath});
@ -2717,6 +2771,83 @@ testRun(void)
.checksumSha1 = bufPtr(bufNewDecode(encodingHex, STRDEF("401215e092779574988a854d8c7caed7f91dba4b")))});
HRN_STORAGE_PUT_Z(storageRepoWrite(), TEST_REPO_PATH "pg_hba.conf", "PG_HBA.CONF");
// Block incremental with no references to a prior backup
fileBuffer = bufNew(8192 * 3);
memset(bufPtr(fileBuffer), 1, 8192);
memset(bufPtr(fileBuffer) + 8192, 2, 8192);
memset(bufPtr(fileBuffer) + 16384, 3, 8192);
bufUsedSet(fileBuffer, bufSize(fileBuffer));
IoWrite *write = storageWriteIo(storageNewWriteP(storageRepoWrite(), STRDEF(TEST_REPO_PATH "base/1/bi-no-ref.pgbi")));
ioFilterGroupAdd(ioWriteFilterGroup(write), blockIncrNew(8192, 3, 0, 0, NULL, NULL, NULL));
ioFilterGroupAdd(ioWriteFilterGroup(write), ioSizeNew());
ioWriteOpen(write);
ioWrite(write, fileBuffer);
ioWriteClose(write);
uint64_t blockIncrMapSize = pckReadU64P(ioFilterGroupResultP(ioWriteFilterGroup(write), BLOCK_INCR_FILTER_TYPE));
uint64_t repoSize = pckReadU64P(ioFilterGroupResultP(ioWriteFilterGroup(write), SIZE_FILTER_TYPE));
manifestFileAdd(
manifest,
&(ManifestFile){
.name = STRDEF(TEST_PGDATA "base/1/bi-no-ref"), .size = bufUsed(fileBuffer), .sizeRepo = repoSize,
.blockIncrSize = 8192, .blockIncrMapSize = blockIncrMapSize, .timestamp = 1482182860, .mode = 0600,
.group = groupName(), .user = userName(),
.checksumSha1 = bufPtr(bufNewDecode(encodingHex, STRDEF("953cdcc904c5d4135d96fc0833f121bf3033c74c")))});
// Block incremental with a broken reference to show that unneeded references will not be used
Buffer *fileUnused = bufNew(8192 * 6);
memset(bufPtr(fileUnused), 1, bufSize(fileUnused));
bufUsedSet(fileUnused, bufSize(fileUnused));
Buffer *fileUnusedMap = bufNew(0);
write = ioBufferWriteNew(fileUnusedMap);
ioFilterGroupAdd(ioWriteFilterGroup(write), blockIncrNew(8192, 0, 0, 0, NULL, NULL, NULL));
ioWriteOpen(write);
ioWrite(write, fileUnused);
ioWriteClose(write);
size_t fileUnusedMapSize = pckReadU64P(ioFilterGroupResultP(ioWriteFilterGroup(write), BLOCK_INCR_FILTER_TYPE));
Buffer *fileUsed = bufDup(fileUnused);
memset(bufPtr(fileUsed), 3, 8192);
memset(bufPtr(fileUsed) + (8192 * 2), 3, 24576);
size_t bufferSizeOld = ioBufferSize();
ioBufferSizeSet(777);
write = storageWriteIo(storageNewWriteP(storageRepoWrite(), STRDEF(TEST_REPO_PATH "base/1/bi-unused-ref.pgbi")));
ioFilterGroupAdd(
ioWriteFilterGroup(write),
blockIncrNew(
8192, 3, 0, 0, BUF(bufPtr(fileUnusedMap) + bufUsed(fileUnusedMap) - fileUnusedMapSize, fileUnusedMapSize),
NULL, NULL));
ioFilterGroupAdd(ioWriteFilterGroup(write), ioSizeNew());
ioWriteOpen(write);
ioWrite(write, fileUsed);
ioWriteClose(write);
ioBufferSizeSet(bufferSizeOld);
uint64_t fileUsedMapSize = pckReadU64P(ioFilterGroupResultP(ioWriteFilterGroup(write), BLOCK_INCR_FILTER_TYPE));
uint64_t fileUsedRepoSize = pckReadU64P(ioFilterGroupResultP(ioWriteFilterGroup(write), SIZE_FILTER_TYPE));
manifestFileAdd(
manifest,
&(ManifestFile){
.name = STRDEF(TEST_PGDATA "base/1/bi-unused-ref"), .size = bufUsed(fileUsed), .sizeRepo = fileUsedRepoSize,
.blockIncrSize = 8192, .blockIncrMapSize = fileUsedMapSize, .timestamp = 1482182860, .mode = 0600,
.group = groupName(), .user = userName(),
.checksumSha1 = bufPtr(bufNewDecode(encodingHex, STRDEF("febd680181d4cd315dce942348862c25fbd731f3")))});
memset(bufPtr(fileUnused) + (8192 * 4), 3, 8192);
HRN_STORAGE_PATH_CREATE(storagePgWrite(), "base/1", .mode = 0700);
HRN_STORAGE_PUT(storagePgWrite(), "base/1/bi-unused-ref", fileUnused, .modeFile = 0600);
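
This setup is what makes delta restore cheap for block incremental files: restore compares the delta map of the file already on disk with the block map stored for the backup and fetches only mismatched blocks from the repository. A sketch of that comparison, with illustrative names rather than pgbackrest's API:

/* Decide whether delta restore must fetch a block. diskMap holds per-block
   SHA-1 digests of the file on disk (see the DeltaMap tests), backupMap the
   digests recorded for the backup. Illustrative sketch only. */
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#define DIGEST_SIZE 20

static bool
blockNeedsFetch(const unsigned char *diskMap, size_t diskBlockTotal, const unsigned char *backupMap, size_t blockIdx)
{
    /* Blocks past the end of the file on disk must always be fetched */
    if (blockIdx >= diskBlockTotal)
        return true;

    /* Otherwise fetch only when the digests differ */
    return memcmp(diskMap + blockIdx * DIGEST_SIZE, backupMap + blockIdx * DIGEST_SIZE, DIGEST_SIZE) != 0;
}

int main(void)
{
    unsigned char diskMap[DIGEST_SIZE * 2] = {0};
    unsigned char backupMap[DIGEST_SIZE * 3] = {0};

    backupMap[DIGEST_SIZE] = 1; /* second block differs */

    /* Unchanged, changed, and past-end blocks */
    return blockNeedsFetch(diskMap, 2, backupMap, 0) == false &&
        blockNeedsFetch(diskMap, 2, backupMap, 1) == true &&
        blockNeedsFetch(diskMap, 2, backupMap, 2) == true ? 0 : 1;
}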
// tablespace_map (will be ignored during restore)
manifestFileAdd(
manifest,
@ -2808,15 +2939,17 @@ testRun(void)
"P00 DETAIL: remove invalid path '" TEST_PATH "/pg/global/bogus3'\n"
"P00 DETAIL: remove invalid link '" TEST_PATH "/pg/pg_wal2'\n"
"P00 DETAIL: remove invalid file '" TEST_PATH "/pg/tablespace_map'\n"
"P00 DETAIL: create path '" TEST_PATH "/pg/base'\n"
"P00 DETAIL: create path '" TEST_PATH "/pg/base/1'\n"
"P00 DETAIL: create path '" TEST_PATH "/pg/base/16384'\n"
"P00 DETAIL: create path '" TEST_PATH "/pg/base/32768'\n"
"P00 DETAIL: create symlink '" TEST_PATH "/pg/pg_xact' to '../xact'\n"
"P00 DETAIL: create symlink '" TEST_PATH "/pg/pg_hba.conf' to '../config/pg_hba.conf'\n"
"P00 DETAIL: create symlink '" TEST_PATH "/pg/postgresql.conf' to '../config/postgresql.conf'\n"
"P01 DETAIL: restore file " TEST_PATH "/pg/base/1/bi-unused-ref (48KB, [PCT]) checksum"
" febd680181d4cd315dce942348862c25fbd731f3\n"
"P01 DETAIL: restore file " TEST_PATH "/pg/base/32768/32769 (32KB, [PCT]) checksum"
" a40f0986acb1531ce0cc75a23dcf8aa406ae9081\n"
"P01 DETAIL: restore file " TEST_PATH "/pg/base/1/bi-no-ref (24KB, [PCT]) checksum"
" 953cdcc904c5d4135d96fc0833f121bf3033c74c\n"
"P01 DETAIL: restore file " TEST_PATH "/pg/base/16384/16385 (16KB, [PCT]) checksum"
" d74e5f7ebe52a3ed468ba08c5b6aefaccd1ca88f\n"
"P01 DETAIL: restore file " TEST_PATH "/pg/global/pg_control.pgbackrest.tmp (8KB, [PCT])"
@ -2866,7 +2999,7 @@ testRun(void)
"P00 DETAIL: sync path '" TEST_PATH "/pg/pg_tblspc/1/PG_10_201707211'\n"
"P00 INFO: restore global/pg_control (performed last to ensure aborted restores cannot be started)\n"
"P00 DETAIL: sync path '" TEST_PATH "/pg/global'\n"
"P00 INFO: restore size = [SIZE], file total = 21");
"P00 INFO: restore size = [SIZE], file total = 23");
TEST_STORAGE_LIST(
storagePg(), NULL,
@ -2881,6 +3014,8 @@ testRun(void)
"base/1/30 {s=1, t=1482182860}\n"
"base/1/31 {s=1, t=1482182860}\n"
"base/1/PG_VERSION {s=4, t=1482182860}\n"
"base/1/bi-no-ref {s=24576, t=1482182860}\n"
"base/1/bi-unused-ref {s=49152, t=1482182860}\n"
"base/16384/\n"
"base/16384/16385 {s=16384, t=1482182860}\n"
"base/16384/PG_VERSION {s=4, t=1482182860}\n"
@ -3004,7 +3139,11 @@ testRun(void)
"P00 DETAIL: create symlink '" TEST_PATH "/pg/pg_wal' to '../wal'\n"
"P00 DETAIL: create path '" TEST_PATH "/pg/pg_xact'\n"
"P00 DETAIL: create symlink '" TEST_PATH "/pg/pg_hba.conf' to '../config/pg_hba.conf'\n"
"P01 DETAIL: restore file " TEST_PATH "/pg/base/1/bi-unused-ref - exists and matches backup (48KB, [PCT]) checksum"
" febd680181d4cd315dce942348862c25fbd731f3\n"
"P01 DETAIL: restore zeroed file " TEST_PATH "/pg/base/32768/32769 (32KB, [PCT])\n"
"P01 DETAIL: restore file " TEST_PATH "/pg/base/1/bi-no-ref - exists and matches backup (24KB, [PCT]) checksum"
" 953cdcc904c5d4135d96fc0833f121bf3033c74c\n"
"P01 DETAIL: restore file " TEST_PATH "/pg/base/16384/16385 - exists and matches backup (16KB, [PCT])"
" checksum d74e5f7ebe52a3ed468ba08c5b6aefaccd1ca88f\n"
"P01 DETAIL: restore file " TEST_PATH "/pg/global/pg_control.pgbackrest.tmp (8KB, [PCT])"
@ -3054,7 +3193,7 @@ testRun(void)
"P00 DETAIL: sync path '" TEST_PATH "/pg/pg_tblspc/1/PG_10_201707211'\n"
"P00 INFO: restore global/pg_control (performed last to ensure aborted restores cannot be started)\n"
"P00 DETAIL: sync path '" TEST_PATH "/pg/global'\n"
"P00 INFO: restore size = [SIZE], file total = 21");
"P00 INFO: restore size = [SIZE], file total = 23");
// Check stanza archive spool path was removed
TEST_STORAGE_LIST_EMPTY(storageSpool(), STORAGE_PATH_ARCHIVE);
@ -3081,5 +3220,85 @@ testRun(void)
protocolFree();
}
// *****************************************************************************************************************************
if (testBegin("cmdBackup() and cmdRestore()"))
{
const String *pgPath = STRDEF(TEST_PATH "/pg");
const String *repoPath = STRDEF(TEST_PATH "/repo");
// Create pg_control
HRN_PG_CONTROL_PUT(storagePgWrite(), PG_VERSION_15, .pageChecksum = false);
// Create encrypted stanza
StringList *argList = strLstNew();
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
hrnCfgArgRaw(argList, cfgOptPgPath, pgPath);
hrnCfgArgRawBool(argList, cfgOptOnline, false);
hrnCfgArgRawZ(argList, cfgOptRepoCipherType, "aes-256-cbc");
hrnCfgEnvRawZ(cfgOptRepoCipherPass, TEST_CIPHER_PASS);
HRN_CFG_LOAD(cfgCmdStanzaCreate, argList);
TEST_RESULT_VOID(cmdStanzaCreate(), "stanza create");
// It is better to put as few tests here as possible because cmp/enc makes tests more expensive (especially with valgrind)
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("full backup with block incr");
// Zeroed file large enough to use block incr
Buffer *relation = bufNew(manifestBuildBlockIncrSizeMap[LENGTH_OF(manifestBuildBlockIncrSizeMap) - 1].fileSize * 2);
memset(bufPtr(relation), 0, bufSize(relation));
bufUsedSet(relation, bufSize(relation));
HRN_STORAGE_PUT(storagePgWrite(), PG_PATH_BASE "/1/2", relation);
// Add postgresql.auto.conf to contain recovery settings
HRN_STORAGE_PUT_EMPTY(storagePgWrite(), PG_FILE_POSTGRESQLAUTOCONF);
// Backup
argList = strLstNew();
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
hrnCfgArgRaw(argList, cfgOptPgPath, pgPath);
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
hrnCfgArgRawStrId(argList, cfgOptType, backupTypeFull);
hrnCfgArgRawBool(argList, cfgOptRepoBundle, true);
hrnCfgArgRawBool(argList, cfgOptRepoBlock, true);
hrnCfgArgRawBool(argList, cfgOptOnline, false);
hrnCfgArgRawZ(argList, cfgOptRepoCipherType, "aes-256-cbc");
hrnCfgEnvRawZ(cfgOptRepoCipherPass, TEST_CIPHER_PASS);
HRN_CFG_LOAD(cfgCmdBackup, argList);
TEST_RESULT_VOID(hrnCmdBackup(), "backup");
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("restore with block incr");
// Remove all files from pg path
HRN_STORAGE_PATH_REMOVE(storagePgWrite(), NULL, .recurse = true);
argList = strLstNew();
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
hrnCfgArgRaw(argList, cfgOptPgPath, pgPath);
hrnCfgArgRawZ(argList, cfgOptSpoolPath, TEST_PATH "/spool");
hrnCfgArgRawZ(argList, cfgOptRepoCipherType, "aes-256-cbc");
hrnCfgEnvRawZ(cfgOptRepoCipherPass, TEST_CIPHER_PASS);
HRN_CFG_LOAD(cfgCmdRestore, argList);
TEST_RESULT_VOID(cmdRestore(), "restore");
TEST_STORAGE_LIST(
storagePg(), NULL,
"base/\n"
"base/1/\n"
"base/1/2\n"
"global/\n"
"global/pg_control\n"
"postgresql.auto.conf\n"
"recovery.signal\n",
.level = storageInfoLevelType);
}
FUNCTION_HARNESS_RETURN_VOID();
}


@ -771,5 +771,46 @@ testRun(void)
TRY_END();
}
// *****************************************************************************************************************************
if (testBegin("IoChunkedRead and ioChunkedWrite"))
{
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("write chunks");
ioBufferSizeSet(3);
Buffer *destination = bufNew(256);
IoWrite *write = ioBufferWriteNew(destination);
ioFilterGroupAdd(ioWriteFilterGroup(write), ioChunkNew());
ioWriteOpen(write);
TEST_RESULT_VOID(ioWrite(write, BUFSTRDEF("ABC")), "write");
TEST_RESULT_VOID(ioWrite(write, BUFSTRDEF("DEF")), "write");
TEST_RESULT_VOID(ioWriteClose(write), "close");
TEST_RESULT_STR_Z(strNewEncode(encodingHex, destination), "034142430144454600", "check");
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("read chunks");
ioBufferSizeSet(2);
IoRead *read = ioChunkedReadNew(ioBufferReadNewOpen(destination));
ioReadOpen(read);
Buffer *actual = bufNew(3);
TEST_RESULT_UINT(ioRead(read, actual), 3, "read");
TEST_RESULT_STR_Z(strNewBuf(actual), "ABC", "check");
actual = bufNew(1);
TEST_RESULT_UINT(ioRead(read, actual), 1, "read");
TEST_RESULT_STR_Z(strNewBuf(actual), "D", "check");
actual = bufNew(3);
TEST_RESULT_UINT(ioRead(read, actual), 2, "read");
TEST_RESULT_STR_Z(strNewBuf(actual), "EF", "check");
actual = bufNew(2);
TEST_RESULT_UINT(ioRead(read, actual), 0, "eof");
}
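
Decoding the expected hex in the write test helps: 03 414243 01 444546 00 is a length-prefixed chunk "ABC", a second chunk header, the chunk "DEF", and a zero byte terminating the stream (the real header encoding is more compact than a plain length, as the 0x01 before "DEF" shows). The read test then pins down the property callers rely on: reads may span chunk boundaries and only return short at end of stream. A simplified reader sketch that uses a plain one-byte length header instead of pgbackrest's encoding:

/* Simplified chunked reader: each chunk is a one-byte length followed by the
   payload, and a zero header ends the stream. Reads may span chunks. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

typedef struct
{
    const unsigned char *data; /* encoded stream */
    size_t pos;                /* current position in the stream */
    size_t chunkRemaining;     /* payload bytes left in the current chunk */
    int eof;
} ChunkedRead;

static size_t
chunkedRead(ChunkedRead *this, unsigned char *out, size_t outSize)
{
    size_t total = 0;

    while (total < outSize && !this->eof)
    {
        /* At a chunk boundary read the next header byte */
        if (this->chunkRemaining == 0)
        {
            const unsigned char header = this->data[this->pos++];

            if (header == 0) /* zero header terminates the stream */
            {
                this->eof = 1;
                break;
            }

            this->chunkRemaining = header;
        }

        /* Copy as much of the current chunk as the caller asked for */
        const size_t copySize = outSize - total < this->chunkRemaining ? outSize - total : this->chunkRemaining;

        memcpy(out + total, this->data + this->pos, copySize);
        this->pos += copySize;
        this->chunkRemaining -= copySize;
        total += copySize;
    }

    return total;
}

int main(void)
{
    /* "ABC" and "DEF" as two simple chunks */
    const unsigned char stream[] = {3, 'A', 'B', 'C', 3, 'D', 'E', 'F', 0};
    ChunkedRead read = {.data = stream};
    unsigned char buffer[4];

    printf("%zu\n", chunkedRead(&read, buffer, 4)); /* 4: "ABCD" spans chunks */
    printf("%zu\n", chunkedRead(&read, buffer, 4)); /* 2: "EF", then end of stream */

    return 0;
}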
FUNCTION_HARNESS_RETURN_VOID();
}


@ -1197,6 +1197,16 @@ testRun(void)
configParse(storageTest, strLstSize(argList), strLstPtr(argList), false), OptionInvalidError,
"option 'force' not valid without option 'no-online'");
argList = strLstNew();
strLstAddZ(argList, TEST_BACKREST_EXE);
hrnCfgArgRawZ(argList, cfgOptPgPath, "/path/to/db");
hrnCfgArgRawZ(argList, cfgOptStanza, "db");
hrnCfgArgRawBool(argList, cfgOptRepoBlock, true);
strLstAddZ(argList, TEST_COMMAND_BACKUP);
TEST_ERROR(
configParse(storageTest, strLstSize(argList), strLstPtr(argList), false), OptionInvalidError,
"option 'repo1-block' not valid without option 'repo1-bundle'");
argList = strLstNew();
strLstAddZ(argList, TEST_BACKREST_EXE);
hrnCfgArgRawZ(argList, cfgOptPgPath, "/path/to/db");


@ -137,7 +137,8 @@ testRun(void)
"\"option-checksum-page\":false,\"option-compress\":true,\"option-hardlink\":false,\"option-online\":true}\n"
"20161219-212741F_20161219-212918I={\"backrest-format\":5,\"backrest-version\":\"2.04\","
"\"backup-archive-start\":null,\"backup-archive-stop\":null,"
"\"backup-info-repo-size\":3159811,\"backup-info-repo-size-delta\":15765,\"backup-info-size\":26897030,"
"\"backup-info-repo-size\":3159811,\"backup-info-repo-size-delta\":15765,\"backup-info-repo-size-map\":100,"
"\"backup-info-repo-size-map-delta\":12,\"backup-info-size\":26897030,"
"\"backup-info-size-delta\":163866,\"backup-prior\":\"20161219-212741F\",\"backup-reference\":[\"20161219-212741F\","
"\"20161219-212741F_20161219-212803D\"],"
"\"backup-timestamp-start\":1482182877,\"backup-timestamp-stop\":1482182883,\"backup-type\":\"incr\",\"db-id\":1,"
@ -198,6 +199,8 @@ testRun(void)
TEST_RESULT_STR(backupData.backupArchiveStart, NULL, "archive start NULL");
TEST_RESULT_STR(backupData.backupArchiveStop, NULL, "archive stop NULL");
TEST_RESULT_UINT(backupData.backupType, backupTypeIncr, "backup type incr");
TEST_RESULT_UINT(varUInt64(backupData.backupInfoRepoSizeMap), 100, "repo map size");
TEST_RESULT_UINT(varUInt64(backupData.backupInfoRepoSizeMapDelta), 12, "repo map size delta");
TEST_RESULT_STR_Z(backupData.backupPrior, "20161219-212741F", "backup prior exists");
TEST_RESULT_BOOL(
(strLstSize(backupData.backupReference) == 2 && strLstExists(backupData.backupReference, STRDEF("20161219-212741F")) &&
@ -368,6 +371,7 @@ testRun(void)
"[backup]\n" \
"backup-archive-start=\"000000030000028500000089\"\n" \
"backup-archive-stop=\"000000030000028500000090\"\n" \
"backup-block-incr=true\n" \
"backup-label=\"20190818-084502F_20190820-084502I\"\n" \
"backup-lsn-start=\"285/89000028\"\n" \
"backup-lsn-stop=\"285/89001F88\"\n" \
@ -410,10 +414,11 @@ testRun(void)
",\"timestamp\":1565282115}\n" \
"pg_data/base/32768/33000={\"checksum\":\"7a16d165e4775f7c92e8cdf60c0af57313f0bf90\",\"checksum-page\":true" \
",\"reference\":\"20190818-084502F\",\"size\":1073741824,\"timestamp\":1565282116}\n" \
"pg_data/base/32768/33000.32767={\"checksum\":\"6e99b589e550e68e934fd235ccba59fe5b592a9e\",\"checksum-page\":true" \
",\"reference\":\"20190818-084502F_20190819-084506I\",\"size\":32768,\"timestamp\":1565282114}\n" \
"pg_data/postgresql.conf={\"checksum\":\"6721d92c9fcdf4248acff1f9a1377127d9064807\",\"size\":4457" \
"pg_data/base/32768/33000.32767={\"bims\":88,\"bis\":1,\"checksum\":\"6e99b589e550e68e934fd235ccba59fe5b592a9e\"" \
",\"checksum-page\":true,\"reference\":\"20190818-084502F_20190819-084506I\",\"size\":32768" \
",\"timestamp\":1565282114}\n" \
"pg_data/postgresql.conf={\"bims\":12,\"bis\":1,\"checksum\":\"6721d92c9fcdf4248acff1f9a1377127d9064807\"" \
",\"size\":4457,\"timestamp\":1565282114}\n" \
"pg_data/special={\"mode\":\"0640\",\"size\":0,\"timestamp\":1565282120,\"user\":false}\n" \
"pg_data/dupref={\"mode\":\"0640\",\"reference\":\"20190818-084502F\",\"size\":0" \
",\"timestamp\":1565282120,\"user\":false}\n" \
@ -458,6 +463,8 @@ testRun(void)
TEST_RESULT_UINT(backupData.backupInfoSizeDelta, 12653, "backup size");
TEST_RESULT_UINT(backupData.backupInfoRepoSize, 1073783153, "repo size");
TEST_RESULT_UINT(backupData.backupInfoRepoSizeDelta, 8557, "repo backup size");
TEST_RESULT_UINT(varUInt64(backupData.backupInfoRepoSizeMap), 100, "repo map size");
TEST_RESULT_UINT(varUInt64(backupData.backupInfoRepoSizeMapDelta), 12, "repo map size delta");
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("infoBackupDataAnnotationSet()");


@ -54,13 +54,14 @@ testRun(void)
"backup-timestamp-stop=0\n" \
"backup-type=\"full\"\n"
#define TEST_MANIFEST_HEADER_BUNDLE \
#define TEST_MANIFEST_HEADER_BUNDLE_BLOCK \
"[backup]\n" \
"backup-block-incr=true\n" \
"backup-bundle=true\n" \
"backup-label=null\n" \
"backup-reference=\"\"\n" \
"backup-timestamp-copy-start=0\n" \
"backup-timestamp-start=0\n" \
"backup-timestamp-start=1570000000\n" \
"backup-timestamp-stop=0\n" \
"backup-type=\"full\"\n"
@ -294,7 +295,7 @@ testRun(void)
// Test tablespace error
TEST_ERROR(
manifestNewBuild(
storagePg, PG_VERSION_93, hrnPgCatalogVersion(PG_VERSION_93), false, false, false, exclusionList,
storagePg, PG_VERSION_93, hrnPgCatalogVersion(PG_VERSION_93), 0, false, false, false, false, exclusionList,
pckWriteResult(tablespaceList)),
AssertError,
"tablespace with oid 1 not found in tablespace map\n"
@ -319,7 +320,7 @@ testRun(void)
TEST_ASSIGN(
manifest,
manifestNewBuild(
storagePg, PG_VERSION_93, hrnPgCatalogVersion(PG_VERSION_93), false, false, false, NULL,
storagePg, PG_VERSION_93, hrnPgCatalogVersion(PG_VERSION_93), 0, false, false, false, false, NULL,
pckWriteResult(tablespaceList)),
"build manifest");
TEST_RESULT_VOID(manifestBackupLabelSet(manifest, STRDEF("20190818-084502F")), "backup label set");
@ -415,7 +416,8 @@ testRun(void)
// Test manifest - temp tables, unlogged tables, pg_serial and pg_xlog files ignored
TEST_ASSIGN(
manifest,
manifestNewBuild(storagePg, PG_VERSION_93, hrnPgCatalogVersion(PG_VERSION_93), true, false, false, NULL, NULL),
manifestNewBuild(
storagePg, PG_VERSION_93, hrnPgCatalogVersion(PG_VERSION_93), 0, true, false, false, false, NULL, NULL),
"build manifest");
contentSave = bufNew(0);
@ -489,7 +491,8 @@ testRun(void)
THROW_ON_SYS_ERROR(symlink(TEST_PATH "/wal", TEST_PATH "/wal/wal") == -1, FileOpenError, "unable to create symlink");
TEST_ERROR(
manifestNewBuild(storagePg, PG_VERSION_96, hrnPgCatalogVersion(PG_VERSION_96), false, false, false, NULL, NULL),
manifestNewBuild(
storagePg, PG_VERSION_96, hrnPgCatalogVersion(PG_VERSION_96), 0, false, false, false, false, NULL, NULL),
LinkDestinationError,
"link 'pg_xlog/wal' (" TEST_PATH "/wal) destination is the same directory as link 'pg_xlog' (" TEST_PATH "/wal)");
@ -544,7 +547,8 @@ testRun(void)
// Test manifest - pg_dynshmem, pg_replslot and postgresql.auto.conf.tmp files ignored
TEST_ASSIGN(
manifest,
manifestNewBuild(storagePg, PG_VERSION_94, hrnPgCatalogVersion(PG_VERSION_94), false, true, false, NULL, NULL),
manifestNewBuild(
storagePg, PG_VERSION_94, hrnPgCatalogVersion(PG_VERSION_94), 0, false, true, false, false, NULL, NULL),
"build manifest");
contentSave = bufNew(0);
@ -637,7 +641,8 @@ testRun(void)
// Tablespace link errors when the correct version is not found
TEST_ERROR(
manifestNewBuild(storagePg, PG_VERSION_12, hrnPgCatalogVersion(PG_VERSION_12), false, false, false, NULL, NULL),
manifestNewBuild(
storagePg, PG_VERSION_12, hrnPgCatalogVersion(PG_VERSION_12), 0, false, false, false, false, NULL, NULL),
FileOpenError,
"unable to get info for missing path/file '" TEST_PATH "/pg/pg_tblspc/1/PG_12_201909212': [2] No such file or"
" directory");
@ -657,7 +662,8 @@ testRun(void)
// pg_wal contents will be ignored online. pg_clog pgVersion > 10 primary:true, pg_xact pgVersion > 10 primary:false
TEST_ASSIGN(
manifest,
manifestNewBuild(storagePg, PG_VERSION_12, hrnPgCatalogVersion(PG_VERSION_12), true, false, false, NULL, NULL),
manifestNewBuild(
storagePg, PG_VERSION_12, hrnPgCatalogVersion(PG_VERSION_12), 0, true, false, false, false, NULL, NULL),
"build manifest");
contentSave = bufNew(0);
@ -725,12 +731,26 @@ testRun(void)
"check manifest");
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("run 13, offline");
TEST_TITLE("run 13, offline, block incr");
// Create file that is large enough for block incr
Buffer *buffer = bufNew(128 * 1024);
memset(bufPtr(buffer), 0, bufSize(buffer));
bufUsedSet(buffer, bufSize(buffer));
HRN_STORAGE_PUT(storagePgWrite, "128k", buffer, .modeFile = 0600, .timeModified = 1570000000);
// Create file that is large enough for block incr and far enough in the past to get a multiplier
HRN_STORAGE_PUT(storagePgWrite, "128k-1week", buffer, .modeFile = 0600, .timeModified = 1570000000 - (7 * 86400));
// Create file that is large enough for block incr and old enough to not need block incr
HRN_STORAGE_PUT(storagePgWrite, "128k-4week", buffer, .modeFile = 0600, .timeModified = 1570000000 - (28 * 86400));
// pg_wal not ignored
TEST_ASSIGN(
manifest,
manifestNewBuild(storagePg, PG_VERSION_13, hrnPgCatalogVersion(PG_VERSION_13), false, false, true, NULL, NULL),
manifestNewBuild(
storagePg, PG_VERSION_13, hrnPgCatalogVersion(PG_VERSION_13), 1570000000, false, false, true, true, NULL, NULL),
"build manifest");
contentSave = bufNew(0);
@ -738,7 +758,7 @@ testRun(void)
TEST_RESULT_STR(
strNewBuf(contentSave),
strNewBuf(harnessInfoChecksumZ(
TEST_MANIFEST_HEADER_BUNDLE
TEST_MANIFEST_HEADER_BUNDLE_BLOCK
TEST_MANIFEST_DB_13
TEST_MANIFEST_OPTION_ALL
"\n"
@ -749,6 +769,9 @@ testRun(void)
"pg_data/postgresql.conf={\"file\":\"postgresql.conf\",\"path\":\"../config\",\"type\":\"link\"}\n"
"\n"
"[target:file]\n"
"pg_data/128k={\"bis\":16,\"size\":131072,\"timestamp\":1570000000}\n"
"pg_data/128k-1week={\"bis\":32,\"size\":131072,\"timestamp\":1569395200}\n"
"pg_data/128k-4week={\"size\":131072,\"timestamp\":1567580800}\n"
"pg_data/PG_VERSION={\"size\":3,\"timestamp\":1565282100}\n"
"pg_data/base/1/555_init={\"size\":0,\"timestamp\":1565282114}\n"
"pg_data/base/1/555_init.1={\"size\":0,\"timestamp\":1565282114}\n"
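
These three files pin down the sizing behavior at copy time 1570000000: the current 128KiB file gets bis=16, the week-old copy gets the doubled bis=32, and the four-week-old copy gets no block incremental at all. A sketch of that selection logic follows; the one-week doubling and four-week cutoff mirror the test data, while the size thresholds and base block sizes are invented (the real table is manifestBuildBlockIncrSizeMap in manifest.c):

/* Age-adjusted block size selection sketch. Size thresholds and base block
   sizes are invented; the one-week doubling and four-week cutoff mirror the
   test data above. */
#include <stddef.h>
#include <stdint.h>
#include <time.h>

static size_t
blockIncrSizeGet(const uint64_t fileSize, const time_t fileAge)
{
    /* Small files do not use block incremental (threshold is an assumption) */
    if (fileSize < 16 * 1024)
        return 0;

    /* Files old enough are assumed stable and skip block incremental */
    if (fileAge >= 28 * 86400)
        return 0;

    /* Base block size grows with file size (values are assumptions) */
    size_t result = fileSize >= 1024 * 1024 ? 65536 : 8192;

    /* Older files get a larger block size */
    if (fileAge >= 7 * 86400)
        result *= 2;

    return result;
}

int main(void)
{
    /* Mirror the three test files: current, one week old, four weeks old */
    return blockIncrSizeGet(131072, 0) == 8192 && blockIncrSizeGet(131072, 7 * 86400) == 16384 &&
        blockIncrSizeGet(131072, 28 * 86400) == 0 ? 0 : 1;
}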
@ -800,7 +823,8 @@ testRun(void)
THROW_ON_SYS_ERROR(symlink(TEST_PATH "/pg/base", TEST_PATH "/pg/link") == -1, FileOpenError, "unable to create symlink");
TEST_ERROR(
manifestNewBuild(storagePg, PG_VERSION_94, hrnPgCatalogVersion(PG_VERSION_94), false, false, false, NULL, NULL),
manifestNewBuild(
storagePg, PG_VERSION_94, hrnPgCatalogVersion(PG_VERSION_94), 0, false, false, false, false, NULL, NULL),
LinkDestinationError, "link 'link' destination '" TEST_PATH "/pg/base' is in PGDATA");
THROW_ON_SYS_ERROR(unlink(TEST_PATH "/pg/link") == -1, FileRemoveError, "unable to remove symlink");
@ -811,7 +835,8 @@ testRun(void)
HRN_STORAGE_PATH_CREATE(storagePgWrite, MANIFEST_TARGET_PGTBLSPC "/somedir", .mode = 0700);
TEST_ERROR(
manifestNewBuild(storagePg, PG_VERSION_94, hrnPgCatalogVersion(PG_VERSION_94), false, false, false, NULL, NULL),
manifestNewBuild(
storagePg, PG_VERSION_94, hrnPgCatalogVersion(PG_VERSION_94), 0, false, false, false, false, NULL, NULL),
LinkExpectedError, "'pg_data/pg_tblspc/somedir' is not a symlink - pg_tblspc should contain only symlinks");
HRN_STORAGE_PATH_REMOVE(storagePgWrite, MANIFEST_TARGET_PGTBLSPC "/somedir");
@ -822,7 +847,8 @@ testRun(void)
HRN_STORAGE_PUT_EMPTY(storagePgWrite, MANIFEST_TARGET_PGTBLSPC "/somefile");
TEST_ERROR(
manifestNewBuild(storagePg, PG_VERSION_94, hrnPgCatalogVersion(PG_VERSION_94), false, false, false, NULL, NULL),
manifestNewBuild(
storagePg, PG_VERSION_94, hrnPgCatalogVersion(PG_VERSION_94), 0, false, false, false, false, NULL, NULL),
LinkExpectedError, "'pg_data/pg_tblspc/somefile' is not a symlink - pg_tblspc should contain only symlinks");
TEST_STORAGE_EXISTS(storagePgWrite, MANIFEST_TARGET_PGTBLSPC "/somefile", .remove = true);
@ -833,7 +859,8 @@ testRun(void)
THROW_ON_SYS_ERROR(symlink("../bogus-link", TEST_PATH "/pg/link-to-link") == -1, FileOpenError, "unable to create symlink");
TEST_ERROR(
manifestNewBuild(storagePg, PG_VERSION_94, hrnPgCatalogVersion(PG_VERSION_94), false, true, false, NULL, NULL),
manifestNewBuild(
storagePg, PG_VERSION_94, hrnPgCatalogVersion(PG_VERSION_94), 0, false, true, false, false, NULL, NULL),
FileOpenError,
"unable to get info for missing path/file '" TEST_PATH "/pg/link-to-link': [2] No such file or directory");
@ -849,7 +876,8 @@ testRun(void)
symlink(TEST_PATH "/linktest", TEST_PATH "/pg/linktolink") == -1, FileOpenError, "unable to create symlink");
TEST_ERROR(
manifestNewBuild(storagePg, PG_VERSION_94, hrnPgCatalogVersion(PG_VERSION_94), false, false, false, NULL, NULL),
manifestNewBuild(
storagePg, PG_VERSION_94, hrnPgCatalogVersion(PG_VERSION_94), 0, false, false, false, false, NULL, NULL),
LinkDestinationError, "link '" TEST_PATH "/pg/linktolink' cannot reference another link '" TEST_PATH "/linktest'");
#undef TEST_MANIFEST_HEADER
@ -1310,6 +1338,83 @@ testRun(void)
TEST_MANIFEST_PATH_DEFAULT)),
"check manifest");
manifestPrior->pub.data.backupOptionOnline = BOOL_TRUE_VAR;
manifest->pub.data.backupOptionOnline = BOOL_TRUE_VAR;
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("block incr delta");
lstClear(manifest->pub.fileList);
lstClear(manifestPrior->pub.fileList);
// Prior file was not block incr but current file is
manifestFileAdd(
manifest,
&(ManifestFile){
.name = STRDEF(MANIFEST_TARGET_PGDATA "/block-incr-add"), .copy = true, .size = 6, .sizeRepo = 6,
.blockIncrSize = 8192, .timestamp = 1482182861, .mode = 0600, .group = STRDEF("test"), .user = STRDEF("test")});
manifestFileAdd(
manifestPrior,
&(ManifestFile){
.name = STRDEF(MANIFEST_TARGET_PGDATA "/block-incr-add"), .size = 4, .sizeRepo = 4, .timestamp = 1482182860,
.checksumSha1 = bufPtr(bufNewDecode(encodingHex, STRDEF("ddddddddddbbbbbbbbbbccccccccccaaaaaaaaaa")))});
// Prior file was block incr but current file is not
manifestFileAdd(
manifest,
&(ManifestFile){
.name = STRDEF(MANIFEST_TARGET_PGDATA "/block-incr-sub"), .copy = true, .size = 6, .sizeRepo = 6,
.timestamp = 1482182861, .mode = 0600, .group = STRDEF("test"), .user = STRDEF("test")});
manifestFileAdd(
manifestPrior,
&(ManifestFile){
.name = STRDEF(MANIFEST_TARGET_PGDATA "/block-incr-sub"), .size = 4, .sizeRepo = 4, .blockIncrSize = 8192,
.blockIncrMapSize = 66, .timestamp = 1482182860,
.checksumSha1 = bufPtr(bufNewDecode(encodingHex, STRDEF("ddddddddddbbbbbbbbbbccccccccccaaaaaaaaaa")))});
// Prior file has different block incr size
manifestFileAdd(
manifest,
&(ManifestFile){
.name = STRDEF(MANIFEST_TARGET_PGDATA "/block-incr-keep-size"), .copy = true, .size = 6, .sizeRepo = 6,
.blockIncrSize = 16384, .timestamp = 1482182861, .mode = 0600, .group = STRDEF("test"), .user = STRDEF("test")});
manifestFileAdd(
manifestPrior,
&(ManifestFile){
.name = STRDEF(MANIFEST_TARGET_PGDATA "/block-incr-keep-size"), .size = 4, .sizeRepo = 4, .blockIncrSize = 8192,
.blockIncrMapSize = 31, .timestamp = 1482182860,
.checksumSha1 = bufPtr(bufNewDecode(encodingHex, STRDEF("ddddddddddbbbbbbbbbbccccccccccaaaaaaaaaa")))});
TEST_RESULT_VOID(
manifestBuildIncr(manifest, manifestPrior, backupTypeIncr, STRDEF("000000030000000300000003")), "incremental manifest");
contentSave = bufNew(0);
TEST_RESULT_VOID(manifestSave(manifest, ioBufferWriteNew(contentSave)), "save manifest");
TEST_RESULT_STR(
strNewBuf(contentSave),
strNewBuf(harnessInfoChecksumZ(
TEST_MANIFEST_HEADER_PRE
"backup-reference=\"20190101-010101F,20190101-010101F_20190202-010101D\"\n"
TEST_MANIFEST_HEADER_MID
"option-delta=true\n"
"option-hardlink=false\n"
"option-online=true\n"
"\n"
"[backup:target]\n"
"pg_data={\"path\":\"/pg\",\"type\":\"path\"}\n"
"\n"
"[target:file]\n"
"pg_data/block-incr-add={\"bis\":1,\"size\":6,\"timestamp\":1482182861}\n"
"pg_data/block-incr-keep-size={\"bims\":31,\"bis\":1,\"checksum\":\"ddddddddddbbbbbbbbbbccccccccccaaaaaaaaaa\""
",\"reference\":\"20190101-010101F\",\"repo-size\":4,\"size\":6,\"timestamp\":1482182861}\n"
"pg_data/block-incr-sub={\"size\":6,\"timestamp\":1482182861}\n"
TEST_MANIFEST_FILE_DEFAULT
"\n"
"[target:path]\n"
"pg_data={}\n"
TEST_MANIFEST_PATH_DEFAULT)),
"check manifest");
#undef TEST_MANIFEST_HEADER_PRE
#undef TEST_MANIFEST_HEADER_MID
#undef TEST_MANIFEST_HEADER_POST
@ -1409,6 +1514,7 @@ testRun(void)
"[backup]\n" \
"backup-archive-start=\"000000030000028500000089\"\n" \
"backup-archive-stop=\"000000030000028500000089\"\n" \
"backup-block-incr=true\n" \
"backup-bundle=true\n" \
"backup-label=\"20190818-084502F_20190820-084502D\"\n" \
"backup-lsn-start=\"285/89000028\"\n" \
@ -1416,7 +1522,7 @@ testRun(void)
"backup-prior=\"20190818-084502F\"\n" \
"backup-reference=\"20190818-084502F_20190819-084506D,20190818-084502F,20190818-084502F_20190820-084502D\"\n" \
"backup-timestamp-copy-start=1565282141\n" \
"backup-timestamp-start=1565282140\n" \
"backup-timestamp-start=777\n" \
"backup-timestamp-stop=1565282142\n" \
"backup-type=\"full\"\n" \
"\n" \
@ -1482,8 +1588,8 @@ testRun(void)
",\"group\":\"group2\",\"size\":4,\"timestamp\":1565282115,\"user\":false}\n" \
"pg_data/base/32768/33000={\"checksum\":\"7a16d165e4775f7c92e8cdf60c0af57313f0bf90\",\"checksum-page\":true" \
",\"reference\":\"20190818-084502F\",\"size\":1073741824,\"timestamp\":1565282116}\n" \
"pg_data/base/32768/33000.32767={\"checksum\":\"6e99b589e550e68e934fd235ccba59fe5b592a9e\",\"checksum-page\":true" \
",\"reference\":\"20190818-084502F\",\"size\":32768,\"timestamp\":1565282114}\n" \
"pg_data/base/32768/33000.32767={\"bims\":96,\"bis\":3,\"checksum\":\"6e99b589e550e68e934fd235ccba59fe5b592a9e\"," \
"\"checksum-page\":true,\"reference\":\"20190818-084502F\",\"size\":32768,\"timestamp\":1565282114}\n" \
"pg_data/postgresql.conf={\"size\":4457,\"timestamp\":1565282114}\n" \
"pg_data/special-@#!$^&*()_+~`{}[]\\:;={\"mode\":\"0640\",\"size\":0,\"timestamp\":1565282120,\"user\":false}\n"
@ -1528,6 +1634,7 @@ testRun(void)
"[backup]\n"
"backup-archive-start=\"000000040000028500000089\"\n"
"backup-archive-stop=\"000000040000028500000089\"\n"
"backup-block-incr=true\n"
"backup-bundle=true\n"
"backup-label=\"20190818-084502F_20190820-084502D\"\n"
"backup-lsn-start=\"300/89000028\"\n"
@ -1617,7 +1724,7 @@ testRun(void)
TEST_TITLE("manifest complete");
TEST_RESULT_VOID(
manifestBuildComplete(manifest, 0, NULL, NULL, 0, NULL, NULL, 0, 0, NULL, false, false, 0, 0, 0, false, 0, false, NULL),
manifestBuildComplete(manifest, NULL, NULL, 0, NULL, NULL, 0, 0, NULL, false, false, 0, 0, 0, false, 0, false, NULL),
"manifest complete without db");
// Create empty annotations
@ -1627,7 +1734,7 @@ testRun(void)
TEST_RESULT_VOID(
manifestBuildComplete(
manifest, 0, NULL, NULL, 0, NULL, NULL, 0, 0, NULL, false, false, 0, 0, 0, false, 0, false, annotationKV),
manifest, NULL, NULL, 0, NULL, NULL, 0, 0, NULL, false, false, 0, 0, 0, false, 0, false, annotationKV),
"manifest complete without db and empty annotations");
// Create db list
@ -1659,9 +1766,9 @@ testRun(void)
TEST_RESULT_VOID(
manifestBuildComplete(
manifest, 1565282140, STRDEF("285/89000028"), STRDEF("000000030000028500000089"), 1565282142,
STRDEF("285/89001F88"), STRDEF("000000030000028500000089"), 1, 1000000000000000094, pckWriteResult(dbList),
true, true, 16384, 3, 6, true, 32, false, annotationKV),
manifest, STRDEF("285/89000028"), STRDEF("000000030000028500000089"), 1565282142, STRDEF("285/89001F88"),
STRDEF("000000030000028500000089"), 1, 1000000000000000094, pckWriteResult(dbList), true, true, 16384, 3, 6, true,
32, false, annotationKV),
"manifest complete with db");
TEST_RESULT_STR_Z(manifestPathPg(STRDEF("pg_data")), NULL, "check pg_data path");


@ -267,7 +267,8 @@ testRun(void)
MEM_CONTEXT_BEGIN(testContext)
{
TEST_ASSIGN(
manifest, manifestNewBuild(storagePg, PG_VERSION_15, 999999999, false, false, false, NULL, NULL), "build files");
manifest, manifestNewBuild(storagePg, PG_VERSION_15, 999999999, false, false, false, false, 0, NULL, NULL),
"build files");
}
MEM_CONTEXT_END();