Mirror of https://github.com/pgbackrest/pgbackrest.git (synced 2024-12-12 10:04:14 +02:00)
Optimize restore command for file bundling.
Since files are stored sequentially in a bundle, it is often possible to restore multiple files with a single read. Previously, each restored file required a separate read.

Reducing the number of reads is particularly beneficial for object stores, but performance should benefit on any file system.

Currently, if there is a gap between files, a new read is required. In the future we might set a limit for how large a gap can be skipped without starting a new read.
parent f7ab002aa7
commit dca6da86bf
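The read-coalescing idea can be sketched independently of pgBackRest's storage layer. The snippet below is a minimal illustration, not project code: it assumes a simplified BundledFile descriptor (hypothetical) and groups files whose offsets are contiguous into a single read, starting a new read whenever a gap appears.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Hypothetical descriptor for a file stored inside a bundle (illustration only) */
typedef struct
{
    const char *name;                                           /* destination file name */
    uint64_t offset;                                            /* offset of the file within the bundle */
    uint64_t size;                                              /* stored size of the file */
} BundledFile;

/* Extend the current read while the next file begins exactly where the previous one
   ended; otherwise start a new read */
static void
planReads(const BundledFile *file, size_t fileTotal)
{
    size_t idx = 0;

    while (idx < fileTotal)
    {
        uint64_t readOffset = file[idx].offset;
        uint64_t readSize = file[idx].size;
        size_t readFiles = 1;

        while (idx + readFiles < fileTotal && file[idx + readFiles].offset == readOffset + readSize)
        {
            readSize += file[idx + readFiles].size;
            readFiles++;
        }

        printf("read offset=%llu size=%llu files=%zu\n", (unsigned long long)readOffset, (unsigned long long)readSize, readFiles);

        idx += readFiles;
    }
}

int
main(void)
{
    /* Three contiguous files followed by a gap -- the first three share one read */
    const BundledFile fileList[] =
    {
        {.name = "base/1/20", .offset = 0, .size = 4},
        {.name = "base/1/21", .offset = 4, .size = 8},
        {.name = "base/1/22", .offset = 12, .size = 2},
        {.name = "base/1/30", .offset = 32, .size = 6},
    };

    planReads(fileList, sizeof(fileList) / sizeof(fileList[0]));
    return 0;
}

In the actual commit this grouping is driven by each RestoreFile's offset and limit fields, with repoFileLimit tracking how much of the open repo read remains (see the file.c changes below).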
@ -46,9 +46,15 @@
        <release-item>
            <commit subject="Remove redundant restoreFile() test and improve coverage."/>
            <commit subject="Add limit parameter to ioCopyP()."/>
            <commit subject="Optimize restore command for file bundling.">
                <github-issue id="1149"/>
                <github-pull-request id="1683"/>
            </commit>

            <release-item-contributor-list>
                <release-item-contributor id="david.steele"/>
                <release-item-reviewer id="reid.thompson"/>
                <release-item-reviewer id="stefan.fercot"/>
            </release-item-contributor-list>

            <p>Improve small file support.</p>
@ -20,145 +20,185 @@ Restore File
#include "storage/helper.h"

/**********************************************************************************************************************************/
bool
restoreFile(
const String *const repoFile, unsigned int repoIdx, const uint64_t offset, const Variant *const limit,
const CompressType repoFileCompressType, const String *const pgFile, const String *const pgFileChecksum, const bool pgFileZero,
const uint64_t pgFileSize, const time_t pgFileModified, const mode_t pgFileMode, const String *const pgFileUser,
const String *const pgFileGroup, const time_t copyTimeBegin, const bool delta, const bool deltaForce,
const String *const cipherPass)
List *restoreFile(
const String *const repoFile, const unsigned int repoIdx, const CompressType repoFileCompressType, const time_t copyTimeBegin,
const bool delta, const bool deltaForce, const String *const cipherPass, const List *const fileList)
{
FUNCTION_LOG_BEGIN(logLevelDebug);
FUNCTION_LOG_PARAM(STRING, repoFile);
FUNCTION_LOG_PARAM(UINT, repoIdx);
FUNCTION_LOG_PARAM(UINT64, offset);
FUNCTION_LOG_PARAM(VARIANT, limit);
FUNCTION_LOG_PARAM(ENUM, repoFileCompressType);
FUNCTION_LOG_PARAM(STRING, pgFile);
FUNCTION_LOG_PARAM(STRING, pgFileChecksum);
FUNCTION_LOG_PARAM(BOOL, pgFileZero);
FUNCTION_LOG_PARAM(UINT64, pgFileSize);
FUNCTION_LOG_PARAM(TIME, pgFileModified);
FUNCTION_LOG_PARAM(MODE, pgFileMode);
FUNCTION_LOG_PARAM(STRING, pgFileUser);
FUNCTION_LOG_PARAM(STRING, pgFileGroup);
FUNCTION_LOG_PARAM(TIME, copyTimeBegin);
FUNCTION_LOG_PARAM(BOOL, delta);
FUNCTION_LOG_PARAM(BOOL, deltaForce);
FUNCTION_TEST_PARAM(STRING, cipherPass);
FUNCTION_LOG_PARAM(LIST, fileList); // List of files to restore
FUNCTION_LOG_END();

ASSERT(repoFile != NULL);
ASSERT(pgFile != NULL);
ASSERT(limit == NULL || varType(limit) == varTypeUInt64);

// Was the file copied?
bool result = true;

// Is the file compressible during the copy?
bool compressible = true;
// Restore file results
List *result = NULL;

MEM_CONTEXT_TEMP_BEGIN()
{
// Perform delta if requested. Delta zero-length files to avoid overwriting the file if the timestamp is correct.
if (delta && !pgFileZero)
result = lstNewP(sizeof(RestoreFileResult));

// Check files to determine which ones need to be restored
for (unsigned int fileIdx = 0; fileIdx < lstSize(fileList); fileIdx++)
{
// Perform delta if the file exists
StorageInfo info = storageInfoP(storagePg(), pgFile, .ignoreMissing = true, .followLink = true);
const RestoreFile *const file = lstGet(fileList, fileIdx);
ASSERT(file->name != NULL);
ASSERT(file->limit == NULL || varType(file->limit) == varTypeUInt64);

if (info.exists)
RestoreFileResult *const fileResult = lstAdd(
result, &(RestoreFileResult){.manifestFile = file->manifestFile, .result = restoreResultCopy});

// Perform delta if requested. Delta zero-length files to avoid overwriting the file if the timestamp is correct.
if (delta && !file->zero)
{
// If force then use size/timestamp delta
if (deltaForce)
// Perform delta if the file exists
StorageInfo info = storageInfoP(storagePg(), file->name, .ignoreMissing = true, .followLink = true);

if (info.exists)
{
// Make sure that timestamp/size are equal and that timestamp is before the copy start time of the backup
if (info.size == pgFileSize && info.timeModified == pgFileModified && info.timeModified < copyTimeBegin)
result = false;
}
// Else use size and checksum
else
{
// Only continue delta if the file size is as expected
if (info.size == pgFileSize)
// If force then use size/timestamp delta
if (deltaForce)
{
// Generate checksum for the file if size is not zero
IoRead *read = NULL;

if (info.size != 0)
// Make sure that timestamp/size are equal and that timestamp is before the copy start time of the backup
if (info.size == file->size && info.timeModified == file->timeModified && info.timeModified < copyTimeBegin)
fileResult->result = restoreResultPreserve;
}
// Else use size and checksum
else
{
// Only continue delta if the file size is as expected
if (info.size == file->size)
{
read = storageReadIo(storageNewReadP(storagePgWrite(), pgFile));
ioFilterGroupAdd(ioReadFilterGroup(read), cryptoHashNew(HASH_TYPE_SHA1_STR));
ioReadDrain(read);
}
// Generate checksum for the file if size is not zero
IoRead *read = NULL;

// If size and checksum are equal then no need to copy the file
if (pgFileSize == 0 ||
strEq(
pgFileChecksum,
pckReadStrP(ioFilterGroupResultP(ioReadFilterGroup(read), CRYPTO_HASH_FILTER_TYPE))))
{
// Even if hash/size are the same set the time back to backup time. This helps with unit testing, but
// also presents a pristine version of the database after restore.
if (info.timeModified != pgFileModified)
if (info.size != 0)
{
THROW_ON_SYS_ERROR_FMT(
utime(
strZ(storagePathP(storagePg(), pgFile)),
&((struct utimbuf){.actime = pgFileModified, .modtime = pgFileModified})) == -1,
FileInfoError, "unable to set time for '%s'", strZ(storagePathP(storagePg(), pgFile)));
read = storageReadIo(storageNewReadP(storagePgWrite(), file->name));
ioFilterGroupAdd(ioReadFilterGroup(read), cryptoHashNew(HASH_TYPE_SHA1_STR));
ioReadDrain(read);
}

result = false;
// If the checksum is also equal (or file is zero size) then no need to copy the file
if (file->size == 0 ||
strEq(
file->checksum,
pckReadStrP(ioFilterGroupResultP(ioReadFilterGroup(read), CRYPTO_HASH_FILTER_TYPE))))
{
// Even if hash/size are the same set the time back to backup time. This helps with unit testing,
// but also presents a pristine version of the database after restore.
if (info.timeModified != file->timeModified)
{
THROW_ON_SYS_ERROR_FMT(
utime(
strZ(storagePathP(storagePg(), file->name)),
&((struct utimbuf){.actime = file->timeModified, .modtime = file->timeModified})) == -1,
FileInfoError, "unable to set time for '%s'", strZ(storagePathP(storagePg(), file->name)));
}

fileResult->result = restoreResultPreserve;
}
}
}
}
}
}

// Copy file from repository to database or create zero-length/sparse file
if (result)
{
// Create destination file
StorageWrite *pgFileWrite = storageNewWriteP(
storagePgWrite(), pgFile, .modeFile = pgFileMode, .user = pgFileUser, .group = pgFileGroup,
.timeModified = pgFileModified, .noAtomic = true, .noCreatePath = true, .noSyncPath = true);

// If size is zero/sparse no need to actually copy
if (pgFileSize == 0 || pgFileZero)
// Create zeroed and zero-length files
if (fileResult->result == restoreResultCopy && (file->size == 0 || file->zero))
{
// Create destination file
StorageWrite *pgFileWrite = storageNewWriteP(
storagePgWrite(), file->name, .modeFile = file->mode, .user = file->user, .group = file->group,
.timeModified = file->timeModified, .noAtomic = true, .noCreatePath = true, .noSyncPath = true);

ioWriteOpen(storageWriteIo(pgFileWrite));

// Truncate the file to specified length (note in this case the file with grow, not shrink)
if (pgFileZero)
// Truncate the file to specified length (note in this case the file will grow, not shrink)
if (file->zero)
{
THROW_ON_SYS_ERROR_FMT(
ftruncate(ioWriteFd(storageWriteIo(pgFileWrite)), (off_t)pgFileSize) == -1, FileWriteError,
"unable to truncate '%s'", strZ(pgFile));

// Report the file as not copied
result = false;
ftruncate(ioWriteFd(storageWriteIo(pgFileWrite)), (off_t)file->size) == -1, FileWriteError,
"unable to truncate '%s'", strZ(file->name));
}

ioWriteClose(storageWriteIo(pgFileWrite));

// Report the file as zeroed or zero-length
fileResult->result = restoreResultZero;
}
// Else perform the copy
else
}

// Copy files from repository to database
StorageRead *repoFileRead = NULL;
uint64_t repoFileLimit = 0;

for (unsigned int fileIdx = 0; fileIdx < lstSize(fileList); fileIdx++)
{
const RestoreFile *const file = lstGet(fileList, fileIdx);
const RestoreFileResult *const fileResult = lstGet(result, fileIdx);

// Copy file from repository to database
if (fileResult->result == restoreResultCopy)
{
// If no repo file is currently open
if (repoFileLimit == 0)
{
// If a limit is specified then we need to use it, even if there is only one pg file to copy, because we might
// be reading from the middle of a repo file containing many pg files
if (file->limit != NULL)
{
ASSERT(varUInt64(file->limit) != 0);
repoFileLimit = varUInt64(file->limit);

// Determine how many files can be copied with one read
for (unsigned int fileNextIdx = fileIdx + 1; fileNextIdx < lstSize(fileList); fileNextIdx++)
{
// Only files that are being copied are considered
if (((const RestoreFileResult *)lstGet(result, fileNextIdx))->result == restoreResultCopy)
{
const RestoreFile *const fileNext = lstGet(fileList, fileNextIdx);
ASSERT(fileNext->limit != NULL && varUInt64(fileNext->limit) != 0);

// Break if the offset is not the first file's offset + the limit of all additional files so far
if (fileNext->offset != file->offset + repoFileLimit)
break;

repoFileLimit += varUInt64(fileNext->limit);
}
// Else if the file was not copied then there is a gap so break
else
break;
}
}

// Create and open the repo file
repoFileRead = storageNewReadP(
storageRepoIdx(repoIdx), repoFile,
.compressible = repoFileCompressType == compressTypeNone && cipherPass == NULL, .offset = file->offset,
.limit = repoFileLimit != 0 ? VARUINT64(repoFileLimit) : NULL);
ioReadOpen(storageReadIo(repoFileRead));
}

// Create pg file
StorageWrite *pgFileWrite = storageNewWriteP(
storagePgWrite(), file->name, .modeFile = file->mode, .user = file->user, .group = file->group,
.timeModified = file->timeModified, .noAtomic = true, .noCreatePath = true, .noSyncPath = true);

IoFilterGroup *filterGroup = ioWriteFilterGroup(storageWriteIo(pgFileWrite));

// Add decryption filter
if (cipherPass != NULL)
{
ioFilterGroupAdd(filterGroup, cipherBlockNew(cipherModeDecrypt, cipherTypeAes256Cbc, BUFSTR(cipherPass), NULL));
compressible = false;
}

// Add decompression filter
if (repoFileCompressType != compressTypeNone)
{
ioFilterGroupAdd(filterGroup, decompressFilter(repoFileCompressType));
compressible = false;
}

// Add sha1 filter
ioFilterGroupAdd(filterGroup, cryptoHashNew(HASH_TYPE_SHA1_STR));

@ -167,23 +207,35 @@ restoreFile(
ioFilterGroupAdd(filterGroup, ioSizeNew());

// Copy file
storageCopyP(
storageNewReadP(
storageRepoIdx(repoIdx), repoFile, .compressible = compressible, .offset = offset, .limit = limit),
pgFileWrite);
ioWriteOpen(storageWriteIo(pgFileWrite));
ioCopyP(storageReadIo(repoFileRead), storageWriteIo(pgFileWrite), .limit = file->limit);
ioWriteClose(storageWriteIo(pgFileWrite));

// If more than one file is being copied from a single read then decrement the limit
if (repoFileLimit != 0)
repoFileLimit -= varUInt64(file->limit);

// Free the repo file when there are no more files to copy from it
if (repoFileLimit == 0)
storageReadFree(repoFileRead);

// Validate checksum
if (!strEq(pgFileChecksum, pckReadStrP(ioFilterGroupResultP(filterGroup, CRYPTO_HASH_FILTER_TYPE))))
if (!strEq(file->checksum, pckReadStrP(ioFilterGroupResultP(filterGroup, CRYPTO_HASH_FILTER_TYPE))))
{
THROW_FMT(
ChecksumError,
"error restoring '%s': actual checksum '%s' does not match expected checksum '%s'", strZ(pgFile),
strZ(pckReadStrP(ioFilterGroupResultP(filterGroup, CRYPTO_HASH_FILTER_TYPE))), strZ(pgFileChecksum));
"error restoring '%s': actual checksum '%s' does not match expected checksum '%s'", strZ(file->name),
strZ(pckReadStrP(ioFilterGroupResultP(filterGroup, CRYPTO_HASH_FILTER_TYPE))), strZ(file->checksum));
}

// Free the pg file
storageWriteFree(pgFileWrite);
}
}

lstMove(result, memContextPrior());
}
MEM_CONTEXT_TEMP_END();

FUNCTION_LOG_RETURN(BOOL, result);
FUNCTION_LOG_RETURN(LIST, result);
}
@ -5,18 +5,45 @@ Restore File
#define COMMAND_RESTORE_FILE_H

#include "common/compress/helper.h"
#include "common/crypto/common.h"
#include "common/type/string.h"
#include "storage/storage.h"
#include "common/type/variant.h"

/***********************************************************************************************************************************
Restore file types
***********************************************************************************************************************************/
typedef enum
{
restoreResultPreserve,
restoreResultZero,
restoreResultCopy,
} RestoreResult;

/***********************************************************************************************************************************
Functions
***********************************************************************************************************************************/
// Copy a file from the backup to the specified destination
bool restoreFile(
const String *repoFile, unsigned int repoIdx, uint64_t offset, const Variant *limit, CompressType repoFileCompressType,
const String *pgFile, const String *pgFileChecksum, bool pgFileZero, uint64_t pgFileSize, time_t pgFileModified,
mode_t pgFileMode, const String *pgFileUser, const String *pgFileGroup, time_t copyTimeBegin, bool delta, bool deltaForce,
const String *cipherPass);
typedef struct RestoreFile
{
const String *name;                                             // File to restore
const String *checksum;                                         // Expected checksum
uint64_t size;                                                  // Expected size
time_t timeModified;                                            // Original modification time
mode_t mode;                                                    // Original mode
bool zero;                                                      // Should the file be zeroed?
const String *user;                                             // Original user
const String *group;                                            // Original group
uint64_t offset;                                                // Offset into repo file where pg file is located
const Variant *limit;                                           // Limit for read in the repo file
const String *manifestFile;                                     // Manifest file
} RestoreFile;

typedef struct RestoreFileResult
{
const String *manifestFile;                                     // Manifest file
RestoreResult result;                                           // Restore result (e.g. preserve, copy)
} RestoreFileResult;

List *restoreFile(
const String *repoFile, unsigned int repoIdx, CompressType repoFileCompressType, time_t copyTimeBegin, bool delta,
bool deltaForce, const String *cipherPass, const List *fileList);

#endif
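A condensed caller-side sketch of the new list-based API, adapted from the updated unit test later in this commit. The names, offsets, sizes, and checksums are illustrative only, and the sketch assumes the pg and repo storage helpers are already configured.

// Two files stored back-to-back in one bundle (illustrative values)
List *fileList = lstNewP(sizeof(RestoreFile));

RestoreFile file =
{
    .name = STRDEF("base/1/20"),
    .checksum = STRDEF("c032adc1ff629c9b66f22749ad667e6beadf144b"),
    .size = 1,
    .timeModified = 1482182860,
    .mode = 0600,
    .offset = 0,
    .limit = varNewUInt64(1),
    .manifestFile = STRDEF("pg_data/base/1/20"),
};
lstAdd(fileList, &file);

file.name = STRDEF("base/1/21");
file.checksum = STRDEF("e9d71f5ee7c92d6dc9e92ffdad17b8bd49418f98");
file.offset = 1;                                                // contiguous with the previous file, so one repo read
file.manifestFile = STRDEF("pg_data/base/1/21");
lstAdd(fileList, &file);

// One call restores both files and returns a RestoreFileResult per manifest file
const List *const resultList = restoreFile(
    STRDEF(STORAGE_REPO_BACKUP "/20161219-212741F/bundle/2"), 0, compressTypeNone, 1482182861, false, false, NULL, fileList);

Each returned RestoreFileResult carries restoreResultCopy, restoreResultPreserve, or restoreResultZero, which restore.c then uses when logging the outcome of the job.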
@ -28,37 +28,54 @@ restoreFileProtocol(PackRead *const param, ProtocolServer *const server)
{
// Restore file
const String *const repoFile = pckReadStrP(param);

uint64_t offset = 0;
const Variant *limit = NULL;

if (pckReadBoolP(param))
{
offset = pckReadU64P(param);
limit = varNewUInt64(pckReadU64P(param));
}

const unsigned int repoIdx = pckReadU32P(param);
const CompressType repoFileCompressType = (CompressType)pckReadU32P(param);
const String *const pgFile = pckReadStrP(param);
const String *const pgFileChecksum = pckReadStrP(param);
const bool pgFileZero = pckReadBoolP(param);
const uint64_t pgFileSize = pckReadU64P(param);
const time_t pgFileModified = pckReadTimeP(param);
const mode_t pgFileMode = pckReadModeP(param);
const String *const pgFileUser = pckReadStrP(param);
const String *const pgFileGroup = pckReadStrP(param);
const time_t copyTimeBegin = pckReadTimeP(param);
const bool delta = pckReadBoolP(param);
const bool deltaForce = pckReadBoolP(param);
const String *const cipherPass = pckReadStrP(param);

const bool result = restoreFile(
repoFile, repoIdx, offset, limit, repoFileCompressType, pgFile, pgFileChecksum, pgFileZero, pgFileSize, pgFileModified,
pgFileMode, pgFileUser, pgFileGroup, copyTimeBegin, delta, deltaForce, cipherPass);
// Build the file list
List *fileList = lstNewP(sizeof(RestoreFile));

while (!pckReadNullP(param))
{
RestoreFile file = {.name = pckReadStrP(param)};
file.checksum = pckReadStrP(param);
file.size = pckReadU64P(param);
file.timeModified = pckReadTimeP(param);
file.mode = pckReadModeP(param);
file.zero = pckReadBoolP(param);
file.user = pckReadStrP(param);
file.group = pckReadStrP(param);

if (pckReadBoolP(param))
{
file.offset = pckReadU64P(param);
file.limit = varNewUInt64(pckReadU64P(param));
}

file.manifestFile = pckReadStrP(param);

lstAdd(fileList, &file);
}

// Restore files
const List *const result = restoreFile(
repoFile, repoIdx, repoFileCompressType, copyTimeBegin, delta, deltaForce, cipherPass, fileList);

// Return result
protocolServerDataPut(server, pckWriteBoolP(protocolPackNew(), result));
PackWrite *const resultPack = protocolPackNew();

for (unsigned int resultIdx = 0; resultIdx < lstSize(result); resultIdx++)
{
const RestoreFileResult *const fileResult = lstGet(result, resultIdx);

pckWriteStrP(resultPack, fileResult->manifestFile);
pckWriteU32P(resultPack, fileResult->result);
}

protocolServerDataPut(server, resultPack);
protocolServerDataEndPut(server);
}
MEM_CONTEXT_TEMP_END();
@ -8,6 +8,7 @@ Restore Command
|
||||
#include <time.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include "command/restore/file.h"
|
||||
#include "command/restore/protocol.h"
|
||||
#include "command/restore/restore.h"
|
||||
#include "common/crypto/cipherBlock.h"
|
||||
@ -1914,14 +1915,58 @@ restoreProcessQueueComparator(const void *item1, const void *item2)
|
||||
ManifestFile file1 = manifestFileUnpack(restoreProcessQueueComparatorManifest, *(const ManifestFilePack **)item1);
|
||||
ManifestFile file2 = manifestFileUnpack(restoreProcessQueueComparatorManifest, *(const ManifestFilePack **)item2);
|
||||
|
||||
// If the size differs then that's enough to determine order
|
||||
if (file1.size < file2.size)
|
||||
FUNCTION_TEST_RETURN(-1);
|
||||
else if (file1.size > file2.size)
|
||||
// Zero length files should be ordered at the end
|
||||
if (file1.size == 0)
|
||||
{
|
||||
if (file2.size != 0)
|
||||
FUNCTION_TEST_RETURN(-1);
|
||||
}
|
||||
else if (file2.size == 0)
|
||||
FUNCTION_TEST_RETURN(1);
|
||||
|
||||
// If size is the same then use name to generate a deterministic ordering (names must be unique)
|
||||
FUNCTION_TEST_RETURN(strCmp(file1.name, file2.name));
|
||||
// If the bundle id differs that is enough to determine order
|
||||
if (file1.bundleId < file2.bundleId)
|
||||
FUNCTION_TEST_RETURN(1);
|
||||
else if (file1.bundleId > file2.bundleId)
|
||||
FUNCTION_TEST_RETURN(-1);
|
||||
|
||||
// If the bundle ids are 0
|
||||
if (file1.bundleId == 0)
|
||||
{
|
||||
// If the size differs then that's enough to determine order
|
||||
if (file1.size < file2.size)
|
||||
FUNCTION_TEST_RETURN(-1);
|
||||
else if (file1.size > file2.size)
|
||||
FUNCTION_TEST_RETURN(1);
|
||||
|
||||
// If size is the same then use name to generate a deterministic ordering (names must be unique)
|
||||
ASSERT(!strEq(file1.name, file2.name));
|
||||
FUNCTION_TEST_RETURN(strCmp(file1.name, file2.name));
|
||||
}
|
||||
|
||||
// If the reference differs that is enough to determine order
|
||||
if (file1.reference == NULL)
|
||||
{
|
||||
if (file2.reference != NULL)
|
||||
FUNCTION_TEST_RETURN(-1);
|
||||
}
|
||||
else if (file2.reference == NULL)
|
||||
FUNCTION_TEST_RETURN(1);
|
||||
else
|
||||
{
|
||||
const int backupLabelCmp = strCmp(file1.reference, file2.reference) * -1;
|
||||
|
||||
if (backupLabelCmp != 0)
|
||||
FUNCTION_TEST_RETURN(backupLabelCmp);
|
||||
}
|
||||
|
||||
// Finally order by bundle offset
|
||||
ASSERT(file1.bundleOffset != file2.bundleOffset);
|
||||
|
||||
if (file1.bundleOffset < file2.bundleOffset)
|
||||
FUNCTION_TEST_RETURN(1);
|
||||
|
||||
FUNCTION_TEST_RETURN(-1);
|
||||
}
|
||||
|
||||
static uint64_t
|
||||
@ -2062,56 +2107,77 @@ restoreJobResult(const Manifest *manifest, ProtocolParallelJob *job, RegExp *zer
|
||||
{
|
||||
MEM_CONTEXT_TEMP_BEGIN()
|
||||
{
|
||||
const ManifestFile file = manifestFileFind(manifest, varStr(protocolParallelJobKey(job)));
|
||||
bool zeroed = restoreFileZeroed(file.name, zeroExp);
|
||||
bool copy = pckReadBoolP(protocolParallelJobResult(job));
|
||||
PackRead *const jobResult = protocolParallelJobResult(job);
|
||||
|
||||
String *log = strCatZ(strNew(), "restore");
|
||||
|
||||
// Note if file was zeroed (i.e. selective restore)
|
||||
if (zeroed)
|
||||
strCatZ(log, " zeroed");
|
||||
|
||||
// Add filename
|
||||
strCatFmt(log, " file %s", strZ(restoreFilePgPath(manifest, file.name)));
|
||||
|
||||
// If not copied and not zeroed add details to explain why it was not copied
|
||||
if (!copy && !zeroed)
|
||||
while (!pckReadNullP(jobResult))
|
||||
{
|
||||
strCatZ(log, " - ");
|
||||
const ManifestFile file = manifestFileFind(manifest, pckReadStrP(jobResult));
|
||||
const bool zeroed = restoreFileZeroed(file.name, zeroExp);
|
||||
const RestoreResult result = (RestoreResult)pckReadU32P(jobResult);
|
||||
|
||||
// On force we match on size and modification time
|
||||
if (cfgOptionBool(cfgOptForce))
|
||||
{
|
||||
strCatFmt(
|
||||
log, "exists and matches size %" PRIu64 " and modification time %" PRIu64, file.size,
|
||||
(uint64_t)file.timestamp);
|
||||
}
|
||||
// Else a checksum delta or file is zero-length
|
||||
else
|
||||
{
|
||||
strCatZ(log, "exists and ");
|
||||
String *log = strCatZ(strNew(), "restore");
|
||||
|
||||
// No need to copy zero-length files
|
||||
if (file.size == 0)
|
||||
// Note if file was zeroed (i.e. selective restore)
|
||||
if (zeroed)
|
||||
strCatZ(log, " zeroed");
|
||||
|
||||
// Add filename
|
||||
strCatFmt(log, " file %s", strZ(restoreFilePgPath(manifest, file.name)));
|
||||
|
||||
// If preserved add details to explain why it was not copied or zeroed
|
||||
if (result == restoreResultPreserve)
|
||||
{
|
||||
strCatZ(log, " - ");
|
||||
|
||||
// On force we match on size and modification time
|
||||
if (cfgOptionBool(cfgOptForce))
|
||||
{
|
||||
strCatZ(log, "is zero size");
|
||||
strCatFmt(
|
||||
log, "exists and matches size %" PRIu64 " and modification time %" PRIu64, file.size,
|
||||
(uint64_t)file.timestamp);
|
||||
}
|
||||
// The file matched the manifest checksum so did not need to be copied
|
||||
// Else a checksum delta or file is zero-length
|
||||
else
|
||||
strCatZ(log, "matches backup");
|
||||
{
|
||||
strCatZ(log, "exists and ");
|
||||
|
||||
// No need to copy zero-length files
|
||||
if (file.size == 0)
|
||||
{
|
||||
strCatZ(log, "is zero size");
|
||||
}
|
||||
// The file matched the manifest checksum so did not need to be copied
|
||||
else
|
||||
strCatZ(log, "matches backup");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Add bundle info
|
||||
strCatZ(log, " (");
|
||||
|
||||
if (file.bundleId != 0)
|
||||
{
|
||||
ASSERT(varUInt64(protocolParallelJobKey(job)) == file.bundleId);
|
||||
|
||||
strCatZ(log, "bundle ");
|
||||
|
||||
if (file.reference != NULL)
|
||||
strCatFmt(log, "%s/", strZ(file.reference));
|
||||
|
||||
strCatFmt(log, "%" PRIu64 "/%" PRIu64 ", ", file.bundleId, file.bundleOffset);
|
||||
}
|
||||
|
||||
// Add size and percent complete
|
||||
sizeRestored += file.size;
|
||||
strCatFmt(log, "%s, %.2lf%%)", strZ(strSizeFormat(file.size)), (double)sizeRestored * 100.00 / (double)sizeTotal);
|
||||
|
||||
// If not zero-length add the checksum
|
||||
if (file.size != 0 && !zeroed)
|
||||
strCatFmt(log, " checksum %s", file.checksumSha1);
|
||||
|
||||
LOG_DETAIL_PID(protocolParallelJobProcessId(job), strZ(log));
|
||||
}
|
||||
|
||||
// Add size and percent complete
|
||||
sizeRestored += file.size;
|
||||
strCatFmt(log, " (%s, %.2lf%%)", strZ(strSizeFormat(file.size)), (double)sizeRestored * 100.00 / (double)sizeTotal);
|
||||
|
||||
// If not zero-length add the checksum
|
||||
if (file.size != 0 && !zeroed)
|
||||
strCatFmt(log, " checksum %s", file.checksumSha1);
|
||||
|
||||
LOG_DETAIL_PID(protocolParallelJobProcessId(job), strZ(log));
|
||||
}
|
||||
MEM_CONTEXT_TEMP_END();
|
||||
|
||||
@ -2179,68 +2245,100 @@ static ProtocolParallelJob *restoreJobCallback(void *data, unsigned int clientId
|
||||
RestoreJobData *jobData = data;
|
||||
|
||||
// Determine where to begin scanning the queue (we'll stop when we get back here)
|
||||
ProtocolCommand *command = protocolCommandNew(PROTOCOL_COMMAND_RESTORE_FILE);
|
||||
PackWrite *param = NULL;
|
||||
int queueIdx = (int)(clientIdx % lstSize(jobData->queueList));
|
||||
int queueEnd = queueIdx;
|
||||
|
||||
// Create restore job
|
||||
do
|
||||
{
|
||||
List *queue = *(List **)lstGet(jobData->queueList, (unsigned int)queueIdx);
|
||||
bool fileAdded = false;
|
||||
const String *fileName = NULL;
|
||||
uint64_t bundleId = 0;
|
||||
const String *reference = NULL;
|
||||
|
||||
if (!lstEmpty(queue))
|
||||
while (!lstEmpty(queue))
|
||||
{
|
||||
const ManifestFile file = manifestFileUnpack(jobData->manifest, *(ManifestFilePack **)lstGet(queue, 0));
|
||||
|
||||
// Create restore job
|
||||
ProtocolCommand *command = protocolCommandNew(PROTOCOL_COMMAND_RESTORE_FILE);
|
||||
PackWrite *const param = protocolCommandParam(command);
|
||||
// Break if bundled files have already been added and 1) the bundleId has changed or 2) the reference has changed
|
||||
if (fileAdded && (bundleId != file.bundleId || !strEq(reference, file.reference)))
|
||||
break;
|
||||
|
||||
const String *const repoPath = strNewFmt(
|
||||
STORAGE_REPO_BACKUP "/%s/",
|
||||
strZ(file.reference != NULL ? file.reference : manifestData(jobData->manifest)->backupLabel));
|
||||
// Add common parameters before first file
|
||||
if (param == NULL)
|
||||
{
|
||||
param = protocolCommandParam(command);
|
||||
|
||||
const String *const repoPath = strNewFmt(
|
||||
STORAGE_REPO_BACKUP "/%s/",
|
||||
strZ(file.reference != NULL ? file.reference : manifestData(jobData->manifest)->backupLabel));
|
||||
|
||||
if (file.bundleId != 0)
|
||||
{
|
||||
pckWriteStrP(param, strNewFmt("%s" MANIFEST_PATH_BUNDLE "/%" PRIu64, strZ(repoPath), file.bundleId));
|
||||
bundleId = file.bundleId;
|
||||
reference = file.reference;
|
||||
}
|
||||
else
|
||||
{
|
||||
pckWriteStrP(
|
||||
param,
|
||||
strNewFmt(
|
||||
"%s%s%s", strZ(repoPath), strZ(file.name),
|
||||
strZ(compressExtStr(manifestData(jobData->manifest)->backupOptionCompressType))));
|
||||
fileName = file.name;
|
||||
}
|
||||
|
||||
pckWriteU32P(param, jobData->repoIdx);
|
||||
pckWriteU32P(param, manifestData(jobData->manifest)->backupOptionCompressType);
|
||||
pckWriteTimeP(param, manifestData(jobData->manifest)->backupTimestampCopyStart);
|
||||
pckWriteBoolP(param, cfgOptionBool(cfgOptDelta));
|
||||
pckWriteBoolP(param, cfgOptionBool(cfgOptDelta) && cfgOptionBool(cfgOptForce));
|
||||
pckWriteStrP(param, jobData->cipherSubPass);
|
||||
|
||||
fileAdded = true;
|
||||
}
|
||||
|
||||
pckWriteStrP(param, restoreFilePgPath(jobData->manifest, file.name));
|
||||
pckWriteStrP(param, STR(file.checksumSha1));
|
||||
pckWriteU64P(param, file.size);
|
||||
pckWriteTimeP(param, file.timestamp);
|
||||
pckWriteModeP(param, file.mode);
|
||||
pckWriteBoolP(param, restoreFileZeroed(file.name, jobData->zeroExp));
|
||||
pckWriteStrP(param, restoreManifestOwnerReplace(file.user, jobData->rootReplaceUser));
|
||||
pckWriteStrP(param, restoreManifestOwnerReplace(file.group, jobData->rootReplaceGroup));
|
||||
|
||||
if (file.bundleId != 0)
|
||||
{
|
||||
pckWriteStrP(param, strNewFmt("%s" MANIFEST_PATH_BUNDLE "/%" PRIu64, strZ(repoPath), file.bundleId));
|
||||
pckWriteBoolP(param, true);
|
||||
pckWriteU64P(param, file.bundleOffset);
|
||||
pckWriteU64P(param, file.sizeRepo);
|
||||
}
|
||||
else
|
||||
{
|
||||
pckWriteStrP(
|
||||
param,
|
||||
strNewFmt(
|
||||
"%s%s%s", strZ(repoPath), strZ(file.name),
|
||||
strZ(compressExtStr(manifestData(jobData->manifest)->backupOptionCompressType))));
|
||||
pckWriteBoolP(param, false);
|
||||
}
|
||||
|
||||
pckWriteU32P(param, jobData->repoIdx);
|
||||
pckWriteU32P(param, manifestData(jobData->manifest)->backupOptionCompressType);
|
||||
pckWriteStrP(param, restoreFilePgPath(jobData->manifest, file.name));
|
||||
pckWriteStrP(param, STR(file.checksumSha1));
|
||||
pckWriteBoolP(param, restoreFileZeroed(file.name, jobData->zeroExp));
|
||||
pckWriteU64P(param, file.size);
|
||||
pckWriteTimeP(param, file.timestamp);
|
||||
pckWriteModeP(param, file.mode);
|
||||
pckWriteStrP(param, restoreManifestOwnerReplace(file.user, jobData->rootReplaceUser));
|
||||
pckWriteStrP(param, restoreManifestOwnerReplace(file.group, jobData->rootReplaceGroup));
|
||||
pckWriteTimeP(param, manifestData(jobData->manifest)->backupTimestampCopyStart);
|
||||
pckWriteBoolP(param, cfgOptionBool(cfgOptDelta));
|
||||
pckWriteBoolP(param, cfgOptionBool(cfgOptDelta) && cfgOptionBool(cfgOptForce));
|
||||
pckWriteStrP(param, jobData->cipherSubPass);
|
||||
pckWriteStrP(param, file.name);
|
||||
|
||||
// Remove job from the queue
|
||||
lstRemoveIdx(queue, 0);
|
||||
|
||||
// Break if the file is not bundled
|
||||
if (bundleId == 0)
|
||||
break;
|
||||
}
|
||||
|
||||
if (fileAdded)
|
||||
{
|
||||
// Assign job to result
|
||||
MEM_CONTEXT_PRIOR_BEGIN()
|
||||
{
|
||||
result = protocolParallelJobNew(VARSTR(file.name), command);
|
||||
result = protocolParallelJobNew(bundleId != 0 ? VARUINT64(bundleId) : VARSTR(fileName), command);
|
||||
}
|
||||
MEM_CONTEXT_PRIOR_END();
|
||||
|
||||
// Break out of the loop early since we found a job
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -235,6 +235,11 @@ manifestFilePack(const Manifest *const manifest, const ManifestFile *const file)
FUNCTION_TEST_PARAM(MANIFEST_FILE, file);
FUNCTION_TEST_END();

ASSERT(manifest != NULL);
ASSERT(file != NULL);

CHECK(AssertError, file->size > 0 || file->bundleId == 0, "zero-length files may not be bundled");

uint8_t buffer[512];
size_t bufferPos = 0;
@ -178,11 +178,26 @@ testRun(void)
|
||||
"acefile", .compressType = compressTypeGz, .cipherType = cipherTypeAes256Cbc, .cipherPass = "badpass",
|
||||
.comment = "create a compressed encrypted repo file");
|
||||
|
||||
List *fileList = lstNewP(sizeof(RestoreFile));
|
||||
|
||||
RestoreFile file =
|
||||
{
|
||||
.name = STRDEF("normal"),
|
||||
.checksum = STRDEF("ffffffffffffffffffffffffffffffffffffffff"),
|
||||
.size = 7,
|
||||
.timeModified = 1557432154,
|
||||
.mode = 0600,
|
||||
.zero = false,
|
||||
.user = NULL,
|
||||
.group = NULL,
|
||||
};
|
||||
|
||||
lstAdd(fileList, &file);
|
||||
|
||||
TEST_ERROR(
|
||||
restoreFile(
|
||||
strNewFmt(STORAGE_REPO_BACKUP "/%s/%s.gz", strZ(repoFileReferenceFull), strZ(repoFile1)), repoIdx, 0, NULL,
|
||||
compressTypeGz, STRDEF("normal"), STRDEF("ffffffffffffffffffffffffffffffffffffffff"), false, 7, 1557432154, 0600,
|
||||
TEST_USER_STR, TEST_GROUP_STR, 0, false, false, STRDEF("badpass")),
|
||||
strNewFmt(STORAGE_REPO_BACKUP "/%s/%s.gz", strZ(repoFileReferenceFull), strZ(repoFile1)), repoIdx, compressTypeGz,
|
||||
0, false, false, STRDEF("badpass"), fileList),
|
||||
ChecksumError,
|
||||
"error restoring 'normal': actual checksum 'd1cd8a7d11daa26814b93eb604e1d49ab4b43770' does not match expected checksum"
|
||||
" 'ffffffffffffffffffffffffffffffffffffffff'");
|
||||
@ -2220,7 +2235,8 @@ testRun(void)
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/postgresql.conf (10B, [PCT]) checksum"
|
||||
" 1a49a3c2240449fee1422e4afcf44d5b96378511\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/PG_VERSION (4B, [PCT]) checksum b74d60e763728399bcd3fb63f7dd1f97b46c6b44\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/size-mismatch (1B, [PCT]) checksum c032adc1ff629c9b66f22749ad667e6beadf144b\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/size-mismatch (1B, [PCT]) checksum"
|
||||
" c032adc1ff629c9b66f22749ad667e6beadf144b\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/tablespace_map (0B, [PCT])\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/pg_tblspc/1/16384/PG_VERSION (4B, [PCT])"
|
||||
" checksum b74d60e763728399bcd3fb63f7dd1f97b46c6b44\n"
|
||||
@ -2279,6 +2295,9 @@ testRun(void)
|
||||
hrnCfgArgRawZ(argList, cfgOptLinkMap, "pg_xact=../xact");
|
||||
HRN_CFG_LOAD(cfgCmdRestore, argList);
|
||||
|
||||
#define TEST_LABEL_FULL "20161219-212741F"
|
||||
#define TEST_LABEL_DIFF "20161219-212741F_20161219-212800D"
|
||||
#define TEST_LABEL_INCR "20161219-212741F_20161219-212900I"
|
||||
#define TEST_LABEL "20161219-212741F_20161219-212918I"
|
||||
#define TEST_PGDATA MANIFEST_TARGET_PGDATA "/"
|
||||
#define TEST_REPO_PATH STORAGE_REPO_BACKUP "/" TEST_LABEL "/" TEST_PGDATA
|
||||
@ -2290,7 +2309,7 @@ testRun(void)
|
||||
manifest->pub.data.backupLabel = STRDEF(TEST_LABEL);
|
||||
manifest->pub.data.pgVersion = PG_VERSION_10;
|
||||
manifest->pub.data.pgCatalogVersion = hrnPgCatalogVersion(PG_VERSION_10);
|
||||
manifest->pub.data.backupType = backupTypeFull;
|
||||
manifest->pub.data.backupType = backupTypeIncr;
|
||||
manifest->pub.data.backupTimestampCopyStart = 1482182861; // So file timestamps should be less than this
|
||||
|
||||
// Data directory
|
||||
@ -2319,6 +2338,15 @@ testRun(void)
|
||||
.checksumSha1 = "5e2b96c19c4f5c63a5afa2de504d29fe64a4c908"});
|
||||
HRN_STORAGE_PUT(storageRepoWrite(), TEST_REPO_PATH PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, fileBuffer);
|
||||
|
||||
// global/888
|
||||
manifestFileAdd(
|
||||
manifest,
|
||||
&(ManifestFile){
|
||||
.name = STRDEF(TEST_PGDATA PG_PATH_GLOBAL "/888"), .size = 0, .timestamp = 1482182860,
|
||||
.mode = 0600, .group = groupName(), .user = userName(),
|
||||
.checksumSha1 = HASH_TYPE_SHA1_ZERO});
|
||||
HRN_STORAGE_PUT_EMPTY(storageRepoWrite(), TEST_REPO_PATH PG_PATH_GLOBAL "/888");
|
||||
|
||||
// global/999
|
||||
manifestFileAdd(
|
||||
manifest,
|
||||
@ -2334,9 +2362,28 @@ testRun(void)
|
||||
&(ManifestFile){
|
||||
.name = STRDEF(TEST_PGDATA PG_FILE_PGVERSION), .size = 4, .sizeRepo = 4, .timestamp = 1482182860,
|
||||
.mode = 0600, .group = groupName(), .user = userName(), .bundleId = 1, .bundleOffset = 0,
|
||||
.reference = STRDEF(TEST_LABEL), .checksumSha1 = "8dbabb96e032b8d9f1993c0e4b9141e71ade01a1"});
|
||||
.reference = NULL, .checksumSha1 = "8dbabb96e032b8d9f1993c0e4b9141e71ade01a1"});
|
||||
manifestFileAdd(
|
||||
manifest,
|
||||
&(ManifestFile){
|
||||
.name = STRDEF(TEST_PGDATA "yyy"), .size = 3, .sizeRepo = 3, .timestamp = 1482182860,
|
||||
.mode = 0600, .group = groupName(), .user = userName(), .bundleId = 1, .bundleOffset = 8,
|
||||
.reference = NULL, .checksumSha1 = "186154712b2d5f6791d85b9a0987b98fa231779c"});
|
||||
manifestFileAdd(
|
||||
manifest,
|
||||
&(ManifestFile){
|
||||
.name = STRDEF(TEST_PGDATA "xxxxx"), .size = 5, .sizeRepo = 5, .timestamp = 1482182860,
|
||||
.mode = 0600, .group = groupName(), .user = userName(), .bundleId = 1, .bundleOffset = 11,
|
||||
.reference = NULL, .checksumSha1 = "9addbf544119efa4a64223b649750a510f0d463f"});
|
||||
manifestFileAdd(
|
||||
manifest,
|
||||
&(ManifestFile){
|
||||
.name = STRDEF(TEST_PGDATA "zz"), .size = 2, .sizeRepo = 2, .timestamp = 1482182860,
|
||||
.mode = 0600, .group = groupName(), .user = userName(), .bundleId = 1, .bundleOffset = 17,
|
||||
.reference = NULL, .checksumSha1 = "d7dacae2c968388960bf8970080a980ed5c5dcb7"});
|
||||
HRN_STORAGE_PUT_Z(
|
||||
storageRepoWrite(), STORAGE_REPO_BACKUP "/" TEST_LABEL "/bundle/1", PG_VERSION_94_STR "\n" PG_VERSION_94_STR "\n");
|
||||
storageRepoWrite(), STORAGE_REPO_BACKUP "/" TEST_LABEL "/bundle/1",
|
||||
PG_VERSION_94_STR "\n" PG_VERSION_94_STR "\nyyyxxxxxAzzA");
|
||||
|
||||
// base directory
|
||||
manifestPathAdd(
|
||||
@ -2371,6 +2418,51 @@ testRun(void)
|
||||
.checksumSha1 = "4d7b2a36c5387decf799352a3751883b7ceb96aa"});
|
||||
HRN_STORAGE_PUT(storageRepoWrite(), TEST_REPO_PATH "base/1/2", fileBuffer);
|
||||
|
||||
// base/1/10
|
||||
fileBuffer = bufNew(8194);
|
||||
memset(bufPtr(fileBuffer), 10, bufSize(fileBuffer));
|
||||
bufPtr(fileBuffer)[0] = 0xFF;
|
||||
bufPtr(fileBuffer)[8193] = 0xFF;
|
||||
bufUsedSet(fileBuffer, bufSize(fileBuffer));
|
||||
|
||||
manifestFileAdd(
|
||||
manifest,
|
||||
&(ManifestFile){
|
||||
.name = STRDEF(TEST_PGDATA "base/1/10"), .size = 8192, .sizeRepo = 8192, .timestamp = 1482182860,
|
||||
.mode = 0600, .group = groupName(), .user = userName(), .bundleId = 1, .bundleOffset = 1,
|
||||
.reference = STRDEF(TEST_LABEL_FULL), .checksumSha1 = "28757c756c03c37aca13692cb719c18d1510c190"});
|
||||
HRN_STORAGE_PUT(storageRepoWrite(), STORAGE_REPO_BACKUP "/" TEST_LABEL_FULL "/bundle/1", fileBuffer);
|
||||
|
||||
// base/1/20 and base/1/21
|
||||
manifestFileAdd(
|
||||
manifest,
|
||||
&(ManifestFile){
|
||||
.name = STRDEF(TEST_PGDATA "base/1/20"), .size = 1, .sizeRepo = 1, .timestamp = 1482182860,
|
||||
.mode = 0600, .group = groupName(), .user = userName(), .bundleId = 2, .bundleOffset = 1,
|
||||
.reference = STRDEF(TEST_LABEL_DIFF), .checksumSha1 = "c032adc1ff629c9b66f22749ad667e6beadf144b"});
|
||||
manifestFileAdd(
|
||||
manifest,
|
||||
&(ManifestFile){
|
||||
.name = STRDEF(TEST_PGDATA "base/1/21"), .size = 1, .sizeRepo = 1, .timestamp = 1482182860,
|
||||
.mode = 0600, .group = groupName(), .user = userName(), .bundleId = 2, .bundleOffset = 2,
|
||||
.reference = STRDEF(TEST_LABEL_DIFF), .checksumSha1 = "e9d71f5ee7c92d6dc9e92ffdad17b8bd49418f98"});
|
||||
HRN_STORAGE_PUT_Z(storageRepoWrite(), STORAGE_REPO_BACKUP "/" TEST_LABEL_DIFF "/bundle/2", "aXb");
|
||||
|
||||
// base/1/30 and base/1/31
|
||||
manifestFileAdd(
|
||||
manifest,
|
||||
&(ManifestFile){
|
||||
.name = STRDEF(TEST_PGDATA "base/1/30"), .size = 1, .sizeRepo = 1, .timestamp = 1482182860,
|
||||
.mode = 0600, .group = groupName(), .user = userName(), .bundleId = 2, .bundleOffset = 1,
|
||||
.reference = STRDEF(TEST_LABEL_INCR), .checksumSha1 = "c032adc1ff629c9b66f22749ad667e6beadf144b"});
|
||||
manifestFileAdd(
|
||||
manifest,
|
||||
&(ManifestFile){
|
||||
.name = STRDEF(TEST_PGDATA "base/1/31"), .size = 1, .sizeRepo = 1, .timestamp = 1482182860,
|
||||
.mode = 0600, .group = groupName(), .user = userName(), .bundleId = 2, .bundleOffset = 2,
|
||||
.reference = STRDEF(TEST_LABEL_INCR), .checksumSha1 = "e9d71f5ee7c92d6dc9e92ffdad17b8bd49418f98"});
|
||||
HRN_STORAGE_PUT_Z(storageRepoWrite(), STORAGE_REPO_BACKUP "/" TEST_LABEL_INCR "/bundle/2", "aXb");
|
||||
|
||||
// system db name
|
||||
manifestDbAdd(manifest, &(ManifestDb){.name = STRDEF("template1"), .id = 1, .lastSystemId = 12168});
|
||||
|
||||
@ -2532,6 +2624,9 @@ testRun(void)
|
||||
HRN_STORAGE_PATH_CREATE(storagePgWrite(), "bogus1/bogus2");
|
||||
HRN_STORAGE_PATH_CREATE(storagePgWrite(), PG_PATH_GLOBAL "/bogus3");
|
||||
|
||||
// Create yyy file so it is not copied
|
||||
HRN_STORAGE_PUT_Z(storagePgWrite(), "yyy", "yyy", .modeFile = 0600);
|
||||
|
||||
// Add a few bogus links to be deleted
|
||||
THROW_ON_SYS_ERROR(
|
||||
symlink("../wal", strZ(strNewFmt("%s/pg_wal2", strZ(pgPath)))) == -1, FileOpenError,
|
||||
@ -2578,10 +2673,28 @@ testRun(void)
|
||||
" checksum 8dbabb96e032b8d9f1993c0e4b9141e71ade01a1\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/base/16384/PG_VERSION (4B, [PCT])"
|
||||
" checksum 8dbabb96e032b8d9f1993c0e4b9141e71ade01a1\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/base/1/PG_VERSION (4B, [PCT]) checksum"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/base/1/10 (bundle 20161219-212741F/1/1, 8KB, [PCT])"
|
||||
" checksum 28757c756c03c37aca13692cb719c18d1510c190\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/PG_VERSION (bundle 1/0, 4B, [PCT]) checksum"
|
||||
" 8dbabb96e032b8d9f1993c0e4b9141e71ade01a1\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/PG_VERSION (4B, [PCT]) checksum 8dbabb96e032b8d9f1993c0e4b9141e71ade01a1\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/base/1/PG_VERSION (bundle 1/4, 4B, [PCT]) checksum"
|
||||
" 8dbabb96e032b8d9f1993c0e4b9141e71ade01a1\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/yyy - exists and matches backup (bundle 1/8, 3B, [PCT]) checksum"
|
||||
" 186154712b2d5f6791d85b9a0987b98fa231779c\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/xxxxx (bundle 1/11, 5B, [PCT]) checksum"
|
||||
" 9addbf544119efa4a64223b649750a510f0d463f\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/zz (bundle 1/17, 2B, [PCT]) checksum"
|
||||
" d7dacae2c968388960bf8970080a980ed5c5dcb7\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/base/1/20 (bundle 20161219-212741F_20161219-212800D/2/1, 1B, [PCT]) checksum"
|
||||
" c032adc1ff629c9b66f22749ad667e6beadf144b\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/base/1/21 (bundle 20161219-212741F_20161219-212800D/2/2, 1B, [PCT]) checksum"
|
||||
" e9d71f5ee7c92d6dc9e92ffdad17b8bd49418f98\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/base/1/30 (bundle 20161219-212741F_20161219-212900I/2/1, 1B, [PCT]) checksum"
|
||||
" c032adc1ff629c9b66f22749ad667e6beadf144b\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/base/1/31 (bundle 20161219-212741F_20161219-212900I/2/2, 1B, [PCT]) checksum"
|
||||
" e9d71f5ee7c92d6dc9e92ffdad17b8bd49418f98\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/global/999 (0B, [PCT])\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/global/888 (0B, [PCT])\n"
|
||||
"P00 DETAIL: sync path '" TEST_PATH "/config'\n"
|
||||
"P00 DETAIL: sync path '" TEST_PATH "/pg'\n"
|
||||
"P00 DETAIL: sync path '" TEST_PATH "/pg/base'\n"
|
||||
@ -2595,7 +2708,7 @@ testRun(void)
|
||||
"P00 DETAIL: sync path '" TEST_PATH "/pg/pg_tblspc/1/PG_10_201707211'\n"
|
||||
"P00 INFO: restore global/pg_control (performed last to ensure aborted restores cannot be started)\n"
|
||||
"P00 DETAIL: sync path '" TEST_PATH "/pg/global'\n"
|
||||
"P00 INFO: restore size = [SIZE], file total = 11");
|
||||
"P00 INFO: restore size = [SIZE], file total = 20");
|
||||
|
||||
testRestoreCompare(
|
||||
storagePg(), NULL, manifest,
|
||||
@ -2603,7 +2716,12 @@ testRun(void)
|
||||
"PG_VERSION {file, s=4, t=1482182860}\n"
|
||||
"base {path}\n"
|
||||
"base/1 {path}\n"
|
||||
"base/1/10 {file, s=8192, t=1482182860}\n"
|
||||
"base/1/2 {file, s=8192, t=1482182860}\n"
|
||||
"base/1/20 {file, s=1, t=1482182860}\n"
|
||||
"base/1/21 {file, s=1, t=1482182860}\n"
|
||||
"base/1/30 {file, s=1, t=1482182860}\n"
|
||||
"base/1/31 {file, s=1, t=1482182860}\n"
|
||||
"base/1/PG_VERSION {file, s=4, t=1482182860}\n"
|
||||
"base/16384 {path}\n"
|
||||
"base/16384/16385 {file, s=16384, t=1482182860}\n"
|
||||
@ -2612,6 +2730,7 @@ testRun(void)
|
||||
"base/32768/32769 {file, s=32768, t=1482182860}\n"
|
||||
"base/32768/PG_VERSION {file, s=4, t=1482182860}\n"
|
||||
"global {path}\n"
|
||||
"global/888 {file, s=0, t=1482182860}\n"
|
||||
"global/999 {file, s=0, t=1482182860}\n"
|
||||
"global/pg_control {file, s=8192, t=1482182860}\n"
|
||||
"pg_hba.conf {link, d=../config/pg_hba.conf}\n"
|
||||
@ -2619,7 +2738,10 @@ testRun(void)
|
||||
"pg_tblspc/1 {link, d=" TEST_PATH "/ts/1}\n"
|
||||
"pg_wal {link, d=../wal}\n"
|
||||
"pg_xact {link, d=../xact}\n"
|
||||
"postgresql.conf {link, d=../config/postgresql.conf}\n");
|
||||
"postgresql.conf {link, d=../config/postgresql.conf}\n"
|
||||
"xxxxx {file, s=5, t=1482182860}\n"
|
||||
"yyy {file, s=3, t=1482182860}\n"
|
||||
"zz {file, s=2, t=1482182860}\n");
|
||||
|
||||
testRestoreCompare(
|
||||
storagePg(), STRDEF("pg_tblspc/1"), manifest,
|
||||
@ -2726,11 +2848,28 @@ testRun(void)
|
||||
" checksum 8dbabb96e032b8d9f1993c0e4b9141e71ade01a1\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/base/16384/PG_VERSION - exists and matches backup (4B, [PCT])"
|
||||
" checksum 8dbabb96e032b8d9f1993c0e4b9141e71ade01a1\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/base/1/PG_VERSION - exists and matches backup (4B, [PCT])"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/base/1/10 - exists and matches backup (bundle 20161219-212741F/1/1, 8KB,"
|
||||
" [PCT]) checksum 28757c756c03c37aca13692cb719c18d1510c190\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/PG_VERSION - exists and matches backup (bundle 1/0, 4B, [PCT])"
|
||||
" checksum 8dbabb96e032b8d9f1993c0e4b9141e71ade01a1\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/PG_VERSION - exists and matches backup (4B, [PCT])"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/base/1/PG_VERSION - exists and matches backup (bundle 1/4, 4B, [PCT])"
|
||||
" checksum 8dbabb96e032b8d9f1993c0e4b9141e71ade01a1\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/yyy - exists and matches backup (bundle 1/8, 3B, [PCT]) checksum"
|
||||
" 186154712b2d5f6791d85b9a0987b98fa231779c\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/xxxxx - exists and matches backup (bundle 1/11, 5B, [PCT]) checksum"
|
||||
" 9addbf544119efa4a64223b649750a510f0d463f\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/zz - exists and matches backup (bundle 1/17, 2B, [PCT]) checksum"
|
||||
" d7dacae2c968388960bf8970080a980ed5c5dcb7\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/base/1/20 - exists and matches backup (bundle"
|
||||
" 20161219-212741F_20161219-212800D/2/1, 1B, [PCT]) checksum c032adc1ff629c9b66f22749ad667e6beadf144b\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/base/1/21 - exists and matches backup (bundle"
|
||||
" 20161219-212741F_20161219-212800D/2/2, 1B, [PCT]) checksum e9d71f5ee7c92d6dc9e92ffdad17b8bd49418f98\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/base/1/30 - exists and matches backup (bundle"
|
||||
" 20161219-212741F_20161219-212900I/2/1, 1B, [PCT]) checksum c032adc1ff629c9b66f22749ad667e6beadf144b\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/base/1/31 - exists and matches backup (bundle"
|
||||
" 20161219-212741F_20161219-212900I/2/2, 1B, [PCT]) checksum e9d71f5ee7c92d6dc9e92ffdad17b8bd49418f98\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/global/999 - exists and is zero size (0B, [PCT])\n"
|
||||
"P01 DETAIL: restore file " TEST_PATH "/pg/global/888 - exists and is zero size (0B, [PCT])\n"
|
||||
"P00 DETAIL: sync path '" TEST_PATH "/config'\n"
|
||||
"P00 DETAIL: sync path '" TEST_PATH "/pg'\n"
|
||||
"P00 DETAIL: sync path '" TEST_PATH "/pg/base'\n"
|
||||
@ -2744,7 +2883,7 @@ testRun(void)
|
||||
"P00 DETAIL: sync path '" TEST_PATH "/pg/pg_tblspc/1/PG_10_201707211'\n"
|
||||
"P00 INFO: restore global/pg_control (performed last to ensure aborted restores cannot be started)\n"
|
||||
"P00 DETAIL: sync path '" TEST_PATH "/pg/global'\n"
|
||||
"P00 INFO: restore size = [SIZE], file total = 11");
|
||||
"P00 INFO: restore size = [SIZE], file total = 20");
|
||||
|
||||
// Check stanza archive spool path was removed
|
||||
TEST_STORAGE_LIST_EMPTY(storageSpool(), STORAGE_PATH_ARCHIVE);
|
||||
|