2019-07-25 14:34:16 -04:00
|
|
|
/***********************************************************************************************************************************
|
|
|
|
Test Backup Command
|
|
|
|
***********************************************************************************************************************************/
|
2019-12-13 17:14:26 -05:00
|
|
|
#include <utime.h>
|
|
|
|
|
|
|
|
#include "command/stanza/create.h"
|
|
|
|
#include "command/stanza/upgrade.h"
|
2020-03-18 10:10:10 -04:00
|
|
|
#include "common/crypto/hash.h"
|
2019-07-25 14:34:16 -04:00
|
|
|
#include "common/io/bufferRead.h"
|
|
|
|
#include "common/io/bufferWrite.h"
|
|
|
|
#include "common/io/io.h"
|
2020-05-18 19:11:26 -04:00
|
|
|
#include "postgres/interface/static.vendor.h"
|
2019-07-25 14:34:16 -04:00
|
|
|
#include "storage/helper.h"
|
|
|
|
#include "storage/posix/storage.h"
|
|
|
|
|
|
|
|
#include "common/harnessConfig.h"
|
2019-12-13 17:14:26 -05:00
|
|
|
#include "common/harnessPq.h"
|
|
|
|
|
|
|
|
/***********************************************************************************************************************************
Get a list of all files in the backup and a redacted version of the manifest that can be tested against a static string
***********************************************************************************************************************************/
typedef struct TestBackupValidateCallbackData
{
    const Storage *storage;                                         // Storage object when needed (e.g. fileCompressed = true)
    const String *path;                                             // Subpath when storage is specified
    const Manifest *manifest;                                       // Manifest to check for files/links/paths
    const ManifestData *manifestData;                               // Manifest data
    String *content;                                                // String where content should be added
} TestBackupValidateCallbackData;
|
|
|
|
|
|
|
|
void
|
|
|
|
testBackupValidateCallback(void *callbackData, const StorageInfo *info)
|
|
|
|
{
|
|
|
|
TestBackupValidateCallbackData *data = callbackData;
|
|
|
|
|
|
|
|
// Don't include . when it is a path (we'll still include it when it is a link so we can see the destination)
|
|
|
|
if (info->type == storageTypePath && strEq(info->name, DOT_STR))
|
|
|
|
return;
|
|
|
|
|
|
|
|
// Don't include backup.manifest or copy. We'll test that they are present elsewhere
|
|
|
|
if (info->type == storageTypeFile &&
|
|
|
|
(strEqZ(info->name, BACKUP_MANIFEST_FILE) || strEqZ(info->name, BACKUP_MANIFEST_FILE INFO_COPY_EXT)))
|
|
|
|
return;
|
|
|
|
|
2020-03-18 10:10:10 -04:00
|
|
|
// Get manifest name
|
|
|
|
const String *manifestName = info->name;
|
|
|
|
|
2020-07-30 07:49:06 -04:00
|
|
|
strCatFmt(data->content, "%s {", strZ(info->name));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
switch (info->type)
|
|
|
|
{
|
|
|
|
case storageTypeFile:
|
|
|
|
{
|
2020-06-24 12:09:24 -04:00
|
|
|
strCatZ(data->content, "file");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2020-03-18 10:10:10 -04:00
|
|
|
// Calculate checksum/size and decompress if needed
|
|
|
|
// ---------------------------------------------------------------------------------------------------------------------
|
|
|
|
StorageRead *read = storageNewReadP(
|
2020-07-30 07:49:06 -04:00
|
|
|
data->storage, data->path != NULL ? strNewFmt("%s/%s", strZ(data->path), strZ(info->name)) : info->name);
|
2020-03-06 14:41:03 -05:00
|
|
|
|
2020-03-18 10:10:10 -04:00
|
|
|
if (data->manifestData->backupOptionCompressType != compressTypeNone)
|
2019-12-13 17:14:26 -05:00
|
|
|
{
|
2020-03-18 10:10:10 -04:00
|
|
|
ioFilterGroupAdd(
|
|
|
|
ioReadFilterGroup(storageReadIo(read)), decompressFilter(data->manifestData->backupOptionCompressType));
|
|
|
|
manifestName = strSubN(
|
|
|
|
info->name, 0, strSize(info->name) - strSize(compressExtStr(data->manifestData->backupOptionCompressType)));
|
|
|
|
}
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2020-03-18 10:10:10 -04:00
|
|
|
ioFilterGroupAdd(ioReadFilterGroup(storageReadIo(read)), cryptoHashNew(HASH_TYPE_SHA1_STR));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2020-03-18 10:10:10 -04:00
|
|
|
uint64_t size = bufUsed(storageGetP(read));
|
|
|
|
const String *checksum = varStr(
|
|
|
|
ioFilterGroupResult(ioReadFilterGroup(storageReadIo(read)), CRYPTO_HASH_FILTER_TYPE_STR));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
strCatFmt(data->content, ", s=%" PRIu64, size);
|
|
|
|
|
|
|
|
// Check against the manifest
|
2020-03-18 10:10:10 -04:00
|
|
|
// ---------------------------------------------------------------------------------------------------------------------
|
2019-12-13 17:14:26 -05:00
|
|
|
const ManifestFile *file = manifestFileFind(data->manifest, manifestName);
|
|
|
|
|
2020-03-18 10:10:10 -04:00
|
|
|
// Test size and repo-size. If compressed then set the repo-size to size so it will not be in test output. Even the same
|
|
|
|
// compression algorithm can give slightly different results based on the version so repo-size is not deterministic for
|
|
|
|
// compression.
|
2019-12-13 17:14:26 -05:00
|
|
|
if (size != file->size)
|
2020-07-30 07:49:06 -04:00
|
|
|
THROW_FMT(AssertError, "'%s' size does match manifest", strZ(manifestName));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
if (info->size != file->sizeRepo)
|
2020-07-30 07:49:06 -04:00
|
|
|
THROW_FMT(AssertError, "'%s' repo size does match manifest", strZ(manifestName));
|
2020-03-18 10:10:10 -04:00
|
|
|
|
|
|
|
if (data->manifestData->backupOptionCompressType != compressTypeNone)
|
|
|
|
((ManifestFile *)file)->sizeRepo = file->size;
|
|
|
|
|
|
|
|
// Test the checksum. pg_control and WAL headers have different checksums depending on cpu architecture so remove
|
|
|
|
// the checksum from the test output.
|
|
|
|
if (!strEqZ(checksum, file->checksumSha1))
|
2020-07-30 07:49:06 -04:00
|
|
|
THROW_FMT(AssertError, "'%s' checksum does match manifest", strZ(manifestName));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2020-03-18 10:10:10 -04:00
|
|
|
if (strEqZ(manifestName, MANIFEST_TARGET_PGDATA "/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL) ||
|
|
|
|
strBeginsWith(
|
2020-07-30 07:49:06 -04:00
|
|
|
manifestName, strNewFmt(MANIFEST_TARGET_PGDATA "/%s/", strZ(pgWalPath(data->manifestData->pgVersion)))))
|
2020-03-18 10:10:10 -04:00
|
|
|
{
|
|
|
|
((ManifestFile *)file)->checksumSha1[0] = '\0';
|
|
|
|
}
|
|
|
|
|
|
|
|
// Test mode, user, group. These values are not in the manifest but we know what they should be based on the default
|
|
|
|
// mode and current user/group.
|
2019-12-13 17:14:26 -05:00
|
|
|
if (info->mode != 0640)
|
2020-07-30 07:49:06 -04:00
|
|
|
THROW_FMT(AssertError, "'%s' mode is not 0640", strZ(manifestName));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
if (!strEqZ(info->user, testUser()))
|
2020-07-30 07:49:06 -04:00
|
|
|
THROW_FMT(AssertError, "'%s' user should be '%s'", strZ(manifestName), testUser());
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
if (!strEqZ(info->group, testGroup()))
|
2020-07-30 07:49:06 -04:00
|
|
|
THROW_FMT(AssertError, "'%s' group should be '%s'", strZ(manifestName), testGroup());
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case storageTypeLink:
|
|
|
|
{
|
2020-07-30 07:49:06 -04:00
|
|
|
strCatFmt(data->content, "link, d=%s", strZ(info->linkDestination));
|
2019-12-13 17:14:26 -05:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case storageTypePath:
|
|
|
|
{
|
2020-06-24 12:09:24 -04:00
|
|
|
strCatZ(data->content, "path");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Check against the manifest
|
2020-03-18 10:10:10 -04:00
|
|
|
// ---------------------------------------------------------------------------------------------------------------------
|
2019-12-13 17:14:26 -05:00
|
|
|
manifestPathFind(data->manifest, info->name);
|
|
|
|
|
2020-03-18 10:10:10 -04:00
|
|
|
// Test mode, user, group. These values are not in the manifest but we know what they should be based on the default
|
|
|
|
// mode and current user/group.
|
2019-12-13 17:14:26 -05:00
|
|
|
if (info->mode != 0750)
|
2020-07-30 07:49:06 -04:00
|
|
|
THROW_FMT(AssertError, "'%s' mode is not 00750", strZ(info->name));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
if (!strEqZ(info->user, testUser()))
|
2020-07-30 07:49:06 -04:00
|
|
|
THROW_FMT(AssertError, "'%s' user should be '%s'", strZ(info->name), testUser());
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
if (!strEqZ(info->group, testGroup()))
|
2020-07-30 07:49:06 -04:00
|
|
|
THROW_FMT(AssertError, "'%s' group should be '%s'", strZ(info->name), testGroup());
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case storageTypeSpecial:
|
|
|
|
{
|
2020-07-30 07:49:06 -04:00
|
|
|
THROW_FMT(AssertError, "unexpected special file '%s'", strZ(info->name));
|
2019-12-13 17:14:26 -05:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-06-24 12:09:24 -04:00
|
|
|
strCatZ(data->content, "}\n");
|
2019-12-13 17:14:26 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
static String *
|
|
|
|
testBackupValidate(const Storage *storage, const String *path)
|
|
|
|
{
|
|
|
|
FUNCTION_HARNESS_BEGIN();
|
|
|
|
FUNCTION_HARNESS_PARAM(STORAGE, storage);
|
|
|
|
FUNCTION_HARNESS_PARAM(STRING, path);
|
|
|
|
FUNCTION_HARNESS_END();
|
|
|
|
|
|
|
|
String *result = strNew("");
|
|
|
|
|
|
|
|
MEM_CONTEXT_TEMP_BEGIN()
|
|
|
|
{
|
|
|
|
// Build a list of files in the backup path and verify against the manifest
|
2020-03-18 10:10:10 -04:00
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
2020-07-30 07:49:06 -04:00
|
|
|
Manifest *manifest = manifestLoadFile(storage, strNewFmt("%s/" BACKUP_MANIFEST_FILE, strZ(path)), cipherTypeNone, NULL);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
TestBackupValidateCallbackData callbackData =
|
|
|
|
{
|
|
|
|
.storage = storage,
|
|
|
|
.path = path,
|
|
|
|
.content = result,
|
|
|
|
.manifest = manifest,
|
2020-03-18 10:10:10 -04:00
|
|
|
.manifestData = manifestData(manifest),
|
2019-12-13 17:14:26 -05:00
|
|
|
};
|
|
|
|
|
|
|
|
storageInfoListP(storage, path, testBackupValidateCallback, &callbackData, .recurse = true, .sortOrder = sortOrderAsc);
|
2020-03-18 10:10:10 -04:00
|
|
|
|
|
|
|
// Make sure both backup.manifest files exist since we skipped them in the callback above
|
2020-07-30 07:49:06 -04:00
|
|
|
if (!storageExistsP(storage, strNewFmt("%s/" BACKUP_MANIFEST_FILE, strZ(path))))
|
2020-03-18 10:10:10 -04:00
|
|
|
THROW(AssertError, BACKUP_MANIFEST_FILE " is missing");
|
|
|
|
|
2020-07-30 07:49:06 -04:00
|
|
|
if (!storageExistsP(storage, strNewFmt("%s/" BACKUP_MANIFEST_FILE INFO_COPY_EXT, strZ(path))))
|
2020-03-18 10:10:10 -04:00
|
|
|
THROW(AssertError, BACKUP_MANIFEST_FILE INFO_COPY_EXT " is missing");
|
|
|
|
|
|
|
|
// Output the manifest to a string and exclude sections that don't need validation. Note that each of these sections should
|
|
|
|
// be considered from automatic validation but adding them to the output will make the tests too noisy. One good technique
|
|
|
|
// would be to remove it from the output only after validation so new values will cause changes in the output.
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
Buffer *manifestSaveBuffer = bufNew(0);
|
|
|
|
manifestSave(manifest, ioBufferWriteNew(manifestSaveBuffer));
|
|
|
|
|
|
|
|
String *manifestEdit = strNew("");
|
|
|
|
StringList *manifestLine = strLstNewSplitZ(strTrim(strNewBuf(manifestSaveBuffer)), "\n");
|
|
|
|
bool bSkipSection = false;
|
|
|
|
|
|
|
|
for (unsigned int lineIdx = 0; lineIdx < strLstSize(manifestLine); lineIdx++)
|
|
|
|
{
|
|
|
|
const String *line = strTrim(strLstGet(manifestLine, lineIdx));
|
|
|
|
|
|
|
|
if (strChr(line, '[') == 0)
|
|
|
|
{
|
|
|
|
const String *section = strSubN(line, 1, strSize(line) - 2);
|
|
|
|
|
|
|
|
if (strEq(section, INFO_SECTION_BACKREST_STR) ||
|
|
|
|
strEq(section, MANIFEST_SECTION_BACKUP_STR) ||
|
|
|
|
strEq(section, MANIFEST_SECTION_BACKUP_DB_STR) ||
|
|
|
|
strEq(section, MANIFEST_SECTION_BACKUP_OPTION_STR) ||
|
|
|
|
strEq(section, MANIFEST_SECTION_DB_STR) ||
|
|
|
|
strEq(section, MANIFEST_SECTION_TARGET_FILE_DEFAULT_STR) ||
|
|
|
|
strEq(section, MANIFEST_SECTION_TARGET_LINK_DEFAULT_STR) ||
|
|
|
|
strEq(section, MANIFEST_SECTION_TARGET_PATH_DEFAULT_STR))
|
|
|
|
{
|
|
|
|
bSkipSection = true;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
bSkipSection = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!bSkipSection)
|
2020-07-30 07:49:06 -04:00
|
|
|
strCatFmt(manifestEdit, "%s\n", strZ(line));
|
2020-03-18 10:10:10 -04:00
|
|
|
}
|
|
|
|
|
2020-07-30 07:49:06 -04:00
|
|
|
strCatFmt(result, "--------\n%s\n", strZ(strTrim(manifestEdit)));
|
2019-12-13 17:14:26 -05:00
|
|
|
}
|
|
|
|
MEM_CONTEXT_TEMP_END();
|
|
|
|
|
|
|
|
FUNCTION_HARNESS_RESULT(STRING, result);
|
|
|
|
}
|
|
|
|
|
|
|
|
/***********************************************************************************************************************************
Generate pq scripts for versions of PostgreSQL
***********************************************************************************************************************************/
typedef struct TestBackupPqScriptParam
{
    VAR_PARAM_HEADER;
    bool startFast;                                                 // Pass start-fast to pg_start_backup()
    bool backupStandby;                                             // Script a backup from the standby (opens a second connection)
    bool errorAfterStart;                                           // Script ends after copy start (backup expected to error)
    bool noWal;                                                     // Don't write test WAL segments
    CompressType walCompressType;                                   // Compress type for the archive files
    unsigned int walTotal;                                          // Total WAL to write
    unsigned int timeline;                                          // Timeline to use for WAL files (0 means timeline 1)
} TestBackupPqScriptParam;
|
|
|
|
|
|
|
|
// Wrapper that allows testBackupPqScript() to be called with named optional parameters, e.g.
// testBackupPqScriptP(PG_VERSION_11, backupTimeStart, .walTotal = 2)
#define testBackupPqScriptP(pgVersion, backupStartTime, ...) \
    testBackupPqScript(pgVersion, backupStartTime, (TestBackupPqScriptParam){VAR_PARAM_INIT, __VA_ARGS__})
|
|
|
|
|
|
|
|
static void
|
|
|
|
testBackupPqScript(unsigned int pgVersion, time_t backupTimeStart, TestBackupPqScriptParam param)
|
|
|
|
{
|
2020-07-30 07:49:06 -04:00
|
|
|
const char *pg1Path = strZ(strNewFmt("%s/pg1", testPath()));
|
|
|
|
const char *pg2Path = strZ(strNewFmt("%s/pg2", testPath()));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2020-01-21 10:29:46 -07:00
|
|
|
// If no timeline specified then use timeline 1
|
|
|
|
param.timeline = param.timeline == 0 ? 1 : param.timeline;
|
|
|
|
|
2019-12-13 17:14:26 -05:00
|
|
|
// Read pg_control to get info about the cluster
|
|
|
|
PgControl pgControl = pgControlFromFile(storagePg());
|
|
|
|
|
|
|
|
// Set archive timeout really small to save time on errors
|
|
|
|
cfgOptionSet(cfgOptArchiveTimeout, cfgSourceParam, varNewDbl(.1));
|
|
|
|
|
|
|
|
uint64_t lsnStart = ((uint64_t)backupTimeStart & 0xFFFFFF00) << 28;
|
|
|
|
uint64_t lsnStop =
|
|
|
|
lsnStart + ((param.walTotal == 0 ? 0 : param.walTotal - 1) * pgControl.walSegmentSize) + (pgControl.walSegmentSize / 2);
|
|
|
|
|
2020-07-30 07:49:06 -04:00
|
|
|
const char *lsnStartStr = strZ(pgLsnToStr(lsnStart));
|
|
|
|
const char *walSegmentStart = strZ(pgLsnToWalSegment(param.timeline, lsnStart, pgControl.walSegmentSize));
|
|
|
|
const char *lsnStopStr = strZ(pgLsnToStr(lsnStop));
|
|
|
|
const char *walSegmentStop = strZ(pgLsnToWalSegment(param.timeline, lsnStop, pgControl.walSegmentSize));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Write WAL segments to the archive
|
|
|
|
// -----------------------------------------------------------------------------------------------------------------------------
|
|
|
|
if (!param.noWal)
|
|
|
|
{
|
|
|
|
InfoArchive *infoArchive = infoArchiveLoadFile(storageRepo(), INFO_ARCHIVE_PATH_FILE_STR, cipherTypeNone, NULL);
|
|
|
|
const String *archiveId = infoArchiveId(infoArchive);
|
2020-01-21 10:29:46 -07:00
|
|
|
StringList *walSegmentList = pgLsnRangeToWalSegmentList(
|
|
|
|
pgControl.version, param.timeline, lsnStart, lsnStop, pgControl.walSegmentSize);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
Buffer *walBuffer = bufNew((size_t)pgControl.walSegmentSize);
|
|
|
|
bufUsedSet(walBuffer, bufSize(walBuffer));
|
|
|
|
memset(bufPtr(walBuffer), 0, bufSize(walBuffer));
|
|
|
|
pgWalTestToBuffer((PgWal){.version = pgControl.version, .systemId = pgControl.systemId}, walBuffer);
|
|
|
|
const String *walChecksum = bufHex(cryptoHashOne(HASH_TYPE_SHA1_STR, walBuffer));
|
|
|
|
|
|
|
|
for (unsigned int walSegmentIdx = 0; walSegmentIdx < strLstSize(walSegmentList); walSegmentIdx++)
|
|
|
|
{
|
|
|
|
StorageWrite *write = storageNewWriteP(
|
|
|
|
storageRepoWrite(),
|
|
|
|
strNewFmt(
|
2020-07-30 07:49:06 -04:00
|
|
|
STORAGE_REPO_ARCHIVE "/%s/%s-%s%s", strZ(archiveId), strZ(strLstGet(walSegmentList, walSegmentIdx)),
|
|
|
|
strZ(walChecksum), strZ(compressExtStr(param.walCompressType))));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2020-03-06 14:41:03 -05:00
|
|
|
if (param.walCompressType != compressTypeNone)
|
|
|
|
ioFilterGroupAdd(ioWriteFilterGroup(storageWriteIo(write)), compressFilter(param.walCompressType, 1));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
storagePutP(write, walBuffer);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// -----------------------------------------------------------------------------------------------------------------------------
|
|
|
|
if (pgVersion == PG_VERSION_95)
|
|
|
|
{
|
|
|
|
ASSERT(!param.backupStandby);
|
|
|
|
ASSERT(!param.errorAfterStart);
|
|
|
|
|
|
|
|
harnessPqScriptSet((HarnessPq [])
|
|
|
|
{
|
|
|
|
// Connect to primary
|
|
|
|
HRNPQ_MACRO_OPEN_GE_92(1, "dbname='postgres' port=5432", PG_VERSION_95, pg1Path, false, NULL, NULL),
|
|
|
|
|
|
|
|
// Get start time
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000),
|
|
|
|
|
|
|
|
// Start backup
|
|
|
|
HRNPQ_MACRO_ADVISORY_LOCK(1, true),
|
2019-12-14 09:53:50 -05:00
|
|
|
HRNPQ_MACRO_IS_IN_BACKUP(1, false),
|
2019-12-13 17:14:26 -05:00
|
|
|
HRNPQ_MACRO_START_BACKUP_84_95(1, param.startFast, lsnStartStr, walSegmentStart),
|
|
|
|
HRNPQ_MACRO_DATABASE_LIST_1(1, "test1"),
|
|
|
|
HRNPQ_MACRO_TABLESPACE_LIST_0(1),
|
|
|
|
|
|
|
|
// Get copy start time
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 999),
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 1000),
|
|
|
|
|
|
|
|
// Stop backup
|
|
|
|
HRNPQ_MACRO_STOP_BACKUP_LE_95(1, lsnStopStr, walSegmentStop),
|
|
|
|
|
|
|
|
// Get stop time
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 2000),
|
|
|
|
|
|
|
|
HRNPQ_MACRO_DONE()
|
|
|
|
});
|
|
|
|
}
|
|
|
|
// -----------------------------------------------------------------------------------------------------------------------------
|
|
|
|
else if (pgVersion == PG_VERSION_96)
|
|
|
|
{
|
|
|
|
ASSERT(param.backupStandby);
|
|
|
|
ASSERT(!param.errorAfterStart);
|
|
|
|
|
|
|
|
harnessPqScriptSet((HarnessPq [])
|
|
|
|
{
|
|
|
|
// Connect to primary
|
2020-06-25 08:02:48 -04:00
|
|
|
HRNPQ_MACRO_OPEN_GE_96(1, "dbname='postgres' port=5432", PG_VERSION_96, pg1Path, false, NULL, NULL),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Connect to standby
|
2020-06-25 08:02:48 -04:00
|
|
|
HRNPQ_MACRO_OPEN_GE_96(2, "dbname='postgres' port=5433", PG_VERSION_96, pg2Path, true, NULL, NULL),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Get start time
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000),
|
|
|
|
|
|
|
|
// Start backup
|
|
|
|
HRNPQ_MACRO_ADVISORY_LOCK(1, true),
|
|
|
|
HRNPQ_MACRO_START_BACKUP_96(1, true, lsnStartStr, walSegmentStart),
|
|
|
|
HRNPQ_MACRO_DATABASE_LIST_1(1, "test1"),
|
|
|
|
HRNPQ_MACRO_TABLESPACE_LIST_0(1),
|
|
|
|
|
|
|
|
// Wait for standby to sync
|
|
|
|
HRNPQ_MACRO_REPLAY_WAIT_96(2, lsnStartStr),
|
|
|
|
|
|
|
|
// Get copy start time
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 999),
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 1000),
|
|
|
|
|
|
|
|
// Stop backup
|
|
|
|
HRNPQ_MACRO_STOP_BACKUP_96(1, lsnStopStr, walSegmentStop, false),
|
|
|
|
|
|
|
|
// Get stop time
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 2000),
|
|
|
|
|
|
|
|
HRNPQ_MACRO_DONE()
|
|
|
|
});
|
|
|
|
}
|
|
|
|
// -----------------------------------------------------------------------------------------------------------------------------
|
|
|
|
else if (pgVersion == PG_VERSION_11)
|
|
|
|
{
|
|
|
|
ASSERT(!param.backupStandby);
|
|
|
|
|
|
|
|
if (param.errorAfterStart)
|
|
|
|
{
|
|
|
|
harnessPqScriptSet((HarnessPq [])
|
|
|
|
{
|
|
|
|
// Connect to primary
|
2020-06-25 08:02:48 -04:00
|
|
|
HRNPQ_MACRO_OPEN_GE_96(1, "dbname='postgres' port=5432", PG_VERSION_11, pg1Path, false, NULL, NULL),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Get start time
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000),
|
|
|
|
|
|
|
|
// Start backup
|
|
|
|
HRNPQ_MACRO_ADVISORY_LOCK(1, true),
|
|
|
|
HRNPQ_MACRO_START_BACKUP_GE_10(1, param.startFast, lsnStartStr, walSegmentStart),
|
|
|
|
HRNPQ_MACRO_DATABASE_LIST_1(1, "test1"),
|
|
|
|
HRNPQ_MACRO_TABLESPACE_LIST_1(1, 32768, "tblspc32768"),
|
|
|
|
|
|
|
|
// Get copy start time
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 999),
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 1000),
|
|
|
|
|
|
|
|
HRNPQ_MACRO_DONE()
|
|
|
|
});
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
harnessPqScriptSet((HarnessPq [])
|
|
|
|
{
|
|
|
|
// Connect to primary
|
2020-06-25 08:02:48 -04:00
|
|
|
HRNPQ_MACRO_OPEN_GE_96(1, "dbname='postgres' port=5432", PG_VERSION_11, pg1Path, false, NULL, NULL),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Get start time
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000),
|
|
|
|
|
|
|
|
// Start backup
|
|
|
|
HRNPQ_MACRO_ADVISORY_LOCK(1, true),
|
|
|
|
HRNPQ_MACRO_START_BACKUP_GE_10(1, param.startFast, lsnStartStr, walSegmentStart),
|
2020-10-24 11:07:07 -04:00
|
|
|
HRNPQ_MACRO_DATABASE_LIST_1(1, " test1"),
|
2019-12-13 17:14:26 -05:00
|
|
|
HRNPQ_MACRO_TABLESPACE_LIST_1(1, 32768, "tblspc32768"),
|
|
|
|
|
|
|
|
// Get copy start time
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 999),
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 1000),
|
|
|
|
|
|
|
|
// Stop backup
|
|
|
|
HRNPQ_MACRO_STOP_BACKUP_GE_10(1, lsnStopStr, walSegmentStop, false),
|
|
|
|
|
|
|
|
// Get stop time
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 2000),
|
|
|
|
|
|
|
|
HRNPQ_MACRO_DONE()
|
|
|
|
});
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
THROW_FMT(AssertError, "unsupported test version %u", pgVersion); // {uncoverable - no invalid versions in tests}
|
|
|
|
};
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
/***********************************************************************************************************************************
|
|
|
|
Test Run
|
|
|
|
***********************************************************************************************************************************/
|
|
|
|
void
|
|
|
|
testRun(void)
|
|
|
|
{
|
|
|
|
FUNCTION_HARNESS_VOID();
|
|
|
|
|
2019-12-13 17:14:26 -05:00
|
|
|
// The tests expect the timezone to be UTC
|
|
|
|
setenv("TZ", "UTC", true);
|
|
|
|
|
2020-04-30 11:01:38 -04:00
|
|
|
Storage *storageTest = storagePosixNewP(strNew(testPath()), .write = true);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2019-07-25 14:34:16 -04:00
|
|
|
// Start a protocol server to test the protocol directly
|
|
|
|
Buffer *serverWrite = bufNew(8192);
|
|
|
|
IoWrite *serverWriteIo = ioBufferWriteNew(serverWrite);
|
|
|
|
ioWriteOpen(serverWriteIo);
|
|
|
|
|
|
|
|
ProtocolServer *server = protocolServerNew(strNew("test"), strNew("test"), ioBufferReadNew(bufNew(0)), serverWriteIo);
|
|
|
|
bufUsedSet(serverWrite, 0);
|
|
|
|
|
|
|
|
const String *pgFile = strNew("testfile");
|
|
|
|
const String *missingFile = strNew("missing");
|
|
|
|
const String *backupLabel = strNew("20190718-155825F");
|
2020-07-30 07:49:06 -04:00
|
|
|
const String *backupPathFile = strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(backupLabel), strZ(pgFile));
|
2019-07-25 14:34:16 -04:00
|
|
|
BackupFileResult result = {0};
|
|
|
|
VariantList *paramList = varLstNew();
|
|
|
|
|
|
|
|
// *****************************************************************************************************************************
|
|
|
|
if (testBegin("segmentNumber()"))
|
|
|
|
{
|
|
|
|
TEST_RESULT_UINT(segmentNumber(pgFile), 0, "No segment number");
|
2020-07-30 07:49:06 -04:00
|
|
|
TEST_RESULT_UINT(segmentNumber(strNewFmt("%s.123", strZ(pgFile))), 123, "Segment number");
|
2019-07-25 14:34:16 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// *****************************************************************************************************************************
|
|
|
|
if (testBegin("backupFile(), backupProtocol"))
|
|
|
|
{
|
|
|
|
// Load Parameters
|
|
|
|
StringList *argList = strLstNew();
|
|
|
|
strLstAddZ(argList, "--stanza=test1");
|
|
|
|
strLstAdd(argList, strNewFmt("--repo1-path=%s/repo", testPath()));
|
|
|
|
strLstAdd(argList, strNewFmt("--pg1-path=%s/pg", testPath()));
|
|
|
|
strLstAddZ(argList, "--repo1-retention-full=1");
|
2019-10-08 12:06:30 -04:00
|
|
|
harnessCfgLoad(cfgCmdBackup, argList);
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
// Create the pg path
|
|
|
|
storagePathCreateP(storagePgWrite(), NULL, .mode = 0700);
|
|
|
|
|
|
|
|
// Pg file missing - ignoreMissing=true
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
|
|
|
backupFile(
|
2020-05-05 13:23:36 -04:00
|
|
|
missingFile, true, 0, true, NULL, false, 0, missingFile, false, compressTypeNone, 1, backupLabel, false,
|
|
|
|
cipherTypeNone, NULL),
|
2019-07-25 14:34:16 -04:00
|
|
|
"pg file missing, ignoreMissing=true, no delta");
|
|
|
|
TEST_RESULT_UINT(result.copySize + result.repoSize, 0, " copy/repo size 0");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultSkip, " skip file");
|
|
|
|
|
|
|
|
// Check protocol function directly
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
// NULL, zero param values, ignoreMissing=true
|
|
|
|
varLstAdd(paramList, varNewStr(missingFile)); // pgFile
|
|
|
|
varLstAdd(paramList, varNewBool(true)); // pgFileIgnoreMissing
|
|
|
|
varLstAdd(paramList, varNewUInt64(0)); // pgFileSize
|
2020-04-16 14:48:16 -04:00
|
|
|
varLstAdd(paramList, varNewBool(true)); // pgFileCopyExactSize
|
2019-07-25 14:34:16 -04:00
|
|
|
varLstAdd(paramList, NULL); // pgFileChecksum
|
|
|
|
varLstAdd(paramList, varNewBool(false)); // pgFileChecksumPage
|
2019-12-13 17:14:26 -05:00
|
|
|
varLstAdd(paramList, varNewUInt64(0)); // pgFileChecksumPageLsnLimit
|
2019-07-25 14:34:16 -04:00
|
|
|
varLstAdd(paramList, varNewStr(missingFile)); // repoFile
|
|
|
|
varLstAdd(paramList, varNewBool(false)); // repoFileHasReference
|
2020-03-06 14:41:03 -05:00
|
|
|
varLstAdd(paramList, varNewUInt(compressTypeNone)); // repoFileCompress
|
|
|
|
varLstAdd(paramList, varNewInt(0)); // repoFileCompressLevel
|
2019-07-25 14:34:16 -04:00
|
|
|
varLstAdd(paramList, varNewStr(backupLabel)); // backupLabel
|
|
|
|
varLstAdd(paramList, varNewBool(false)); // delta
|
2019-12-13 17:14:26 -05:00
|
|
|
varLstAdd(paramList, NULL); // cipherSubPass
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
TEST_RESULT_BOOL(
|
|
|
|
backupProtocol(PROTOCOL_COMMAND_BACKUP_FILE_STR, paramList, server), true, "protocol backup file - skip");
|
2019-12-26 18:08:27 -07:00
|
|
|
TEST_RESULT_STR_Z(strNewBuf(serverWrite), "{\"out\":[3,0,0,null,null]}\n", " check result");
|
2019-07-25 14:34:16 -04:00
|
|
|
bufUsedSet(serverWrite, 0);
|
|
|
|
|
|
|
|
// Pg file missing - ignoreMissing=false
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_ERROR_FMT(
|
|
|
|
backupFile(
|
2020-05-05 13:23:36 -04:00
|
|
|
missingFile, false, 0, true, NULL, false, 0, missingFile, false, compressTypeNone, 1, backupLabel, false,
|
|
|
|
cipherTypeNone, NULL),
|
2019-07-25 14:34:16 -04:00
|
|
|
FileMissingError, "unable to open missing file '%s/pg/missing' for read", testPath());
|
|
|
|
|
|
|
|
// Create a pg file to backup
|
2019-11-17 15:10:40 -05:00
|
|
|
storagePutP(storageNewWriteP(storagePgWrite(), pgFile), BUFSTRDEF("atestfile"));
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
// No prior checksum, no compression, no pageChecksum, no delta, no hasReference
|
|
|
|
|
|
|
|
// With the expected backupCopyResultCopy, unset the storageFeatureCompress bit for the storageRepo for code coverage
|
|
|
|
uint64_t feature = storageRepo()->interface.feature;
|
|
|
|
((Storage *)storageRepo())->interface.feature = feature && ((1 << storageFeatureCompress) ^ 0xFFFFFFFFFFFFFFFF);
|
|
|
|
|
2020-07-14 15:05:31 -04:00
|
|
|
// Create tmp file to make it look like a prior backup file failed partway through to ensure that retries work
|
|
|
|
TEST_RESULT_VOID(
|
2020-07-30 07:49:06 -04:00
|
|
|
storagePutP(storageNewWriteP(storageRepoWrite(), strNewFmt("%s.pgbackrest.tmp", strZ(backupPathFile))), NULL),
|
2020-07-14 15:05:31 -04:00
|
|
|
" create tmp file");
|
|
|
|
|
2019-07-25 14:34:16 -04:00
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
2020-04-16 14:48:16 -04:00
|
|
|
backupFile(
|
2020-05-05 13:23:36 -04:00
|
|
|
pgFile, false, 9999999, true, NULL, false, 0, pgFile, false, compressTypeNone, 1, backupLabel, false,
|
|
|
|
cipherTypeNone, NULL),
|
2020-03-17 16:01:17 -04:00
|
|
|
"pg file exists and shrunk, no repo file, no ignoreMissing, no pageChecksum, no delta, no hasReference");
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
((Storage *)storageRepo())->interface.feature = feature;
|
|
|
|
|
|
|
|
TEST_RESULT_UINT(result.copySize + result.repoSize, 18, " copy=repo=pgFile size");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultCopy, " copy file");
|
|
|
|
TEST_RESULT_BOOL(
|
|
|
|
(strEqZ(result.copyChecksum, "9bc8ab2dda60ef4beed07d1e19ce0676d5edde67") &&
|
2019-11-17 15:10:40 -05:00
|
|
|
storageExistsP(storageRepo(), backupPathFile) && result.pageChecksumResult == NULL),
|
2019-07-25 14:34:16 -04:00
|
|
|
true, " copy file to repo success");
|
|
|
|
|
2020-07-14 15:05:31 -04:00
|
|
|
TEST_RESULT_BOOL(
|
2020-07-30 07:49:06 -04:00
|
|
|
storageExistsP(storageRepoWrite(), strNewFmt("%s.pgbackrest.tmp", strZ(backupPathFile))), false,
|
2020-07-14 15:05:31 -04:00
|
|
|
" check temp file removed");
|
2019-11-17 15:10:40 -05:00
|
|
|
TEST_RESULT_VOID(storageRemoveP(storageRepoWrite(), backupPathFile), " remove repo file");
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
// Test pagechecksum
|
2020-03-19 13:16:05 -04:00
|
|
|
|
|
|
|
// Increase the file size but most of the following tests will still treat the file as size 9. This tests the common case
|
|
|
|
// where a file grows while a backup is running.
|
2020-05-28 10:27:45 -04:00
|
|
|
storagePutP(storageNewWriteP(storagePgWrite(), pgFile), BUFSTRDEF("atestfile###"));
|
2020-03-19 13:16:05 -04:00
|
|
|
|
2019-07-25 14:34:16 -04:00
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
|
|
|
backupFile(
|
2020-05-05 13:23:36 -04:00
|
|
|
pgFile, false, 9, true, NULL, true, 0xFFFFFFFFFFFFFFFF, pgFile, false, compressTypeNone, 1, backupLabel, false,
|
|
|
|
cipherTypeNone, NULL),
|
2019-07-25 14:34:16 -04:00
|
|
|
"file checksummed with pageChecksum enabled");
|
|
|
|
TEST_RESULT_UINT(result.copySize + result.repoSize, 18, " copy=repo=pgFile size");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultCopy, " copy file");
|
|
|
|
TEST_RESULT_BOOL(
|
|
|
|
(strEqZ(result.copyChecksum, "9bc8ab2dda60ef4beed07d1e19ce0676d5edde67") &&
|
2019-11-17 15:10:40 -05:00
|
|
|
storageExistsP(storageRepo(), backupPathFile)),
|
2019-07-25 14:34:16 -04:00
|
|
|
true," copy file to repo success");
|
|
|
|
TEST_RESULT_PTR_NE(result.pageChecksumResult, NULL, " pageChecksumResult is set");
|
|
|
|
TEST_RESULT_BOOL(
|
|
|
|
varBool(kvGet(result.pageChecksumResult, VARSTRDEF("valid"))), false, " pageChecksumResult valid=false");
|
2019-11-17 15:10:40 -05:00
|
|
|
TEST_RESULT_VOID(storageRemoveP(storageRepoWrite(), backupPathFile), " remove repo file");
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
// Check protocol function directly
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
// pgFileSize, ignoreMissing=false, backupLabel, pgFileChecksumPage, pgFileChecksumPageLsnLimit
|
|
|
|
paramList = varLstNew();
|
|
|
|
varLstAdd(paramList, varNewStr(pgFile)); // pgFile
|
|
|
|
varLstAdd(paramList, varNewBool(false)); // pgFileIgnoreMissing
|
2020-04-16 14:48:16 -04:00
|
|
|
varLstAdd(paramList, varNewUInt64(8)); // pgFileSize
|
|
|
|
varLstAdd(paramList, varNewBool(false)); // pgFileCopyExactSize
|
2019-07-25 14:34:16 -04:00
|
|
|
varLstAdd(paramList, NULL); // pgFileChecksum
|
|
|
|
varLstAdd(paramList, varNewBool(true)); // pgFileChecksumPage
|
2019-12-13 17:14:26 -05:00
|
|
|
varLstAdd(paramList, varNewUInt64(0xFFFFFFFFFFFFFFFF)); // pgFileChecksumPageLsnLimit
|
2019-07-25 14:34:16 -04:00
|
|
|
varLstAdd(paramList, varNewStr(pgFile)); // repoFile
|
|
|
|
varLstAdd(paramList, varNewBool(false)); // repoFileHasReference
|
2020-03-06 14:41:03 -05:00
|
|
|
varLstAdd(paramList, varNewUInt(compressTypeNone)); // repoFileCompress
|
|
|
|
varLstAdd(paramList, varNewInt(1)); // repoFileCompressLevel
|
2019-07-25 14:34:16 -04:00
|
|
|
varLstAdd(paramList, varNewStr(backupLabel)); // backupLabel
|
|
|
|
varLstAdd(paramList, varNewBool(false)); // delta
|
2019-12-13 17:14:26 -05:00
|
|
|
varLstAdd(paramList, NULL); // cipherSubPass
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
TEST_RESULT_BOOL(
|
|
|
|
backupProtocol(PROTOCOL_COMMAND_BACKUP_FILE_STR, paramList, server), true, "protocol backup file - pageChecksum");
|
2019-12-26 18:08:27 -07:00
|
|
|
TEST_RESULT_STR_Z(
|
|
|
|
strNewBuf(serverWrite),
|
2020-05-28 10:27:45 -04:00
|
|
|
"{\"out\":[1,12,12,\"c3ae4687ea8ccd47bfdb190dbe7fd3b37545fdb9\",{\"align\":false,\"valid\":false}]}\n",
|
2019-07-25 14:34:16 -04:00
|
|
|
" check result");
|
|
|
|
bufUsedSet(serverWrite, 0);
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
// File exists in repo and db, checksum match, delta set, ignoreMissing false, hasReference - NOOP
|
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
|
|
|
backupFile(
|
2020-05-05 13:23:36 -04:00
|
|
|
pgFile, false, 9, true, strNew("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 0, pgFile, true,
|
|
|
|
compressTypeNone, 1, backupLabel, true, cipherTypeNone, NULL),
|
2019-07-25 14:34:16 -04:00
|
|
|
"file in db and repo, checksum equal, no ignoreMissing, no pageChecksum, delta, hasReference");
|
|
|
|
TEST_RESULT_UINT(result.copySize, 9, " copy size set");
|
|
|
|
TEST_RESULT_UINT(result.repoSize, 0, " repo size not set since already exists in repo");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultNoOp, " noop file");
|
|
|
|
TEST_RESULT_BOOL(
|
|
|
|
(strEqZ(result.copyChecksum, "9bc8ab2dda60ef4beed07d1e19ce0676d5edde67") &&
|
2019-11-17 15:10:40 -05:00
|
|
|
storageExistsP(storageRepo(), backupPathFile) && result.pageChecksumResult == NULL),
|
2019-07-25 14:34:16 -04:00
|
|
|
true, " noop");
|
|
|
|
|
|
|
|
// Check protocol function directly
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
// pgFileChecksum, hasReference, delta
|
|
|
|
paramList = varLstNew();
|
|
|
|
varLstAdd(paramList, varNewStr(pgFile)); // pgFile
|
|
|
|
varLstAdd(paramList, varNewBool(false)); // pgFileIgnoreMissing
|
2020-04-16 14:48:16 -04:00
|
|
|
varLstAdd(paramList, varNewUInt64(12)); // pgFileSize
|
|
|
|
varLstAdd(paramList, varNewBool(false)); // pgFileCopyExactSize
|
2020-05-28 10:27:45 -04:00
|
|
|
varLstAdd(paramList, varNewStrZ("c3ae4687ea8ccd47bfdb190dbe7fd3b37545fdb9")); // pgFileChecksum
|
2019-07-25 14:34:16 -04:00
|
|
|
varLstAdd(paramList, varNewBool(false)); // pgFileChecksumPage
|
2019-12-13 17:14:26 -05:00
|
|
|
varLstAdd(paramList, varNewUInt64(0)); // pgFileChecksumPageLsnLimit
|
2019-07-25 14:34:16 -04:00
|
|
|
varLstAdd(paramList, varNewStr(pgFile)); // repoFile
|
|
|
|
varLstAdd(paramList, varNewBool(true)); // repoFileHasReference
|
2020-03-06 14:41:03 -05:00
|
|
|
varLstAdd(paramList, varNewUInt(compressTypeNone)); // repoFileCompress
|
|
|
|
varLstAdd(paramList, varNewInt(1)); // repoFileCompressLevel
|
2019-07-25 14:34:16 -04:00
|
|
|
varLstAdd(paramList, varNewStr(backupLabel)); // backupLabel
|
|
|
|
varLstAdd(paramList, varNewBool(true)); // delta
|
2019-12-13 17:14:26 -05:00
|
|
|
varLstAdd(paramList, NULL); // cipherSubPass
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
TEST_RESULT_BOOL(
|
|
|
|
backupProtocol(PROTOCOL_COMMAND_BACKUP_FILE_STR, paramList, server), true, "protocol backup file - noop");
|
2019-12-26 18:08:27 -07:00
|
|
|
TEST_RESULT_STR_Z(
|
2020-05-28 10:27:45 -04:00
|
|
|
strNewBuf(serverWrite), "{\"out\":[4,12,0,\"c3ae4687ea8ccd47bfdb190dbe7fd3b37545fdb9\",null]}\n", " check result");
|
2019-07-25 14:34:16 -04:00
|
|
|
bufUsedSet(serverWrite, 0);
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
// File exists in repo and db, pg checksum mismatch, delta set, ignoreMissing false, hasReference - COPY
|
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
|
|
|
backupFile(
|
2020-05-05 13:23:36 -04:00
|
|
|
pgFile, false, 9, true, strNew("1234567890123456789012345678901234567890"), false, 0, pgFile, true,
|
|
|
|
compressTypeNone, 1, backupLabel, true, cipherTypeNone, NULL),
|
2019-07-25 14:34:16 -04:00
|
|
|
"file in db and repo, pg checksum not equal, no ignoreMissing, no pageChecksum, delta, hasReference");
|
|
|
|
TEST_RESULT_UINT(result.copySize + result.repoSize, 18, " copy=repo=pgFile size");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultCopy, " copy file");
|
|
|
|
TEST_RESULT_BOOL(
|
|
|
|
(strEqZ(result.copyChecksum, "9bc8ab2dda60ef4beed07d1e19ce0676d5edde67") &&
|
2019-11-17 15:10:40 -05:00
|
|
|
storageExistsP(storageRepo(), backupPathFile) && result.pageChecksumResult == NULL),
|
2019-07-25 14:34:16 -04:00
|
|
|
true, " copy");
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
// File exists in repo and db, pg checksum same, pg size different, delta set, ignoreMissing false, hasReference - COPY
|
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
|
|
|
backupFile(
|
2020-05-05 13:23:36 -04:00
|
|
|
pgFile, false, 9999999, true, strNew("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 0, pgFile, true,
|
|
|
|
compressTypeNone, 1, backupLabel, true, cipherTypeNone, NULL),
|
2019-07-25 14:34:16 -04:00
|
|
|
"db & repo file, pg checksum same, pg size different, no ignoreMissing, no pageChecksum, delta, hasReference");
|
2020-03-19 13:16:05 -04:00
|
|
|
TEST_RESULT_UINT(result.copySize + result.repoSize, 24, " copy=repo=pgFile size");
|
2019-07-25 14:34:16 -04:00
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultCopy, " copy file");
|
2020-05-28 10:27:45 -04:00
|
|
|
TEST_RESULT_STR_Z(result.copyChecksum, "c3ae4687ea8ccd47bfdb190dbe7fd3b37545fdb9", "TEST");
|
2019-07-25 14:34:16 -04:00
|
|
|
TEST_RESULT_BOOL(
|
2020-05-28 10:27:45 -04:00
|
|
|
(strEqZ(result.copyChecksum, "c3ae4687ea8ccd47bfdb190dbe7fd3b37545fdb9") &&
|
2019-11-17 15:10:40 -05:00
|
|
|
storageExistsP(storageRepo(), backupPathFile) && result.pageChecksumResult == NULL),
|
2019-07-25 14:34:16 -04:00
|
|
|
true, " copy");
|
|
|
|
|
2019-12-07 09:48:33 -05:00
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("resumed file is missing in repo but present in resumed manfest, recopy");
|
|
|
|
|
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
|
|
|
backupFile(
|
2020-04-16 14:48:16 -04:00
|
|
|
pgFile, false, 9, true, strNew("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 0, STRDEF(BOGUS_STR), false,
|
2020-05-05 13:23:36 -04:00
|
|
|
compressTypeNone, 1, backupLabel, true, cipherTypeNone, NULL),
|
2019-12-07 09:48:33 -05:00
|
|
|
"backup file");
|
|
|
|
TEST_RESULT_UINT(result.copySize + result.repoSize, 18, " copy=repo=pgFile size");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultReCopy, " check copy result");
|
|
|
|
TEST_RESULT_BOOL(
|
|
|
|
(strEqZ(result.copyChecksum, "9bc8ab2dda60ef4beed07d1e19ce0676d5edde67") &&
|
|
|
|
storageExistsP(storageRepo(), backupPathFile) && result.pageChecksumResult == NULL),
|
|
|
|
true, " recopy");
|
|
|
|
|
2019-07-25 14:34:16 -04:00
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
// File exists in repo and db, checksum not same in repo, delta set, ignoreMissing false, no hasReference - RECOPY
|
|
|
|
TEST_RESULT_VOID(
|
2019-11-17 15:10:40 -05:00
|
|
|
storagePutP(storageNewWriteP(storageRepoWrite(), backupPathFile), BUFSTRDEF("adifferentfile")),
|
2019-07-25 14:34:16 -04:00
|
|
|
"create different file (size and checksum) with same name in repo");
|
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
|
|
|
backupFile(
|
2020-05-05 13:23:36 -04:00
|
|
|
pgFile, false, 9, true, strNew("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 0, pgFile, false,
|
|
|
|
compressTypeNone, 1, backupLabel, true, cipherTypeNone, NULL),
|
2019-07-25 14:34:16 -04:00
|
|
|
" db & repo file, pgFileMatch, repo checksum no match, no ignoreMissing, no pageChecksum, delta, no hasReference");
|
|
|
|
TEST_RESULT_UINT(result.copySize + result.repoSize, 18, " copy=repo=pgFile size");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultReCopy, " recopy file");
|
|
|
|
TEST_RESULT_BOOL(
|
|
|
|
(strEqZ(result.copyChecksum, "9bc8ab2dda60ef4beed07d1e19ce0676d5edde67") &&
|
2019-11-17 15:10:40 -05:00
|
|
|
storageExistsP(storageRepo(), backupPathFile) && result.pageChecksumResult == NULL),
|
2019-07-25 14:34:16 -04:00
|
|
|
true, " recopy");
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
// File exists in repo but missing from db, checksum same in repo, delta set, ignoreMissing true, no hasReference - SKIP
|
|
|
|
TEST_RESULT_VOID(
|
2019-11-17 15:10:40 -05:00
|
|
|
storagePutP(storageNewWriteP(storageRepoWrite(), backupPathFile), BUFSTRDEF("adifferentfile")),
|
2019-07-25 14:34:16 -04:00
|
|
|
"create different file with same name in repo");
|
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
|
|
|
backupFile(
|
2020-05-05 13:23:36 -04:00
|
|
|
missingFile, true, 9, true, strNew("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 0, pgFile, false,
|
|
|
|
compressTypeNone, 1, backupLabel, true, cipherTypeNone, NULL),
|
2019-07-25 14:34:16 -04:00
|
|
|
" file in repo only, checksum in repo equal, ignoreMissing=true, no pageChecksum, delta, no hasReference");
|
|
|
|
TEST_RESULT_UINT(result.copySize + result.repoSize, 0, " copy=repo=0 size");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultSkip, " skip file");
|
|
|
|
TEST_RESULT_BOOL(
|
2019-11-17 15:10:40 -05:00
|
|
|
(result.copyChecksum == NULL && !storageExistsP(storageRepo(), backupPathFile) && result.pageChecksumResult == NULL),
|
2019-07-25 14:34:16 -04:00
|
|
|
true, " skip and remove file from repo");
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
// No prior checksum, compression, no page checksum, no pageChecksum, no delta, no hasReference
|
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
2020-05-05 13:23:36 -04:00
|
|
|
backupFile(
|
|
|
|
pgFile, false, 9, true, NULL, false, 0, pgFile, false, compressTypeGz, 3, backupLabel, false, cipherTypeNone, NULL),
|
2019-07-25 14:34:16 -04:00
|
|
|
"pg file exists, no checksum, no ignoreMissing, compression, no pageChecksum, no delta, no hasReference");
|
|
|
|
|
|
|
|
TEST_RESULT_UINT(result.copySize, 9, " copy=pgFile size");
|
|
|
|
TEST_RESULT_UINT(result.repoSize, 29, " repo compress size");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultCopy, " copy file");
|
|
|
|
TEST_RESULT_BOOL(
|
|
|
|
(strEqZ(result.copyChecksum, "9bc8ab2dda60ef4beed07d1e19ce0676d5edde67") &&
|
2020-07-30 07:49:06 -04:00
|
|
|
storageExistsP(storageRepo(), strNewFmt(STORAGE_REPO_BACKUP "/%s/%s.gz", strZ(backupLabel), strZ(pgFile))) &&
|
2019-07-25 14:34:16 -04:00
|
|
|
result.pageChecksumResult == NULL),
|
|
|
|
true, " copy file to repo compress success");
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
// Pg and repo file exist & match, prior checksum, compression, no page checksum, no pageChecksum, no delta, no hasReference
|
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
|
|
|
backupFile(
|
2020-05-05 13:23:36 -04:00
|
|
|
pgFile, false, 9, true, strNew("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 0, pgFile, false, compressTypeGz,
|
|
|
|
3, backupLabel, false, cipherTypeNone, NULL),
|
2019-07-25 14:34:16 -04:00
|
|
|
"pg file & repo exists, match, checksum, no ignoreMissing, compression, no pageChecksum, no delta, no hasReference");
|
|
|
|
|
|
|
|
TEST_RESULT_UINT(result.copySize, 9, " copy=pgFile size");
|
|
|
|
TEST_RESULT_UINT(result.repoSize, 29, " repo compress size");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultChecksum, " checksum file");
|
|
|
|
TEST_RESULT_BOOL(
|
|
|
|
(strEqZ(result.copyChecksum, "9bc8ab2dda60ef4beed07d1e19ce0676d5edde67") &&
|
2020-07-30 07:49:06 -04:00
|
|
|
storageExistsP(storageRepo(), strNewFmt(STORAGE_REPO_BACKUP "/%s/%s.gz", strZ(backupLabel), strZ(pgFile))) &&
|
2019-07-25 14:34:16 -04:00
|
|
|
result.pageChecksumResult == NULL),
|
|
|
|
true, " compressed repo file matches");
|
|
|
|
|
|
|
|
// Check protocol function directly
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
// compression
|
|
|
|
paramList = varLstNew();
|
|
|
|
varLstAdd(paramList, varNewStr(pgFile)); // pgFile
|
|
|
|
varLstAdd(paramList, varNewBool(false)); // pgFileIgnoreMissing
|
|
|
|
varLstAdd(paramList, varNewUInt64(9)); // pgFileSize
|
2020-04-16 14:48:16 -04:00
|
|
|
varLstAdd(paramList, varNewBool(true)); // pgFileCopyExactSize
|
2019-07-25 14:34:16 -04:00
|
|
|
varLstAdd(paramList, varNewStrZ("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67")); // pgFileChecksum
|
|
|
|
varLstAdd(paramList, varNewBool(false)); // pgFileChecksumPage
|
2019-12-13 17:14:26 -05:00
|
|
|
varLstAdd(paramList, varNewUInt64(0)); // pgFileChecksumPageLsnLimit
|
2019-07-25 14:34:16 -04:00
|
|
|
varLstAdd(paramList, varNewStr(pgFile)); // repoFile
|
|
|
|
varLstAdd(paramList, varNewBool(false)); // repoFileHasReference
|
2020-03-06 14:41:03 -05:00
|
|
|
varLstAdd(paramList, varNewUInt(compressTypeGz)); // repoFileCompress
|
|
|
|
varLstAdd(paramList, varNewInt(3)); // repoFileCompressLevel
|
2019-07-25 14:34:16 -04:00
|
|
|
varLstAdd(paramList, varNewStr(backupLabel)); // backupLabel
|
|
|
|
varLstAdd(paramList, varNewBool(false)); // delta
|
2019-12-13 17:14:26 -05:00
|
|
|
varLstAdd(paramList, NULL); // cipherSubPass
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
TEST_RESULT_BOOL(
|
|
|
|
backupProtocol(PROTOCOL_COMMAND_BACKUP_FILE_STR, paramList, server), true, "protocol backup file - copy, compress");
|
2019-12-26 18:08:27 -07:00
|
|
|
TEST_RESULT_STR_Z(
|
|
|
|
strNewBuf(serverWrite), "{\"out\":[0,9,29,\"9bc8ab2dda60ef4beed07d1e19ce0676d5edde67\",null]}\n", " check result");
|
2019-07-25 14:34:16 -04:00
|
|
|
bufUsedSet(serverWrite, 0);
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
// Create a zero sized file - checksum will be set but in backupManifestUpdate it will not be copied
|
2019-11-17 15:10:40 -05:00
|
|
|
storagePutP(storageNewWriteP(storagePgWrite(), strNew("zerofile")), BUFSTRDEF(""));
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
// No prior checksum, no compression, no pageChecksum, no delta, no hasReference
|
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
|
|
|
backupFile(
|
2020-05-05 13:23:36 -04:00
|
|
|
strNew("zerofile"), false, 0, true, NULL, false, 0, strNew("zerofile"), false, compressTypeNone, 1, backupLabel,
|
|
|
|
false, cipherTypeNone, NULL),
|
2019-07-25 14:34:16 -04:00
|
|
|
"zero-sized pg file exists, no repo file, no ignoreMissing, no pageChecksum, no delta, no hasReference");
|
|
|
|
TEST_RESULT_UINT(result.copySize + result.repoSize, 0, " copy=repo=pgFile size 0");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultCopy, " copy file");
|
|
|
|
TEST_RESULT_PTR_NE(result.copyChecksum, NULL, " checksum set");
|
|
|
|
TEST_RESULT_BOOL(
|
2020-07-30 07:49:06 -04:00
|
|
|
(storageExistsP(storageRepo(), strNewFmt(STORAGE_REPO_BACKUP "/%s/zerofile", strZ(backupLabel))) &&
|
2019-07-25 14:34:16 -04:00
|
|
|
result.pageChecksumResult == NULL),
|
|
|
|
true, " copy zero file to repo success");
|
|
|
|
|
|
|
|
// Check invalid protocol function
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_RESULT_BOOL(backupProtocol(strNew(BOGUS_STR), paramList, server), false, "invalid function");
|
|
|
|
}
|
|
|
|
|
|
|
|
// *****************************************************************************************************************************
|
|
|
|
if (testBegin("backupFile() - encrypt"))
|
|
|
|
{
|
|
|
|
// Load Parameters
|
|
|
|
StringList *argList = strLstNew();
|
|
|
|
strLstAddZ(argList, "--stanza=test1");
|
|
|
|
strLstAdd(argList, strNewFmt("--repo1-path=%s/repo", testPath()));
|
|
|
|
strLstAdd(argList, strNewFmt("--pg1-path=%s/pg", testPath()));
|
|
|
|
strLstAddZ(argList, "--repo1-retention-full=1");
|
|
|
|
strLstAddZ(argList, "--repo1-cipher-type=aes-256-cbc");
|
|
|
|
setenv("PGBACKREST_REPO1_CIPHER_PASS", "12345678", true);
|
2019-10-08 12:06:30 -04:00
|
|
|
harnessCfgLoad(cfgCmdBackup, argList);
|
2019-07-25 14:34:16 -04:00
|
|
|
unsetenv("PGBACKREST_REPO1_CIPHER_PASS");
|
|
|
|
|
|
|
|
// Create the pg path
|
|
|
|
storagePathCreateP(storagePgWrite(), NULL, .mode = 0700);
|
|
|
|
|
|
|
|
// Create a pg file to backup
|
2019-11-17 15:10:40 -05:00
|
|
|
storagePutP(storageNewWriteP(storagePgWrite(), pgFile), BUFSTRDEF("atestfile"));
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
// No prior checksum, no compression, no pageChecksum, no delta, no hasReference
|
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
|
|
|
backupFile(
|
2020-05-05 13:23:36 -04:00
|
|
|
pgFile, false, 9, true, NULL, false, 0, pgFile, false, compressTypeNone, 1, backupLabel, false, cipherTypeAes256Cbc,
|
2019-07-25 14:34:16 -04:00
|
|
|
strNew("12345678")),
|
|
|
|
"pg file exists, no repo file, no ignoreMissing, no pageChecksum, no delta, no hasReference");
|
|
|
|
|
|
|
|
TEST_RESULT_UINT(result.copySize, 9, " copy size set");
|
|
|
|
TEST_RESULT_UINT(result.repoSize, 32, " repo size set");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultCopy, " copy file");
|
|
|
|
TEST_RESULT_BOOL(
|
|
|
|
(strEqZ(result.copyChecksum, "9bc8ab2dda60ef4beed07d1e19ce0676d5edde67") &&
|
2019-11-17 15:10:40 -05:00
|
|
|
storageExistsP(storageRepo(), backupPathFile) && result.pageChecksumResult == NULL),
|
2019-07-25 14:34:16 -04:00
|
|
|
true, " copy file to encrypted repo success");
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
// Delta but pgMatch false (pg File size different), prior checksum, no compression, no pageChecksum, delta, no hasReference
|
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
|
|
|
backupFile(
|
2020-05-05 13:23:36 -04:00
|
|
|
pgFile, false, 8, true, strNew("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"), false, 0, pgFile, false,
|
|
|
|
compressTypeNone, 1, backupLabel, true, cipherTypeAes256Cbc, strNew("12345678")),
|
2019-07-25 14:34:16 -04:00
|
|
|
"pg and repo file exists, pgFileMatch false, no ignoreMissing, no pageChecksum, delta, no hasReference");
|
2020-03-19 13:16:05 -04:00
|
|
|
TEST_RESULT_UINT(result.copySize, 8, " copy size set");
|
2019-07-25 14:34:16 -04:00
|
|
|
TEST_RESULT_UINT(result.repoSize, 32, " repo size set");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultCopy, " copy file");
|
|
|
|
TEST_RESULT_BOOL(
|
2020-03-19 13:16:05 -04:00
|
|
|
(strEqZ(result.copyChecksum, "acc972a8319d4903b839c64ec217faa3e77b4fcb") &&
|
2019-11-17 15:10:40 -05:00
|
|
|
storageExistsP(storageRepo(), backupPathFile) && result.pageChecksumResult == NULL),
|
2019-07-25 14:34:16 -04:00
|
|
|
true, " copy file (size missmatch) to encrypted repo success");
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
// Check repo with cipher filter.
|
|
|
|
// pg/repo file size same but checksum different, prior checksum, no compression, no pageChecksum, no delta, no hasReference
|
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
|
|
|
backupFile(
|
2020-05-05 13:23:36 -04:00
|
|
|
pgFile, false, 9, true, strNew("1234567890123456789012345678901234567890"), false, 0, pgFile, false,
|
|
|
|
compressTypeNone, 0, backupLabel, false, cipherTypeAes256Cbc, strNew("12345678")),
|
2019-07-25 14:34:16 -04:00
|
|
|
"pg and repo file exists, repo checksum no match, no ignoreMissing, no pageChecksum, no delta, no hasReference");
|
|
|
|
TEST_RESULT_UINT(result.copySize, 9, " copy size set");
|
|
|
|
TEST_RESULT_UINT(result.repoSize, 32, " repo size set");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultReCopy, " recopy file");
|
|
|
|
TEST_RESULT_BOOL(
|
|
|
|
(strEqZ(result.copyChecksum, "9bc8ab2dda60ef4beed07d1e19ce0676d5edde67") &&
|
2019-11-17 15:10:40 -05:00
|
|
|
storageExistsP(storageRepo(), backupPathFile) && result.pageChecksumResult == NULL),
|
2019-07-25 14:34:16 -04:00
|
|
|
true, " recopy file to encrypted repo success");
|
|
|
|
|
|
|
|
// Check protocol function directly
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
// cipherType, cipherPass
|
|
|
|
paramList = varLstNew();
|
|
|
|
varLstAdd(paramList, varNewStr(pgFile)); // pgFile
|
|
|
|
varLstAdd(paramList, varNewBool(false)); // pgFileIgnoreMissing
|
|
|
|
varLstAdd(paramList, varNewUInt64(9)); // pgFileSize
|
2020-04-16 14:48:16 -04:00
|
|
|
varLstAdd(paramList, varNewBool(true)); // pgFileCopyExactSize
|
2019-07-25 14:34:16 -04:00
|
|
|
varLstAdd(paramList, varNewStrZ("1234567890123456789012345678901234567890")); // pgFileChecksum
|
|
|
|
varLstAdd(paramList, varNewBool(false)); // pgFileChecksumPage
|
2019-12-13 17:14:26 -05:00
|
|
|
varLstAdd(paramList, varNewUInt64(0)); // pgFileChecksumPageLsnLimit
|
2019-07-25 14:34:16 -04:00
|
|
|
varLstAdd(paramList, varNewStr(pgFile)); // repoFile
|
|
|
|
varLstAdd(paramList, varNewBool(false)); // repoFileHasReference
|
2020-03-06 14:41:03 -05:00
|
|
|
varLstAdd(paramList, varNewUInt(compressTypeNone)); // repoFileCompress
|
|
|
|
varLstAdd(paramList, varNewInt(0)); // repoFileCompressLevel
|
2019-07-25 14:34:16 -04:00
|
|
|
varLstAdd(paramList, varNewStr(backupLabel)); // backupLabel
|
|
|
|
varLstAdd(paramList, varNewBool(false)); // delta
|
|
|
|
varLstAdd(paramList, varNewStrZ("12345678")); // cipherPass
|
|
|
|
|
|
|
|
TEST_RESULT_BOOL(
|
|
|
|
backupProtocol(PROTOCOL_COMMAND_BACKUP_FILE_STR, paramList, server), true, "protocol backup file - recopy, encrypt");
|
2019-12-26 18:08:27 -07:00
|
|
|
TEST_RESULT_STR_Z(
|
|
|
|
strNewBuf(serverWrite), "{\"out\":[2,9,32,\"9bc8ab2dda60ef4beed07d1e19ce0676d5edde67\",null]}\n", " check result");
|
2019-07-25 14:34:16 -04:00
|
|
|
bufUsedSet(serverWrite, 0);
|
|
|
|
}
|
|
|
|
|
2019-12-13 17:14:26 -05:00
|
|
|
// *****************************************************************************************************************************
|
|
|
|
if (testBegin("backupLabelCreate()"))
|
|
|
|
{
|
|
|
|
const String *pg1Path = strNewFmt("%s/pg1", testPath());
|
|
|
|
const String *repoPath = strNewFmt("%s/repo", testPath());
|
|
|
|
|
|
|
|
StringList *argList = strLstNew();
|
|
|
|
strLstAddZ(argList, "--" CFGOPT_STANZA "=test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2019-12-13 17:14:26 -05:00
|
|
|
harnessCfgLoad(cfgCmdBackup, argList);
|
|
|
|
|
|
|
|
time_t timestamp = 1575401652;
|
|
|
|
String *backupLabel = backupLabelFormat(backupTypeFull, NULL, timestamp);
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("assign label when no history");
|
|
|
|
|
|
|
|
storagePathCreateP(storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/backup.history/2019"));
|
|
|
|
|
2019-12-26 18:08:27 -07:00
|
|
|
TEST_RESULT_STR(backupLabelCreate(backupTypeFull, NULL, timestamp), backupLabel, "create label");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("assign label when history is older");
|
|
|
|
|
|
|
|
storagePutP(
|
|
|
|
storageNewWriteP(
|
|
|
|
storageRepoWrite(),
|
|
|
|
strNewFmt(
|
|
|
|
STORAGE_REPO_BACKUP "/backup.history/2019/%s.manifest.gz",
|
2020-07-30 07:49:06 -04:00
|
|
|
strZ(backupLabelFormat(backupTypeFull, NULL, timestamp - 4)))),
|
2019-12-13 17:14:26 -05:00
|
|
|
NULL);
|
|
|
|
|
2019-12-26 18:08:27 -07:00
|
|
|
TEST_RESULT_STR(backupLabelCreate(backupTypeFull, NULL, timestamp), backupLabel, "create label");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("assign label when backup is older");
|
|
|
|
|
|
|
|
storagePutP(
|
|
|
|
storageNewWriteP(
|
|
|
|
storageRepoWrite(),
|
2020-07-30 07:49:06 -04:00
|
|
|
strNewFmt(STORAGE_REPO_BACKUP "/%s", strZ(backupLabelFormat(backupTypeFull, NULL, timestamp - 2)))),
|
2019-12-13 17:14:26 -05:00
|
|
|
NULL);
|
|
|
|
|
2019-12-26 18:08:27 -07:00
|
|
|
TEST_RESULT_STR(backupLabelCreate(backupTypeFull, NULL, timestamp), backupLabel, "create label");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("advance time when backup is same");
|
|
|
|
|
|
|
|
storagePutP(
|
|
|
|
storageNewWriteP(
|
|
|
|
storageRepoWrite(),
|
2020-07-30 07:49:06 -04:00
|
|
|
strNewFmt(STORAGE_REPO_BACKUP "/%s", strZ(backupLabelFormat(backupTypeFull, NULL, timestamp)))),
|
2019-12-13 17:14:26 -05:00
|
|
|
NULL);
|
|
|
|
|
|
|
|
TEST_RESULT_STR_Z(backupLabelCreate(backupTypeFull, NULL, timestamp), "20191203-193413F", "create label");
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("error when new label is in the past even with advanced time");
|
|
|
|
|
|
|
|
storagePutP(
|
|
|
|
storageNewWriteP(
|
|
|
|
storageRepoWrite(),
|
2020-07-30 07:49:06 -04:00
|
|
|
strNewFmt(STORAGE_REPO_BACKUP "/%s", strZ(backupLabelFormat(backupTypeFull, NULL, timestamp + 1)))),
|
2019-12-13 17:14:26 -05:00
|
|
|
NULL);
|
|
|
|
|
|
|
|
TEST_ERROR(
|
|
|
|
backupLabelCreate(backupTypeFull, NULL, timestamp), FormatError,
|
|
|
|
"new backup label '20191203-193413F' is not later than latest backup label '20191203-193413F'\n"
|
|
|
|
"HINT: has the timezone changed?\n"
|
|
|
|
"HINT: is there clock skew?");
|
|
|
|
}
|
|
|
|
|
|
|
|
// *****************************************************************************************************************************
|
|
|
|
if (testBegin("backupInit()"))
|
|
|
|
{
|
|
|
|
const String *pg1Path = strNewFmt("%s/pg1", testPath());
|
|
|
|
const String *repoPath = strNewFmt("%s/repo", testPath());
|
|
|
|
|
|
|
|
// Set log level to detail
|
|
|
|
harnessLogLevelSet(logLevelDetail);
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("error when backup from standby is not supported");
|
|
|
|
|
|
|
|
StringList *argList = strLstNew();
|
|
|
|
strLstAddZ(argList, "--" CFGOPT_STANZA "=test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2019-12-13 17:14:26 -05:00
|
|
|
strLstAddZ(argList, "--" CFGOPT_BACKUP_STANDBY);
|
|
|
|
harnessCfgLoad(cfgCmdBackup, argList);
|
|
|
|
|
|
|
|
TEST_ERROR(
|
2020-09-18 16:55:26 -04:00
|
|
|
backupInit(infoBackupNew(PG_VERSION_91, 1000000000000000910, pgCatalogTestVersion(PG_VERSION_91), NULL)), ConfigError,
|
2019-12-13 17:14:26 -05:00
|
|
|
"option 'backup-standby' not valid for PostgreSQL < 9.2");
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("warn and reset when backup from standby used in offline mode");
|
|
|
|
|
|
|
|
// Create pg_control
|
|
|
|
storagePutP(
|
2020-07-30 07:49:06 -04:00
|
|
|
storageNewWriteP(storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strZ(pg1Path))),
|
2019-12-13 17:14:26 -05:00
|
|
|
pgControlTestToBuffer((PgControl){.version = PG_VERSION_92, .systemId = 1000000000000000920}));
|
|
|
|
|
|
|
|
argList = strLstNew();
|
|
|
|
strLstAddZ(argList, "--" CFGOPT_STANZA "=test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2019-12-13 17:14:26 -05:00
|
|
|
strLstAddZ(argList, "--" CFGOPT_BACKUP_STANDBY);
|
|
|
|
strLstAddZ(argList, "--no-" CFGOPT_ONLINE);
|
|
|
|
harnessCfgLoad(cfgCmdBackup, argList);
|
|
|
|
|
2020-09-18 16:55:26 -04:00
|
|
|
TEST_RESULT_VOID(
|
|
|
|
backupInit(infoBackupNew(PG_VERSION_92, 1000000000000000920, pgCatalogTestVersion(PG_VERSION_92), NULL)),
|
|
|
|
"backup init");
|
2019-12-13 17:14:26 -05:00
|
|
|
TEST_RESULT_BOOL(cfgOptionBool(cfgOptBackupStandby), false, " check backup-standby");
|
|
|
|
|
|
|
|
TEST_RESULT_LOG(
|
|
|
|
"P00 WARN: option backup-standby is enabled but backup is offline - backups will be performed from the primary");
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("error when pg_control does not match stanza");
|
|
|
|
|
|
|
|
// Create pg_control
|
|
|
|
storagePutP(
|
2020-07-30 07:49:06 -04:00
|
|
|
storageNewWriteP(storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strZ(pg1Path))),
|
2019-12-13 17:14:26 -05:00
|
|
|
pgControlTestToBuffer((PgControl){.version = PG_VERSION_10, .systemId = 1000000000000001000}));
|
|
|
|
|
|
|
|
argList = strLstNew();
|
|
|
|
strLstAddZ(argList, "--" CFGOPT_STANZA "=test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2019-12-13 17:14:26 -05:00
|
|
|
strLstAddZ(argList, "--no-" CFGOPT_ONLINE);
|
|
|
|
harnessCfgLoad(cfgCmdBackup, argList);
|
|
|
|
|
|
|
|
TEST_ERROR(
|
2020-09-18 16:55:26 -04:00
|
|
|
backupInit(infoBackupNew(PG_VERSION_11, 1000000000000001100, pgCatalogTestVersion(PG_VERSION_11), NULL)),
|
|
|
|
BackupMismatchError,
|
2019-12-13 17:14:26 -05:00
|
|
|
"PostgreSQL version 10, system-id 1000000000000001000 do not match stanza version 11, system-id 1000000000000001100\n"
|
|
|
|
"HINT: is this the correct stanza?");
|
|
|
|
TEST_ERROR(
|
2020-09-18 16:55:26 -04:00
|
|
|
backupInit(infoBackupNew(PG_VERSION_10, 1000000000000001100, pgCatalogTestVersion(PG_VERSION_10), NULL)),
|
|
|
|
BackupMismatchError,
|
2019-12-13 17:14:26 -05:00
|
|
|
"PostgreSQL version 10, system-id 1000000000000001000 do not match stanza version 10, system-id 1000000000000001100\n"
|
|
|
|
"HINT: is this the correct stanza?");
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("reset start-fast when PostgreSQL < 8.4");
|
|
|
|
|
|
|
|
// Create pg_control
|
|
|
|
storagePutP(
|
2020-07-30 07:49:06 -04:00
|
|
|
storageNewWriteP(storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strZ(pg1Path))),
|
2019-12-13 17:14:26 -05:00
|
|
|
pgControlTestToBuffer((PgControl){.version = PG_VERSION_83, .systemId = 1000000000000000830}));
|
|
|
|
|
|
|
|
argList = strLstNew();
|
|
|
|
strLstAddZ(argList, "--" CFGOPT_STANZA "=test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2019-12-13 17:14:26 -05:00
|
|
|
strLstAddZ(argList, "--no-" CFGOPT_ONLINE);
|
|
|
|
strLstAddZ(argList, "--" CFGOPT_START_FAST);
|
|
|
|
harnessCfgLoad(cfgCmdBackup, argList);
|
|
|
|
|
2020-09-18 16:55:26 -04:00
|
|
|
TEST_RESULT_VOID(
|
|
|
|
backupInit(infoBackupNew(PG_VERSION_83, 1000000000000000830, pgCatalogTestVersion(PG_VERSION_83), NULL)),
|
|
|
|
"backup init");
|
2019-12-13 17:14:26 -05:00
|
|
|
TEST_RESULT_BOOL(cfgOptionBool(cfgOptStartFast), false, " check start-fast");
|
|
|
|
|
|
|
|
TEST_RESULT_LOG("P00 WARN: start-fast option is only available in PostgreSQL >= 8.4");
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
2019-12-14 09:53:50 -05:00
|
|
|
TEST_TITLE("reset stop-auto when PostgreSQL < 9.3");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Create pg_control
|
|
|
|
storagePutP(
|
2020-07-30 07:49:06 -04:00
|
|
|
storageNewWriteP(storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strZ(pg1Path))),
|
2019-12-13 17:14:26 -05:00
|
|
|
pgControlTestToBuffer((PgControl){.version = PG_VERSION_84, .systemId = 1000000000000000840}));
|
|
|
|
|
|
|
|
argList = strLstNew();
|
|
|
|
strLstAddZ(argList, "--" CFGOPT_STANZA "=test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2019-12-13 17:14:26 -05:00
|
|
|
strLstAddZ(argList, "--no-" CFGOPT_ONLINE);
|
|
|
|
strLstAddZ(argList, "--" CFGOPT_STOP_AUTO);
|
|
|
|
harnessCfgLoad(cfgCmdBackup, argList);
|
|
|
|
|
2020-09-18 16:55:26 -04:00
|
|
|
TEST_RESULT_VOID(
|
|
|
|
backupInit(infoBackupNew(PG_VERSION_84, 1000000000000000840, pgCatalogTestVersion(PG_VERSION_84), NULL)),
|
|
|
|
"backup init");
|
2019-12-13 17:14:26 -05:00
|
|
|
TEST_RESULT_BOOL(cfgOptionBool(cfgOptStopAuto), false, " check stop-auto");
|
|
|
|
|
2019-12-14 09:53:50 -05:00
|
|
|
TEST_RESULT_LOG("P00 WARN: stop-auto option is only available in PostgreSQL >= 9.3");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("reset checksum-page when the cluster does not have checksums enabled");
|
|
|
|
|
|
|
|
// Create pg_control
|
|
|
|
storagePutP(
|
2020-07-30 07:49:06 -04:00
|
|
|
storageNewWriteP(storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strZ(pg1Path))),
|
2019-12-13 17:14:26 -05:00
|
|
|
pgControlTestToBuffer((PgControl){.version = PG_VERSION_93, .systemId = PG_VERSION_93}));
|
|
|
|
|
|
|
|
argList = strLstNew();
|
|
|
|
strLstAddZ(argList, "--" CFGOPT_STANZA "=test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2019-12-13 17:14:26 -05:00
|
|
|
strLstAddZ(argList, "--" CFGOPT_CHECKSUM_PAGE);
|
|
|
|
harnessCfgLoad(cfgCmdBackup, argList);
|
|
|
|
|
|
|
|
harnessPqScriptSet((HarnessPq [])
|
|
|
|
{
|
|
|
|
// Connect to primary
|
2020-07-30 07:49:06 -04:00
|
|
|
HRNPQ_MACRO_OPEN_GE_92(1, "dbname='postgres' port=5432", PG_VERSION_93, strZ(pg1Path), false, NULL, NULL),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
HRNPQ_MACRO_DONE()
|
|
|
|
});
|
|
|
|
|
2020-09-18 16:55:26 -04:00
|
|
|
TEST_RESULT_VOID(
|
|
|
|
dbFree(backupInit(infoBackupNew(PG_VERSION_93, PG_VERSION_93, pgCatalogTestVersion(PG_VERSION_93), NULL))->dbPrimary),
|
|
|
|
"backup init");
|
2019-12-13 17:14:26 -05:00
|
|
|
TEST_RESULT_BOOL(cfgOptionBool(cfgOptChecksumPage), false, " check checksum-page");
|
|
|
|
|
|
|
|
TEST_RESULT_LOG(
|
|
|
|
"P00 WARN: checksum-page option set to true but checksums are not enabled on the cluster, resetting to false");
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("ok if cluster checksums are enabled and checksum-page is any value");
|
|
|
|
|
|
|
|
// Create pg_control with page checksums
|
|
|
|
storagePutP(
|
2020-07-30 07:49:06 -04:00
|
|
|
storageNewWriteP(storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strZ(pg1Path))),
|
2019-12-13 17:14:26 -05:00
|
|
|
pgControlTestToBuffer((PgControl){.version = PG_VERSION_93, .systemId = PG_VERSION_93, .pageChecksum = true}));
|
|
|
|
|
|
|
|
argList = strLstNew();
|
|
|
|
strLstAddZ(argList, "--" CFGOPT_STANZA "=test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2019-12-13 17:14:26 -05:00
|
|
|
strLstAddZ(argList, "--no-" CFGOPT_CHECKSUM_PAGE);
|
|
|
|
harnessCfgLoad(cfgCmdBackup, argList);
|
|
|
|
|
|
|
|
harnessPqScriptSet((HarnessPq [])
|
|
|
|
{
|
|
|
|
// Connect to primary
|
2020-07-30 07:49:06 -04:00
|
|
|
HRNPQ_MACRO_OPEN_GE_92(1, "dbname='postgres' port=5432", PG_VERSION_93, strZ(pg1Path), false, NULL, NULL),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
HRNPQ_MACRO_DONE()
|
|
|
|
});
|
|
|
|
|
2020-09-18 16:55:26 -04:00
|
|
|
TEST_RESULT_VOID(
|
|
|
|
dbFree(backupInit(infoBackupNew(PG_VERSION_93, PG_VERSION_93, pgCatalogTestVersion(PG_VERSION_93), NULL))->dbPrimary),
|
|
|
|
"backup init");
|
2019-12-13 17:14:26 -05:00
|
|
|
TEST_RESULT_BOOL(cfgOptionBool(cfgOptChecksumPage), false, " check checksum-page");
|
|
|
|
|
|
|
|
// Create pg_control without page checksums
|
|
|
|
storagePutP(
|
2020-07-30 07:49:06 -04:00
|
|
|
storageNewWriteP(storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strZ(pg1Path))),
|
2019-12-13 17:14:26 -05:00
|
|
|
pgControlTestToBuffer((PgControl){.version = PG_VERSION_93, .systemId = PG_VERSION_93}));
|
|
|
|
|
|
|
|
harnessPqScriptSet((HarnessPq [])
|
|
|
|
{
|
|
|
|
// Connect to primary
|
2020-07-30 07:49:06 -04:00
|
|
|
HRNPQ_MACRO_OPEN_GE_92(1, "dbname='postgres' port=5432", PG_VERSION_93, strZ(pg1Path), false, NULL, NULL),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
HRNPQ_MACRO_DONE()
|
|
|
|
});
|
|
|
|
|
2020-09-18 16:55:26 -04:00
|
|
|
TEST_RESULT_VOID(
|
|
|
|
dbFree(backupInit(infoBackupNew(PG_VERSION_93, PG_VERSION_93, pgCatalogTestVersion(PG_VERSION_93), NULL))->dbPrimary),
|
|
|
|
"backup init");
|
2019-12-13 17:14:26 -05:00
|
|
|
TEST_RESULT_BOOL(cfgOptionBool(cfgOptChecksumPage), false, " check checksum-page");
|
|
|
|
}
|
|
|
|
|
|
|
|
    // *****************************************************************************************************************************
    if (testBegin("backupTime()"))
    {
        const String *pg1Path = strNewFmt("%s/pg1", testPath());
        const String *repoPath = strNewFmt("%s/repo", testPath());

        // -------------------------------------------------------------------------------------------------------------------------
        TEST_TITLE("error when second does not advance after sleep");

        StringList *argList = strLstNew();
        strLstAddZ(argList, "--" CFGOPT_STANZA "=test1");
        hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
        hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
        hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
        harnessCfgLoad(cfgCmdBackup, argList);

        // Create pg_control
        storagePutP(
            storageNewWriteP(storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strZ(pg1Path))),
            pgControlTestToBuffer((PgControl){.version = PG_VERSION_93, .systemId = PG_VERSION_93}));

        harnessPqScriptSet((HarnessPq [])
        {
            // Connect to primary
            HRNPQ_MACRO_OPEN_GE_92(1, "dbname='postgres' port=5432", PG_VERSION_93, strZ(pg1Path), false, NULL, NULL),

            // Don't advance time after wait -- the two scripted time queries stay within the same second (998/999 msec) so
            // backupTime() cannot reach the next second boundary and is expected to assert
            HRNPQ_MACRO_TIME_QUERY(1, 1575392588998),
            HRNPQ_MACRO_TIME_QUERY(1, 1575392588999),

            HRNPQ_MACRO_DONE()
        });

        BackupData *backupData = backupInit(
            infoBackupNew(PG_VERSION_93, PG_VERSION_93, pgCatalogTestVersion(PG_VERSION_93), NULL));

        TEST_ERROR(backupTime(backupData, true), AssertError, "invalid sleep for online backup time with wait remainder");

        // Free the primary db connection opened by backupInit() so the scripted Pq session is cleaned up
        dbFree(backupData->dbPrimary);
    }
|
|
|
|
|
|
|
|
    // *****************************************************************************************************************************
    if (testBegin("backupResumeFind()"))
    {
        const String *repoPath = strNewFmt("%s/repo", testPath());

        StringList *argList = strLstNew();
        strLstAddZ(argList, "--" CFGOPT_STANZA "=test1");
        hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
        hrnCfgArgRawZ(argList, cfgOptPgPath, "/pg");
        hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
        strLstAddZ(argList, "--" CFGOPT_TYPE "=" BACKUP_TYPE_FULL);
        strLstAddZ(argList, "--no-" CFGOPT_COMPRESS);
        harnessCfgLoad(cfgCmdBackup, argList);

        // -------------------------------------------------------------------------------------------------------------------------
        TEST_TITLE("cannot resume empty directory");

        // A backup path with no manifest copy inside is not a resume candidate
        storagePathCreateP(storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F"));

        // (Manifest *)1 is a dummy pointer -- backupResumeFind() is not expected to dereference it on these early-exit paths
        TEST_RESULT_PTR(backupResumeFind((Manifest *)1, NULL), NULL, "find resumable backup");

        // -------------------------------------------------------------------------------------------------------------------------
        TEST_TITLE("cannot resume when resume is disabled");

        cfgOptionSet(cfgOptResume, cfgSourceParam, BOOL_FALSE_VAR);

        // Write an empty manifest copy so the backup path looks like a halted backup
        storagePutP(
            storageNewWriteP(
                storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F/" BACKUP_MANIFEST_FILE INFO_COPY_EXT)),
            NULL);

        TEST_RESULT_PTR(backupResumeFind((Manifest *)1, NULL), NULL, "find resumable backup");

        TEST_RESULT_LOG(
            "P00 WARN: backup '20191003-105320F' cannot be resumed: resume is disabled");

        // An unresumable backup path should be removed from the repo
        TEST_RESULT_BOOL(
            storagePathExistsP(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F")), false, "check backup path removed");

        cfgOptionSet(cfgOptResume, cfgSourceParam, BOOL_TRUE_VAR);

        // -------------------------------------------------------------------------------------------------------------------------
        TEST_TITLE("cannot resume when pgBackRest version has changed");

        // Build a minimal resumable manifest and save it as the manifest copy in the backup path
        Manifest *manifestResume = manifestNewInternal();
        manifestResume->info = infoNew(NULL);
        manifestResume->data.backupType = backupTypeFull;
        manifestResume->data.backupLabel = STRDEF("20191003-105320F");
        manifestResume->data.pgVersion = PG_VERSION_12;

        manifestTargetAdd(manifestResume, &(ManifestTarget){.name = MANIFEST_TARGET_PGDATA_STR, .path = STRDEF("/pg")});
        manifestPathAdd(manifestResume, &(ManifestPath){.name = MANIFEST_TARGET_PGDATA_STR});
        manifestFileAdd(manifestResume, &(ManifestFile){.name = STRDEF("pg_data/" PG_FILE_PGVERSION)});

        manifestSave(
            manifestResume,
            storageWriteIo(
                storageNewWriteP(
                    storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F/" BACKUP_MANIFEST_FILE INFO_COPY_EXT))));

        // New manifest with a version that does not match the resumable manifest
        Manifest *manifest = manifestNewInternal();
        manifest->data.backupType = backupTypeFull;
        manifest->data.backrestVersion = STRDEF("BOGUS");

        TEST_RESULT_PTR(backupResumeFind(manifest, NULL), NULL, "find resumable backup");

        TEST_RESULT_LOG(
            "P00 WARN: backup '20191003-105320F' cannot be resumed:"
                " new pgBackRest version 'BOGUS' does not match resumable pgBackRest version '" PROJECT_VERSION "'");

        TEST_RESULT_BOOL(
            storagePathExistsP(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F")), false, "check backup path removed");

        // Restore the real version for the remaining scenarios
        manifest->data.backrestVersion = STRDEF(PROJECT_VERSION);

        // -------------------------------------------------------------------------------------------------------------------------
        TEST_TITLE("cannot resume when backup labels do not match (resumable is null)");

        manifest->data.backupType = backupTypeFull;
        manifest->data.backupLabelPrior = STRDEF("20191003-105320F");

        manifestSave(
            manifestResume,
            storageWriteIo(
                storageNewWriteP(
                    storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F/" BACKUP_MANIFEST_FILE INFO_COPY_EXT))));

        TEST_RESULT_PTR(backupResumeFind(manifest, NULL), NULL, "find resumable backup");

        TEST_RESULT_LOG(
            "P00 WARN: backup '20191003-105320F' cannot be resumed:"
                " new prior backup label '<undef>' does not match resumable prior backup label '20191003-105320F'");

        TEST_RESULT_BOOL(
            storagePathExistsP(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F")), false, "check backup path removed");

        manifest->data.backupLabelPrior = NULL;

        // -------------------------------------------------------------------------------------------------------------------------
        TEST_TITLE("cannot resume when backup labels do not match (new is null)");

        // Mirror of the prior scenario: now the resumable manifest has the prior label and the new manifest does not
        manifest->data.backupType = backupTypeFull;
        manifestResume->data.backupLabelPrior = STRDEF("20191003-105320F");

        manifestSave(
            manifestResume,
            storageWriteIo(
                storageNewWriteP(
                    storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F/" BACKUP_MANIFEST_FILE INFO_COPY_EXT))));

        TEST_RESULT_PTR(backupResumeFind(manifest, NULL), NULL, "find resumable backup");

        TEST_RESULT_LOG(
            "P00 WARN: backup '20191003-105320F' cannot be resumed:"
                " new prior backup label '20191003-105320F' does not match resumable prior backup label '<undef>'");

        TEST_RESULT_BOOL(
            storagePathExistsP(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F")), false, "check backup path removed");

        manifestResume->data.backupLabelPrior = NULL;

        // -------------------------------------------------------------------------------------------------------------------------
        TEST_TITLE("cannot resume when compression does not match");

        // Resumable backup was compressed with gz but the new backup is uncompressed (--no-compress above)
        manifestResume->data.backupOptionCompressType = compressTypeGz;

        manifestSave(
            manifestResume,
            storageWriteIo(
                storageNewWriteP(
                    storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F/" BACKUP_MANIFEST_FILE INFO_COPY_EXT))));

        TEST_RESULT_PTR(backupResumeFind(manifest, NULL), NULL, "find resumable backup");

        TEST_RESULT_LOG(
            "P00 WARN: backup '20191003-105320F' cannot be resumed:"
                " new compression 'none' does not match resumable compression 'gz'");

        TEST_RESULT_BOOL(
            storagePathExistsP(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F")), false, "check backup path removed");

        manifestResume->data.backupOptionCompressType = compressTypeNone;
    }
|
|
|
|
|
|
|
|
    // *****************************************************************************************************************************
    if (testBegin("backupJobResult()"))
    {
        // Set log level to detail
        harnessLogLevelSet(logLevelDetail);

        // -------------------------------------------------------------------------------------------------------------------------
        TEST_TITLE("report job error");

        // A job carrying an error should have that error rethrown by backupJobResult()
        ProtocolParallelJob *job = protocolParallelJobNew(VARSTRDEF("key"), protocolCommandNew(STRDEF("command")));
        protocolParallelJobErrorSet(job, errorTypeCode(&AssertError), STRDEF("error message"));

        // (Manifest *)1 is a dummy pointer -- the error path is not expected to dereference the manifest
        TEST_ERROR(backupJobResult((Manifest *)1, NULL, STRDEF("log"), strLstNew(), job, 0, 0), AssertError, "error message");

        // -------------------------------------------------------------------------------------------------------------------------
        TEST_TITLE("report host/100% progress on noop result");

        // Create job that skips file
        job = protocolParallelJobNew(VARSTRDEF("pg_data/test"), protocolCommandNew(STRDEF("command")));

        // Result list layout: copy result, copy size, repo size, checksum, page checksum result
        VariantList *result = varLstNew();
        varLstAdd(result, varNewUInt64(backupCopyResultNoOp));
        varLstAdd(result, varNewUInt64(0));
        varLstAdd(result, varNewUInt64(0));
        varLstAdd(result, NULL);
        varLstAdd(result, NULL);

        protocolParallelJobResultSet(job, varNewVarLst(result));

        // Create manifest with file
        Manifest *manifest = manifestNewInternal();
        manifestFileAdd(manifest, &(ManifestFile){.name = STRDEF("pg_data/test")});

        TEST_RESULT_UINT(
            backupJobResult(manifest, STRDEF("host"), STRDEF("log-test"), strLstNew(), job, 0, 0), 0, "log noop result");

        // Noop (0B) file reports 100% progress and includes the host prefix in the log line
        TEST_RESULT_LOG("P00 DETAIL: match file from prior backup host:log-test (0B, 100%)");
    }
|
|
|
|
|
|
|
|
    // Offline tests should only be used to test offline functionality and errors easily tested in offline mode
    // *****************************************************************************************************************************
    if (testBegin("cmdBackup() offline"))
    {
        const String *pg1Path = strNewFmt("%s/pg1", testPath());
        const String *repoPath = strNewFmt("%s/repo", testPath());

        // Set log level to detail
        harnessLogLevelSet(logLevelDetail);

        // Replace backup labels since the times are not deterministic
        hrnLogReplaceAdd("[0-9]{8}-[0-9]{6}F_[0-9]{8}-[0-9]{6}I", NULL, "INCR", true);
        hrnLogReplaceAdd("[0-9]{8}-[0-9]{6}F_[0-9]{8}-[0-9]{6}D", NULL, "DIFF", true);
        hrnLogReplaceAdd("[0-9]{8}-[0-9]{6}F", NULL, "FULL", true);

        // Create pg_control
        storagePutP(
            storageNewWriteP(storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strZ(pg1Path))),
            pgControlTestToBuffer((PgControl){.version = PG_VERSION_84, .systemId = 1000000000000000840}));

        // Create stanza
        StringList *argList = strLstNew();
        strLstAddZ(argList, "--" CFGOPT_STANZA "=test1");
        hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
        hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
        strLstAddZ(argList, "--no-" CFGOPT_ONLINE);
        harnessCfgLoad(cfgCmdStanzaCreate, argList);

        cmdStanzaCreate();

        // -------------------------------------------------------------------------------------------------------------------------
        TEST_TITLE("error when pg appears to be running");

        argList = strLstNew();
        strLstAddZ(argList, "--" CFGOPT_STANZA "=test1");
        hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
        hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
        hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
        strLstAddZ(argList, "--no-" CFGOPT_ONLINE);
        harnessCfgLoad(cfgCmdBackup, argList);

        // A postmaster.pid file makes the cluster look like it is running, which is an error for offline backup without --force
        storagePutP(storageNewWriteP(storagePgWrite(), PG_FILE_POSTMASTERPID_STR), BUFSTRDEF("PID"));

        TEST_ERROR(
            cmdBackup(), PgRunningError,
            "--no-online passed but postmaster.pid exists - looks like " PG_NAME " is running. Shut down " PG_NAME " and try"
                " again, or use --force.");

        TEST_RESULT_LOG("P00 WARN: no prior backup exists, incr backup has been changed to full");

        // -------------------------------------------------------------------------------------------------------------------------
        TEST_TITLE("offline full backup");

        argList = strLstNew();
        strLstAddZ(argList, "--" CFGOPT_STANZA "=test1");
        hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
        hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
        hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
        strLstAddZ(argList, "--no-" CFGOPT_ONLINE);
        strLstAddZ(argList, "--no-" CFGOPT_COMPRESS);
        strLstAddZ(argList, "--" CFGOPT_FORCE);
        harnessCfgLoad(cfgCmdBackup, argList);

        storagePutP(storageNewWriteP(storagePgWrite(), STRDEF("postgresql.conf")), BUFSTRDEF("CONFIGSTUFF"));

        TEST_RESULT_VOID(cmdBackup(), "backup");

        // pg_control checksum differs by architecture since the struct layout depends on word size/endianness
        TEST_RESULT_LOG_FMT(
            "P00 WARN: no prior backup exists, incr backup has been changed to full\n"
            "P00 WARN: --no-online passed and postmaster.pid exists but --force was passed so backup will continue though it"
                " looks like " PG_NAME " is running and the backup will probably not be consistent\n"
            "P01 INFO: backup file {[path]}/pg1/global/pg_control (8KB, 99%%) checksum %s\n"
            "P01 INFO: backup file {[path]}/pg1/postgresql.conf (11B, 100%%) checksum e3db315c260e79211b7b52587123b7aa060f30ab\n"
            "P00 INFO: full backup size = 8KB\n"
            "P00 INFO: new backup label = [FULL-1]",
            TEST_64BIT() ?
                (TEST_BIG_ENDIAN() ? "749acedef8f8d5fe35fc20c0375657f876ccc38e" : "21e2ddc99cdf4cfca272eee4f38891146092e358") :
                "8bb70506d988a8698d9e8cf90736ada23634571b");

        // Make pg no longer appear to be running
        storageRemoveP(storagePgWrite(), PG_FILE_POSTMASTERPID_STR, .errorOnMissing = true);

        // -------------------------------------------------------------------------------------------------------------------------
        TEST_TITLE("error when no files have changed");

        argList = strLstNew();
        strLstAddZ(argList, "--" CFGOPT_STANZA "=test1");
        hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
        hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
        hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
        strLstAddZ(argList, "--no-" CFGOPT_ONLINE);
        strLstAddZ(argList, "--" CFGOPT_COMPRESS);
        hrnCfgArgRawBool(argList, cfgOptRepoHardlink, true);
        strLstAddZ(argList, "--" CFGOPT_TYPE "=" BACKUP_TYPE_DIFF);
        harnessCfgLoad(cfgCmdBackup, argList);

        // No file changes since [FULL-1], so the diff backup fails; compress/hardlink options are also reset to the prior values
        TEST_ERROR(cmdBackup(), FileMissingError, "no files have changed since the last backup - this seems unlikely");

        TEST_RESULT_LOG(
            "P00 INFO: last backup label = [FULL-1], version = " PROJECT_VERSION "\n"
            "P00 WARN: diff backup cannot alter compress-type option to 'gz', reset to value in [FULL-1]\n"
            "P00 WARN: diff backup cannot alter hardlink option to 'true', reset to value in [FULL-1]");

        // -------------------------------------------------------------------------------------------------------------------------
        TEST_TITLE("offline incr backup to test unresumable backup");

        argList = strLstNew();
        strLstAddZ(argList, "--" CFGOPT_STANZA "=test1");
        hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
        hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
        hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
        strLstAddZ(argList, "--no-" CFGOPT_ONLINE);
        strLstAddZ(argList, "--no-" CFGOPT_COMPRESS);
        strLstAddZ(argList, "--" CFGOPT_CHECKSUM_PAGE);
        strLstAddZ(argList, "--" CFGOPT_TYPE "=" BACKUP_TYPE_INCR);
        harnessCfgLoad(cfgCmdBackup, argList);

        storagePutP(storageNewWriteP(storagePgWrite(), PG_FILE_PGVERSION_STR), BUFSTRDEF("VER"));

        TEST_RESULT_VOID(cmdBackup(), "backup");

        // The diff backup halted by the prior error cannot be resumed by this incr backup (type mismatch)
        TEST_RESULT_LOG(
            "P00 INFO: last backup label = [FULL-1], version = " PROJECT_VERSION "\n"
            "P00 WARN: incr backup cannot alter 'checksum-page' option to 'true', reset to 'false' from [FULL-1]\n"
            "P00 WARN: backup '[DIFF-1]' cannot be resumed: new backup type 'incr' does not match resumable backup type 'diff'\n"
            "P01 INFO: backup file {[path]}/pg1/PG_VERSION (3B, 100%) checksum c8663c2525f44b6d9c687fbceb4aafc63ed8b451\n"
            "P00 DETAIL: reference pg_data/global/pg_control to [FULL-1]\n"
            "P00 DETAIL: reference pg_data/postgresql.conf to [FULL-1]\n"
            "P00 INFO: incr backup size = 3B\n"
            "P00 INFO: new backup label = [INCR-1]");

        // -------------------------------------------------------------------------------------------------------------------------
        TEST_TITLE("offline diff backup to test prior backup must be full");

        argList = strLstNew();
        strLstAddZ(argList, "--" CFGOPT_STANZA "=test1");
        hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
        hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
        hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
        strLstAddZ(argList, "--no-" CFGOPT_ONLINE);
        strLstAddZ(argList, "--no-" CFGOPT_COMPRESS);
        strLstAddZ(argList, "--" CFGOPT_TYPE "=" BACKUP_TYPE_DIFF);
        harnessCfgLoad(cfgCmdBackup, argList);

        // Sleep to the next second boundary so the rewritten PG_VERSION gets a new timestamp and is detected as changed
        sleepMSec(MSEC_PER_SEC - (timeMSec() % MSEC_PER_SEC));
        storagePutP(storageNewWriteP(storagePgWrite(), PG_FILE_PGVERSION_STR), BUFSTRDEF("VR2"));

        TEST_RESULT_VOID(cmdBackup(), "backup");

        // The diff is based on [FULL-1] (the prior incr is not a valid diff base), producing [DIFF-2]
        TEST_RESULT_LOG(
            "P00 INFO: last backup label = [FULL-1], version = " PROJECT_VERSION "\n"
            "P01 INFO: backup file {[path]}/pg1/PG_VERSION (3B, 100%) checksum 6f1894088c578e4f0b9888e8e8a997d93cbbc0c5\n"
            "P00 DETAIL: reference pg_data/global/pg_control to [FULL-1]\n"
            "P00 DETAIL: reference pg_data/postgresql.conf to [FULL-1]\n"
            "P00 INFO: diff backup size = 3B\n"
            "P00 INFO: new backup label = [DIFF-2]");
    }
|
|
|
|
|
|
|
|
// *****************************************************************************************************************************
|
|
|
|
if (testBegin("cmdBackup() online"))
|
|
|
|
{
|
|
|
|
const String *pg1Path = strNewFmt("%s/pg1", testPath());
|
|
|
|
const String *repoPath = strNewFmt("%s/repo", testPath());
|
|
|
|
const String *pg2Path = strNewFmt("%s/pg2", testPath());
|
|
|
|
|
|
|
|
// Set log level to detail
|
|
|
|
harnessLogLevelSet(logLevelDetail);
|
|
|
|
|
|
|
|
// Replace percent complete and backup size since they can cause a lot of churn when files are added/removed
|
|
|
|
hrnLogReplaceAdd(", [0-9]{1,3}%\\)", "[0-9]+%", "PCT", false);
|
|
|
|
hrnLogReplaceAdd(" backup size = [0-9]+[A-Z]+", "[^ ]+$", "SIZE", false);
|
|
|
|
|
|
|
|
// Replace checksums since they can differ between architectures (e.g. 32/64 bit)
|
|
|
|
hrnLogReplaceAdd("\\) checksum [a-f0-9]{40}", "[a-f0-9]{40}$", "SHA1", false);
|
|
|
|
|
|
|
|
// Backup start time epoch. The idea is to not have backup times (and therefore labels) ever change. Each backup added
|
|
|
|
// should be separated by 100,000 seconds (1,000,000 after stanza-upgrade) but after the initial assignments this will only
|
|
|
|
// be possible at the beginning and the end, so new backups added in the middle will average the start times of the prior
|
|
|
|
// and next backup to get their start time. Backups added to the beginning of the test will need to subtract from the
|
|
|
|
// epoch.
|
|
|
|
#define BACKUP_EPOCH 1570000000
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("online 9.5 resume uncompressed full backup");
|
|
|
|
|
|
|
|
time_t backupTimeStart = BACKUP_EPOCH;
|
|
|
|
|
|
|
|
{
|
|
|
|
// Create pg_control
|
|
|
|
storagePutP(
|
|
|
|
storageNewWriteP(
|
2020-07-30 07:49:06 -04:00
|
|
|
storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strZ(pg1Path)),
|
2019-12-13 17:14:26 -05:00
|
|
|
.timeModified = backupTimeStart),
|
|
|
|
pgControlTestToBuffer((PgControl){.version = PG_VERSION_95, .systemId = 1000000000000000950}));
|
|
|
|
|
|
|
|
// Create stanza
|
|
|
|
StringList *argList = strLstNew();
|
|
|
|
strLstAddZ(argList, "--" CFGOPT_STANZA "=test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
2019-12-13 17:14:26 -05:00
|
|
|
strLstAddZ(argList, "--no-" CFGOPT_ONLINE);
|
|
|
|
harnessCfgLoad(cfgCmdStanzaCreate, argList);
|
|
|
|
|
|
|
|
cmdStanzaCreate();
|
|
|
|
|
|
|
|
// Load options
|
|
|
|
argList = strLstNew();
|
|
|
|
strLstAddZ(argList, "--" CFGOPT_STANZA "=test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2019-12-13 17:14:26 -05:00
|
|
|
strLstAddZ(argList, "--" CFGOPT_TYPE "=" BACKUP_TYPE_FULL);
|
2019-12-14 09:53:50 -05:00
|
|
|
strLstAddZ(argList, "--" CFGOPT_STOP_AUTO);
|
2019-12-13 17:14:26 -05:00
|
|
|
strLstAddZ(argList, "--no-" CFGOPT_COMPRESS);
|
|
|
|
strLstAddZ(argList, "--no-" CFGOPT_ARCHIVE_CHECK);
|
|
|
|
harnessCfgLoad(cfgCmdBackup, argList);
|
|
|
|
|
|
|
|
// Add files
|
|
|
|
storagePutP(
|
|
|
|
storageNewWriteP(storagePgWrite(), STRDEF("postgresql.conf"), .timeModified = backupTimeStart),
|
|
|
|
BUFSTRDEF("CONFIGSTUFF"));
|
|
|
|
storagePutP(
|
|
|
|
storageNewWriteP(storagePgWrite(), PG_FILE_PGVERSION_STR, .timeModified = backupTimeStart),
|
|
|
|
BUFSTRDEF(PG_VERSION_95_STR));
|
|
|
|
storagePathCreateP(storagePgWrite(), pgWalPath(PG_VERSION_95), .noParentCreate = true);
|
|
|
|
|
|
|
|
// Create a backup manifest that looks like a halted backup manifest
|
2020-09-18 16:55:26 -04:00
|
|
|
Manifest *manifestResume = manifestNewBuild(
|
|
|
|
storagePg(), PG_VERSION_95, pgCatalogTestVersion(PG_VERSION_95), true, false, NULL, NULL);
|
2019-12-13 17:14:26 -05:00
|
|
|
ManifestData *manifestResumeData = (ManifestData *)manifestData(manifestResume);
|
|
|
|
|
|
|
|
manifestResumeData->backupType = backupTypeFull;
|
|
|
|
const String *resumeLabel = backupLabelCreate(backupTypeFull, NULL, backupTimeStart);
|
|
|
|
manifestBackupLabelSet(manifestResume, resumeLabel);
|
|
|
|
|
|
|
|
// Copy a file to be resumed that has not changed in the repo
|
|
|
|
storageCopy(
|
|
|
|
storageNewReadP(storagePg(), PG_FILE_PGVERSION_STR),
|
2020-07-30 07:49:06 -04:00
|
|
|
storageNewWriteP(storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/PG_VERSION", strZ(resumeLabel))));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
strcpy(
|
|
|
|
((ManifestFile *)manifestFileFind(manifestResume, STRDEF("pg_data/PG_VERSION")))->checksumSha1,
|
|
|
|
"06d06bb31b570b94d7b4325f511f853dbe771c21");
|
|
|
|
|
|
|
|
// Save the resume manifest
|
|
|
|
manifestSave(
|
|
|
|
manifestResume,
|
|
|
|
storageWriteIo(
|
|
|
|
storageNewWriteP(
|
|
|
|
storageRepoWrite(),
|
2020-07-30 07:49:06 -04:00
|
|
|
strNewFmt(STORAGE_REPO_BACKUP "/%s/" BACKUP_MANIFEST_FILE INFO_COPY_EXT, strZ(resumeLabel)))));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Run backup
|
|
|
|
testBackupPqScriptP(PG_VERSION_95, backupTimeStart);
|
|
|
|
TEST_RESULT_VOID(cmdBackup(), "backup");
|
|
|
|
|
|
|
|
TEST_RESULT_LOG(
|
|
|
|
"P00 INFO: execute exclusive pg_start_backup(): backup begins after the next regular checkpoint completes\n"
|
|
|
|
"P00 INFO: backup start archive = 0000000105D944C000000000, lsn = 5d944c0/0\n"
|
|
|
|
"P00 WARN: resumable backup 20191002-070640F of same type exists -- remove invalid files and resume\n"
|
|
|
|
"P01 INFO: backup file {[path]}/pg1/global/pg_control (8KB, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 INFO: backup file {[path]}/pg1/postgresql.conf (11B, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 DETAIL: checksum resumed file {[path]}/pg1/PG_VERSION (3B, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P00 INFO: full backup size = [SIZE]\n"
|
|
|
|
"P00 INFO: execute exclusive pg_stop_backup() and wait for all WAL segments to archive\n"
|
|
|
|
"P00 INFO: backup stop archive = 0000000105D944C000000000, lsn = 5d944c0/800000\n"
|
|
|
|
"P00 INFO: new backup label = 20191002-070640F");
|
|
|
|
|
2020-03-18 10:10:10 -04:00
|
|
|
TEST_RESULT_STR_Z_KEYRPL(
|
2019-12-13 17:14:26 -05:00
|
|
|
testBackupValidate(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/latest")),
|
|
|
|
". {link, d=20191002-070640F}\n"
|
|
|
|
"pg_data {path}\n"
|
|
|
|
"pg_data/PG_VERSION {file, s=3}\n"
|
|
|
|
"pg_data/global {path}\n"
|
|
|
|
"pg_data/global/pg_control {file, s=8192}\n"
|
|
|
|
"pg_data/pg_xlog {path}\n"
|
2020-03-18 10:10:10 -04:00
|
|
|
"pg_data/postgresql.conf {file, s=11}\n"
|
|
|
|
"--------\n"
|
|
|
|
"[backup:target]\n"
|
|
|
|
"pg_data={\"path\":\"{[path]}/pg1\",\"type\":\"path\"}\n"
|
|
|
|
"\n"
|
|
|
|
"[target:file]\n"
|
|
|
|
"pg_data/PG_VERSION={\"checksum\":\"06d06bb31b570b94d7b4325f511f853dbe771c21\",\"size\":3"
|
|
|
|
",\"timestamp\":1570000000}\n"
|
|
|
|
"pg_data/global/pg_control={\"size\":8192,\"timestamp\":1570000000}\n"
|
|
|
|
"pg_data/postgresql.conf={\"checksum\":\"e3db315c260e79211b7b52587123b7aa060f30ab\",\"size\":11"
|
|
|
|
",\"timestamp\":1570000000}\n"
|
|
|
|
"\n"
|
|
|
|
"[target:path]\n"
|
|
|
|
"pg_data={}\n"
|
|
|
|
"pg_data/global={}\n"
|
|
|
|
"pg_data/pg_xlog={}\n",
|
2019-12-13 17:14:26 -05:00
|
|
|
"compare file list");
|
|
|
|
}
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("online resumed compressed 9.5 full backup");
|
|
|
|
|
|
|
|
// Backup start time
|
|
|
|
backupTimeStart = BACKUP_EPOCH + 100000;
|
|
|
|
|
|
|
|
{
|
|
|
|
// Load options
|
|
|
|
StringList *argList = strLstNew();
|
|
|
|
strLstAddZ(argList, "--" CFGOPT_STANZA "=test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2019-12-13 17:14:26 -05:00
|
|
|
strLstAddZ(argList, "--" CFGOPT_TYPE "=" BACKUP_TYPE_FULL);
|
2019-12-14 09:53:50 -05:00
|
|
|
strLstAddZ(argList, "--" CFGOPT_STOP_AUTO);
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRawBool(argList, cfgOptRepoHardlink, true);
|
2019-12-13 17:14:26 -05:00
|
|
|
strLstAddZ(argList, "--" CFGOPT_ARCHIVE_COPY);
|
|
|
|
harnessCfgLoad(cfgCmdBackup, argList);
|
|
|
|
|
|
|
|
// Create a backup manifest that looks like a halted backup manifest
|
2020-09-18 16:55:26 -04:00
|
|
|
Manifest *manifestResume = manifestNewBuild(
|
|
|
|
storagePg(), PG_VERSION_95, pgCatalogTestVersion(PG_VERSION_95), true, false, NULL, NULL);
|
2019-12-13 17:14:26 -05:00
|
|
|
ManifestData *manifestResumeData = (ManifestData *)manifestData(manifestResume);
|
|
|
|
|
|
|
|
manifestResumeData->backupType = backupTypeFull;
|
2020-03-06 14:41:03 -05:00
|
|
|
manifestResumeData->backupOptionCompressType = compressTypeGz;
|
2019-12-13 17:14:26 -05:00
|
|
|
const String *resumeLabel = backupLabelCreate(backupTypeFull, NULL, backupTimeStart);
|
|
|
|
manifestBackupLabelSet(manifestResume, resumeLabel);
|
|
|
|
|
|
|
|
// File exists in cluster and repo but not in the resume manifest
|
|
|
|
storagePutP(
|
|
|
|
storageNewWriteP(storagePgWrite(), STRDEF("not-in-resume"), .timeModified = backupTimeStart), BUFSTRDEF("TEST"));
|
|
|
|
storagePutP(
|
|
|
|
storageNewWriteP(
|
2020-07-30 07:49:06 -04:00
|
|
|
storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/not-in-resume.gz", strZ(resumeLabel))),
|
2019-12-13 17:14:26 -05:00
|
|
|
NULL);
|
|
|
|
|
|
|
|
// Remove checksum from file so it won't be resumed
|
|
|
|
storagePutP(
|
|
|
|
storageNewWriteP(
|
2020-07-30 07:49:06 -04:00
|
|
|
storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/global/pg_control.gz", strZ(resumeLabel))),
|
2019-12-13 17:14:26 -05:00
|
|
|
NULL);
|
|
|
|
|
|
|
|
((ManifestFile *)manifestFileFind(manifestResume, STRDEF("pg_data/global/pg_control")))->checksumSha1[0] = 0;
|
|
|
|
|
|
|
|
// Size does not match between cluster and resume manifest
|
|
|
|
storagePutP(
|
|
|
|
storageNewWriteP(storagePgWrite(), STRDEF("size-mismatch"), .timeModified = backupTimeStart), BUFSTRDEF("TEST"));
|
|
|
|
storagePutP(
|
|
|
|
storageNewWriteP(
|
2020-07-30 07:49:06 -04:00
|
|
|
storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/size-mismatch.gz", strZ(resumeLabel))),
|
2019-12-13 17:14:26 -05:00
|
|
|
NULL);
|
|
|
|
manifestFileAdd(
|
|
|
|
manifestResume, &(ManifestFile){
|
2020-11-09 16:26:43 -05:00
|
|
|
.name = STRDEF("pg_data/size-mismatch"), .checksumSha1 = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
|
|
|
|
.size = 33});
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Time does not match between cluster and resume manifest
|
|
|
|
storagePutP(
|
|
|
|
storageNewWriteP(storagePgWrite(), STRDEF("time-mismatch"), .timeModified = backupTimeStart), BUFSTRDEF("TEST"));
|
|
|
|
storagePutP(
|
|
|
|
storageNewWriteP(
|
2020-07-30 07:49:06 -04:00
|
|
|
storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/time-mismatch.gz", strZ(resumeLabel))),
|
2019-12-13 17:14:26 -05:00
|
|
|
NULL);
|
|
|
|
manifestFileAdd(
|
|
|
|
manifestResume, &(ManifestFile){
|
|
|
|
.name = STRDEF("pg_data/time-mismatch"), .checksumSha1 = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", .size = 4,
|
|
|
|
.timestamp = backupTimeStart - 1});
|
|
|
|
|
|
|
|
// Size is zero in cluster and resume manifest. ??? We'd like to remove this requirement after the migration.
|
|
|
|
storagePutP(storageNewWriteP(storagePgWrite(), STRDEF("zero-size"), .timeModified = backupTimeStart), NULL);
|
|
|
|
storagePutP(
|
2020-07-30 07:49:06 -04:00
|
|
|
storageNewWriteP(storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/zero-size.gz", strZ(resumeLabel))),
|
2019-12-13 17:14:26 -05:00
|
|
|
BUFSTRDEF("ZERO-SIZE"));
|
|
|
|
manifestFileAdd(
|
|
|
|
manifestResume, &(ManifestFile){.name = STRDEF("pg_data/zero-size"), .size = 0, .timestamp = backupTimeStart});
|
|
|
|
|
|
|
|
// Path is not in manifest
|
2020-07-30 07:49:06 -04:00
|
|
|
storagePathCreateP(storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/bogus_path", strZ(resumeLabel)));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// File is not in manifest
|
2020-03-06 14:41:03 -05:00
|
|
|
storagePutP(
|
|
|
|
storageNewWriteP(
|
2020-07-30 07:49:06 -04:00
|
|
|
storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/global/bogus.gz", strZ(resumeLabel))),
|
2020-03-06 14:41:03 -05:00
|
|
|
NULL);
|
|
|
|
|
|
|
|
// File has incorrect compression type
|
2019-12-13 17:14:26 -05:00
|
|
|
storagePutP(
|
2020-07-30 07:49:06 -04:00
|
|
|
storageNewWriteP(storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/global/bogus", strZ(resumeLabel))),
|
2019-12-13 17:14:26 -05:00
|
|
|
NULL);
|
|
|
|
|
|
|
|
// Save the resume manifest
|
|
|
|
manifestSave(
|
|
|
|
manifestResume,
|
|
|
|
storageWriteIo(
|
|
|
|
storageNewWriteP(
|
|
|
|
storageRepoWrite(),
|
2020-07-30 07:49:06 -04:00
|
|
|
strNewFmt(STORAGE_REPO_BACKUP "/%s/" BACKUP_MANIFEST_FILE INFO_COPY_EXT, strZ(resumeLabel)))));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Disable storageFeaturePath so paths will not be created before files are copied
|
|
|
|
((Storage *)storageRepoWrite())->interface.feature ^= 1 << storageFeaturePath;
|
|
|
|
|
|
|
|
// Disable storageFeaturePathSync so paths will not be synced
|
|
|
|
((Storage *)storageRepoWrite())->interface.feature ^= 1 << storageFeaturePathSync;
|
|
|
|
|
|
|
|
// Run backup
|
|
|
|
testBackupPqScriptP(PG_VERSION_95, backupTimeStart);
|
|
|
|
TEST_RESULT_VOID(cmdBackup(), "backup");
|
|
|
|
|
|
|
|
// Enable storage features
|
|
|
|
((Storage *)storageRepoWrite())->interface.feature |= 1 << storageFeaturePath;
|
|
|
|
((Storage *)storageRepoWrite())->interface.feature |= 1 << storageFeaturePathSync;
|
|
|
|
|
|
|
|
TEST_RESULT_LOG(
|
|
|
|
"P00 INFO: execute exclusive pg_start_backup(): backup begins after the next regular checkpoint completes\n"
|
|
|
|
"P00 INFO: backup start archive = 0000000105D95D3000000000, lsn = 5d95d30/0\n"
|
|
|
|
"P00 WARN: resumable backup 20191003-105320F of same type exists -- remove invalid files and resume\n"
|
|
|
|
"P00 DETAIL: remove path '{[path]}/repo/backup/test1/20191003-105320F/pg_data/bogus_path' from resumed backup\n"
|
|
|
|
"P00 DETAIL: remove file '{[path]}/repo/backup/test1/20191003-105320F/pg_data/global/bogus' from resumed backup"
|
2020-03-06 14:41:03 -05:00
|
|
|
" (mismatched compression type)\n"
|
|
|
|
"P00 DETAIL: remove file '{[path]}/repo/backup/test1/20191003-105320F/pg_data/global/bogus.gz' from resumed backup"
|
2019-12-13 17:14:26 -05:00
|
|
|
" (missing in manifest)\n"
|
|
|
|
"P00 DETAIL: remove file '{[path]}/repo/backup/test1/20191003-105320F/pg_data/global/pg_control.gz' from resumed"
|
|
|
|
" backup (no checksum in resumed manifest)\n"
|
|
|
|
"P00 DETAIL: remove file '{[path]}/repo/backup/test1/20191003-105320F/pg_data/not-in-resume.gz' from resumed backup"
|
|
|
|
" (missing in resumed manifest)\n"
|
|
|
|
"P00 DETAIL: remove file '{[path]}/repo/backup/test1/20191003-105320F/pg_data/size-mismatch.gz' from resumed backup"
|
|
|
|
" (mismatched size)\n"
|
|
|
|
"P00 DETAIL: remove file '{[path]}/repo/backup/test1/20191003-105320F/pg_data/time-mismatch.gz' from resumed backup"
|
|
|
|
" (mismatched timestamp)\n"
|
|
|
|
"P00 DETAIL: remove file '{[path]}/repo/backup/test1/20191003-105320F/pg_data/zero-size.gz' from resumed backup"
|
|
|
|
" (zero size)\n"
|
|
|
|
"P01 INFO: backup file {[path]}/pg1/global/pg_control (8KB, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 INFO: backup file {[path]}/pg1/postgresql.conf (11B, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 INFO: backup file {[path]}/pg1/time-mismatch (4B, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 INFO: backup file {[path]}/pg1/size-mismatch (4B, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 INFO: backup file {[path]}/pg1/not-in-resume (4B, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 INFO: backup file {[path]}/pg1/PG_VERSION (3B, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 INFO: backup file {[path]}/pg1/zero-size (0B, [PCT])\n"
|
|
|
|
"P00 INFO: full backup size = [SIZE]\n"
|
|
|
|
"P00 INFO: execute exclusive pg_stop_backup() and wait for all WAL segments to archive\n"
|
|
|
|
"P00 INFO: backup stop archive = 0000000105D95D3000000000, lsn = 5d95d30/800000\n"
|
|
|
|
"P00 INFO: check archive for segment(s) 0000000105D95D3000000000:0000000105D95D3000000000\n"
|
|
|
|
"P00 INFO: new backup label = 20191003-105320F");
|
|
|
|
|
2020-03-18 10:10:10 -04:00
|
|
|
TEST_RESULT_STR_Z_KEYRPL(
|
2019-12-13 17:14:26 -05:00
|
|
|
testBackupValidate(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/latest")),
|
|
|
|
". {link, d=20191003-105320F}\n"
|
|
|
|
"pg_data {path}\n"
|
|
|
|
"pg_data/PG_VERSION.gz {file, s=3}\n"
|
|
|
|
"pg_data/global {path}\n"
|
|
|
|
"pg_data/global/pg_control.gz {file, s=8192}\n"
|
|
|
|
"pg_data/not-in-resume.gz {file, s=4}\n"
|
|
|
|
"pg_data/pg_xlog {path}\n"
|
|
|
|
"pg_data/pg_xlog/0000000105D95D3000000000.gz {file, s=16777216}\n"
|
|
|
|
"pg_data/postgresql.conf.gz {file, s=11}\n"
|
|
|
|
"pg_data/size-mismatch.gz {file, s=4}\n"
|
|
|
|
"pg_data/time-mismatch.gz {file, s=4}\n"
|
2020-03-18 10:10:10 -04:00
|
|
|
"pg_data/zero-size.gz {file, s=0}\n"
|
|
|
|
"--------\n"
|
|
|
|
"[backup:target]\n"
|
|
|
|
"pg_data={\"path\":\"{[path]}/pg1\",\"type\":\"path\"}\n"
|
|
|
|
"\n"
|
|
|
|
"[target:file]\n"
|
|
|
|
"pg_data/PG_VERSION={\"checksum\":\"06d06bb31b570b94d7b4325f511f853dbe771c21\",\"size\":3"
|
|
|
|
",\"timestamp\":1570000000}\n"
|
|
|
|
"pg_data/global/pg_control={\"size\":8192,\"timestamp\":1570000000}\n"
|
|
|
|
"pg_data/not-in-resume={\"checksum\":\"984816fd329622876e14907634264e6f332e9fb3\",\"size\":4"
|
|
|
|
",\"timestamp\":1570100000}\n"
|
|
|
|
"pg_data/pg_xlog/0000000105D95D3000000000={\"size\":16777216,\"timestamp\":1570100002}\n"
|
|
|
|
"pg_data/postgresql.conf={\"checksum\":\"e3db315c260e79211b7b52587123b7aa060f30ab\",\"size\":11"
|
|
|
|
",\"timestamp\":1570000000}\n"
|
|
|
|
"pg_data/size-mismatch={\"checksum\":\"984816fd329622876e14907634264e6f332e9fb3\",\"size\":4"
|
|
|
|
",\"timestamp\":1570100000}\n"
|
|
|
|
"pg_data/time-mismatch={\"checksum\":\"984816fd329622876e14907634264e6f332e9fb3\",\"size\":4"
|
|
|
|
",\"timestamp\":1570100000}\n"
|
|
|
|
"pg_data/zero-size={\"size\":0,\"timestamp\":1570100000}\n"
|
|
|
|
"\n"
|
|
|
|
"[target:path]\n"
|
|
|
|
"pg_data={}\n"
|
|
|
|
"pg_data/global={}\n"
|
|
|
|
"pg_data/pg_xlog={}\n",
|
2019-12-13 17:14:26 -05:00
|
|
|
"compare file list");
|
|
|
|
|
|
|
|
// Remove test files
|
|
|
|
storageRemoveP(storagePgWrite(), STRDEF("not-in-resume"), .errorOnMissing = true);
|
|
|
|
storageRemoveP(storagePgWrite(), STRDEF("size-mismatch"), .errorOnMissing = true);
|
|
|
|
storageRemoveP(storagePgWrite(), STRDEF("time-mismatch"), .errorOnMissing = true);
|
|
|
|
storageRemoveP(storagePgWrite(), STRDEF("zero-size"), .errorOnMissing = true);
|
|
|
|
}
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("online resumed compressed 9.5 diff backup");
|
|
|
|
|
|
|
|
backupTimeStart = BACKUP_EPOCH + 200000;
|
|
|
|
|
|
|
|
{
|
|
|
|
StringList *argList = strLstNew();
|
|
|
|
strLstAddZ(argList, "--" CFGOPT_STANZA "=test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2019-12-13 17:14:26 -05:00
|
|
|
strLstAddZ(argList, "--" CFGOPT_TYPE "=" BACKUP_TYPE_DIFF);
|
2020-03-06 14:41:03 -05:00
|
|
|
strLstAddZ(argList, "--no-" CFGOPT_COMPRESS);
|
2019-12-14 09:53:50 -05:00
|
|
|
strLstAddZ(argList, "--" CFGOPT_STOP_AUTO);
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRawBool(argList, cfgOptRepoHardlink, true);
|
2019-12-13 17:14:26 -05:00
|
|
|
harnessCfgLoad(cfgCmdBackup, argList);
|
|
|
|
|
|
|
|
// Load the previous manifest and null out the checksum-page option to be sure it gets set to false in this backup
|
|
|
|
const String *manifestPriorFile = STRDEF(STORAGE_REPO_BACKUP "/latest/" BACKUP_MANIFEST_FILE);
|
|
|
|
Manifest *manifestPrior = manifestNewLoad(storageReadIo(storageNewReadP(storageRepo(), manifestPriorFile)));
|
|
|
|
((ManifestData *)manifestData(manifestPrior))->backupOptionChecksumPage = NULL;
|
|
|
|
manifestSave(manifestPrior, storageWriteIo(storageNewWriteP(storageRepoWrite(), manifestPriorFile)));
|
|
|
|
|
|
|
|
// Create a backup manifest that looks like a halted backup manifest
|
2020-09-18 16:55:26 -04:00
|
|
|
Manifest *manifestResume = manifestNewBuild(
|
|
|
|
storagePg(), PG_VERSION_95, pgCatalogTestVersion(PG_VERSION_95), true, false, NULL, NULL);
|
2019-12-13 17:14:26 -05:00
|
|
|
ManifestData *manifestResumeData = (ManifestData *)manifestData(manifestResume);
|
|
|
|
|
|
|
|
manifestResumeData->backupType = backupTypeDiff;
|
|
|
|
manifestResumeData->backupLabelPrior = manifestData(manifestPrior)->backupLabel;
|
2020-03-06 14:41:03 -05:00
|
|
|
manifestResumeData->backupOptionCompressType = compressTypeGz;
|
2020-07-30 07:49:06 -04:00
|
|
|
const String *resumeLabel = backupLabelCreate(
|
|
|
|
backupTypeDiff, manifestData(manifestPrior)->backupLabel, backupTimeStart);
|
2019-12-13 17:14:26 -05:00
|
|
|
manifestBackupLabelSet(manifestResume, resumeLabel);
|
|
|
|
|
|
|
|
// Reference in manifest
|
|
|
|
storagePutP(
|
2020-07-30 07:49:06 -04:00
|
|
|
storageNewWriteP(storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/PG_VERSION.gz", strZ(resumeLabel))),
|
2019-12-13 17:14:26 -05:00
|
|
|
NULL);
|
|
|
|
|
|
|
|
// Reference in resumed manifest
|
|
|
|
storagePutP(storageNewWriteP(storagePgWrite(), STRDEF("resume-ref"), .timeModified = backupTimeStart), NULL);
|
|
|
|
storagePutP(
|
2020-07-30 07:49:06 -04:00
|
|
|
storageNewWriteP(storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/resume-ref.gz", strZ(resumeLabel))),
|
2019-12-13 17:14:26 -05:00
|
|
|
NULL);
|
|
|
|
manifestFileAdd(
|
|
|
|
manifestResume, &(ManifestFile){.name = STRDEF("pg_data/resume-ref"), .size = 0, .reference = STRDEF("BOGUS")});
|
|
|
|
|
2020-11-09 16:26:43 -05:00
|
|
|
// Time does not match between cluster and resume manifest (but resume because time is in future so delta enabled). Note
|
2019-12-13 17:14:26 -05:00
|
|
|
// also that the repo file is intentionally corrupt to generate a warning about corruption in the repository.
|
|
|
|
storagePutP(
|
2020-07-30 07:49:06 -04:00
|
|
|
storageNewWriteP(storagePgWrite(), STRDEF("time-mismatch2"), .timeModified = backupTimeStart + 100),
|
|
|
|
BUFSTRDEF("TEST"));
|
2019-12-13 17:14:26 -05:00
|
|
|
storagePutP(
|
|
|
|
storageNewWriteP(
|
2020-07-30 07:49:06 -04:00
|
|
|
storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/time-mismatch2.gz", strZ(resumeLabel))),
|
2019-12-13 17:14:26 -05:00
|
|
|
NULL);
|
|
|
|
manifestFileAdd(
|
|
|
|
manifestResume, &(ManifestFile){
|
|
|
|
.name = STRDEF("pg_data/time-mismatch2"), .checksumSha1 = "984816fd329622876e14907634264e6f332e9fb3", .size = 4,
|
|
|
|
.timestamp = backupTimeStart});
|
|
|
|
|
|
|
|
// Links are always removed on resume
|
|
|
|
THROW_ON_SYS_ERROR(
|
|
|
|
symlink(
|
|
|
|
"..",
|
2020-07-30 07:49:06 -04:00
|
|
|
strZ(storagePathP(storageRepo(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/link", strZ(resumeLabel))))) == -1,
|
2019-12-13 17:14:26 -05:00
|
|
|
FileOpenError, "unable to create symlink");
|
|
|
|
|
|
|
|
// Special files should not be in the repo
|
|
|
|
TEST_SYSTEM_FMT(
|
|
|
|
"mkfifo -m 666 %s",
|
2020-07-30 07:49:06 -04:00
|
|
|
strZ(storagePathP(storageRepo(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/pipe", strZ(resumeLabel)))));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Save the resume manifest
|
|
|
|
manifestSave(
|
|
|
|
manifestResume,
|
|
|
|
storageWriteIo(
|
|
|
|
storageNewWriteP(
|
|
|
|
storageRepoWrite(),
|
2020-07-30 07:49:06 -04:00
|
|
|
strNewFmt(STORAGE_REPO_BACKUP "/%s/" BACKUP_MANIFEST_FILE INFO_COPY_EXT, strZ(resumeLabel)))));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Run backup
|
|
|
|
testBackupPqScriptP(PG_VERSION_95, backupTimeStart);
|
|
|
|
TEST_RESULT_VOID(cmdBackup(), "backup");
|
|
|
|
|
|
|
|
// Check log
|
|
|
|
TEST_RESULT_LOG(
|
|
|
|
"P00 INFO: last backup label = 20191003-105320F, version = " PROJECT_VERSION "\n"
|
2020-03-06 14:41:03 -05:00
|
|
|
"P00 WARN: diff backup cannot alter compress-type option to 'none', reset to value in 20191003-105320F\n"
|
2019-12-13 17:14:26 -05:00
|
|
|
"P00 INFO: execute exclusive pg_start_backup(): backup begins after the next regular checkpoint completes\n"
|
|
|
|
"P00 INFO: backup start archive = 0000000105D9759000000000, lsn = 5d97590/0\n"
|
|
|
|
"P00 WARN: file 'time-mismatch2' has timestamp in the future, enabling delta checksum\n"
|
|
|
|
"P00 WARN: resumable backup 20191003-105320F_20191004-144000D of same type exists"
|
|
|
|
" -- remove invalid files and resume\n"
|
|
|
|
"P00 DETAIL: remove file '{[path]}/repo/backup/test1/20191003-105320F_20191004-144000D/pg_data/PG_VERSION.gz'"
|
|
|
|
" from resumed backup (reference in manifest)\n"
|
|
|
|
"P00 WARN: remove special file '{[path]}/repo/backup/test1/20191003-105320F_20191004-144000D/pg_data/pipe'"
|
|
|
|
" from resumed backup\n"
|
|
|
|
"P00 DETAIL: remove file '{[path]}/repo/backup/test1/20191003-105320F_20191004-144000D/pg_data/resume-ref.gz'"
|
|
|
|
" from resumed backup (reference in resumed manifest)\n"
|
|
|
|
"P01 DETAIL: match file from prior backup {[path]}/pg1/global/pg_control (8KB, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 DETAIL: match file from prior backup {[path]}/pg1/postgresql.conf (11B, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P00 WARN: resumed backup file pg_data/time-mismatch2 does not have expected checksum"
|
2020-11-09 16:26:43 -05:00
|
|
|
" 984816fd329622876e14907634264e6f332e9fb3. The file will be recopied and backup will continue but this may be"
|
|
|
|
" an issue unless the resumed backup path in the repository is known to be corrupted.\n"
|
2019-12-13 17:14:26 -05:00
|
|
|
" NOTE: this does not indicate a problem with the PostgreSQL page checksums.\n"
|
|
|
|
"P01 INFO: backup file {[path]}/pg1/time-mismatch2 (4B, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 DETAIL: match file from prior backup {[path]}/pg1/PG_VERSION (3B, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 INFO: backup file {[path]}/pg1/resume-ref (0B, [PCT])\n"
|
|
|
|
"P00 DETAIL: hardlink pg_data/PG_VERSION to 20191003-105320F\n"
|
|
|
|
"P00 DETAIL: hardlink pg_data/global/pg_control to 20191003-105320F\n"
|
|
|
|
"P00 DETAIL: hardlink pg_data/postgresql.conf to 20191003-105320F\n"
|
|
|
|
"P00 INFO: diff backup size = [SIZE]\n"
|
|
|
|
"P00 INFO: execute exclusive pg_stop_backup() and wait for all WAL segments to archive\n"
|
|
|
|
"P00 INFO: backup stop archive = 0000000105D9759000000000, lsn = 5d97590/800000\n"
|
|
|
|
"P00 INFO: check archive for segment(s) 0000000105D9759000000000:0000000105D9759000000000\n"
|
|
|
|
"P00 INFO: new backup label = 20191003-105320F_20191004-144000D");
|
|
|
|
|
|
|
|
// Check repo directory
|
2020-03-18 10:10:10 -04:00
|
|
|
TEST_RESULT_STR_Z_KEYRPL(
|
2019-12-13 17:14:26 -05:00
|
|
|
testBackupValidate(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/latest")),
|
|
|
|
". {link, d=20191003-105320F_20191004-144000D}\n"
|
|
|
|
"pg_data {path}\n"
|
|
|
|
"pg_data/PG_VERSION.gz {file, s=3}\n"
|
|
|
|
"pg_data/global {path}\n"
|
|
|
|
"pg_data/global/pg_control.gz {file, s=8192}\n"
|
|
|
|
"pg_data/pg_xlog {path}\n"
|
|
|
|
"pg_data/postgresql.conf.gz {file, s=11}\n"
|
|
|
|
"pg_data/resume-ref.gz {file, s=0}\n"
|
2020-03-18 10:10:10 -04:00
|
|
|
"pg_data/time-mismatch2.gz {file, s=4}\n"
|
|
|
|
"--------\n"
|
|
|
|
"[backup:target]\n"
|
|
|
|
"pg_data={\"path\":\"{[path]}/pg1\",\"type\":\"path\"}\n"
|
|
|
|
"\n"
|
|
|
|
"[target:file]\n"
|
|
|
|
"pg_data/PG_VERSION={\"checksum\":\"06d06bb31b570b94d7b4325f511f853dbe771c21\",\"reference\":\"20191003-105320F\""
|
|
|
|
",\"size\":3,\"timestamp\":1570000000}\n"
|
|
|
|
"pg_data/global/pg_control={\"reference\":\"20191003-105320F\",\"size\":8192,\"timestamp\":1570000000}\n"
|
|
|
|
"pg_data/postgresql.conf={\"checksum\":\"e3db315c260e79211b7b52587123b7aa060f30ab\""
|
|
|
|
",\"reference\":\"20191003-105320F\",\"size\":11,\"timestamp\":1570000000}\n"
|
|
|
|
"pg_data/resume-ref={\"size\":0,\"timestamp\":1570200000}\n"
|
|
|
|
"pg_data/time-mismatch2={\"checksum\":\"984816fd329622876e14907634264e6f332e9fb3\",\"size\":4"
|
|
|
|
",\"timestamp\":1570200100}\n"
|
|
|
|
"\n"
|
|
|
|
"[target:path]\n"
|
|
|
|
"pg_data={}\n"
|
|
|
|
"pg_data/global={}\n"
|
|
|
|
"pg_data/pg_xlog={}\n",
|
2019-12-13 17:14:26 -05:00
|
|
|
"compare file list");
|
|
|
|
|
|
|
|
// Remove test files
|
|
|
|
storageRemoveP(storagePgWrite(), STRDEF("resume-ref"), .errorOnMissing = true);
|
|
|
|
storageRemoveP(storagePgWrite(), STRDEF("time-mismatch2"), .errorOnMissing = true);
|
|
|
|
}
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
2020-01-26 13:19:13 -07:00
|
|
|
TEST_TITLE("online 9.6 backup-standby full backup");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
backupTimeStart = BACKUP_EPOCH + 1200000;
|
|
|
|
|
|
|
|
{
|
|
|
|
// Update pg_control
|
|
|
|
storagePutP(
|
|
|
|
storageNewWriteP(
|
2020-07-30 07:49:06 -04:00
|
|
|
storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strZ(pg1Path)),
|
2019-12-13 17:14:26 -05:00
|
|
|
.timeModified = backupTimeStart),
|
|
|
|
pgControlTestToBuffer((PgControl){.version = PG_VERSION_96, .systemId = 1000000000000000960}));
|
|
|
|
|
|
|
|
// Update version
|
|
|
|
storagePutP(
|
|
|
|
storageNewWriteP(storagePgWrite(), PG_FILE_PGVERSION_STR, .timeModified = backupTimeStart),
|
|
|
|
BUFSTRDEF(PG_VERSION_96_STR));
|
|
|
|
|
|
|
|
// Upgrade stanza
|
|
|
|
StringList *argList = strLstNew();
|
|
|
|
strLstAddZ(argList, "--" CFGOPT_STANZA "=test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
2019-12-13 17:14:26 -05:00
|
|
|
strLstAddZ(argList, "--no-" CFGOPT_ONLINE);
|
|
|
|
harnessCfgLoad(cfgCmdStanzaUpgrade, argList);
|
|
|
|
|
|
|
|
cmdStanzaUpgrade();
|
|
|
|
|
|
|
|
// Load options
|
|
|
|
argList = strLstNew();
|
|
|
|
strLstAddZ(argList, "--" CFGOPT_STANZA "=test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgKeyRaw(argList, cfgOptPgPath, 1, pg1Path);
|
|
|
|
hrnCfgArgKeyRaw(argList, cfgOptPgPath, 2, pg2Path);
|
|
|
|
hrnCfgArgKeyRawZ(argList, cfgOptPgPort, 2, "5433");
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2019-12-13 17:14:26 -05:00
|
|
|
strLstAddZ(argList, "--no-" CFGOPT_COMPRESS);
|
|
|
|
strLstAddZ(argList, "--" CFGOPT_BACKUP_STANDBY);
|
|
|
|
strLstAddZ(argList, "--" CFGOPT_START_FAST);
|
|
|
|
strLstAddZ(argList, "--" CFGOPT_ARCHIVE_COPY);
|
|
|
|
harnessCfgLoad(cfgCmdBackup, argList);
|
|
|
|
|
2020-03-18 13:40:16 -04:00
|
|
|
// Create file to copy from the standby. This file will be zero-length on the primary and non-zero-length on the standby
|
2020-03-19 12:11:20 -04:00
|
|
|
// but no bytes will be copied.
|
2020-10-26 10:25:16 -04:00
|
|
|
storagePutP(storageNewWriteP(storagePgIdxWrite(0), STRDEF(PG_PATH_BASE "/1/1"), .timeModified = backupTimeStart), NULL);
|
|
|
|
storagePutP(storageNewWriteP(storagePgIdxWrite(1), STRDEF(PG_PATH_BASE "/1/1")), BUFSTRDEF("1234"));
|
2020-03-18 13:40:16 -04:00
|
|
|
|
|
|
|
// Create file to copy from the standby. This file will be smaller on the primary than the standby and have no common
|
|
|
|
// data in the bytes that exist on primary and standby. If the file is copied from the primary instead of the standby
|
|
|
|
// the checksum will change but not the size.
|
2020-01-26 13:19:13 -07:00
|
|
|
storagePutP(
|
2020-10-26 10:25:16 -04:00
|
|
|
storageNewWriteP(storagePgIdxWrite(0), STRDEF(PG_PATH_BASE "/1/2"), .timeModified = backupTimeStart),
|
2020-03-18 13:40:16 -04:00
|
|
|
BUFSTRDEF("DA"));
|
2020-10-26 10:25:16 -04:00
|
|
|
storagePutP(storageNewWriteP(storagePgIdxWrite(1), STRDEF(PG_PATH_BASE "/1/2")), BUFSTRDEF("5678"));
|
2020-03-18 13:40:16 -04:00
|
|
|
|
|
|
|
// Create file to copy from the standby. This file will be larger on the primary than the standby and have no common
|
|
|
|
// data in the bytes that exist on primary and standby. If the file is copied from the primary instead of the standby
|
|
|
|
// the checksum and size will change.
|
|
|
|
storagePutP(
|
2020-10-26 10:25:16 -04:00
|
|
|
storageNewWriteP(storagePgIdxWrite(0), STRDEF(PG_PATH_BASE "/1/3"), .timeModified = backupTimeStart),
|
2020-03-18 13:40:16 -04:00
|
|
|
BUFSTRDEF("TEST"));
|
2020-10-26 10:25:16 -04:00
|
|
|
storagePutP(storageNewWriteP(storagePgIdxWrite(1), STRDEF(PG_PATH_BASE "/1/3")), BUFSTRDEF("ABC"));
|
2020-01-26 13:19:13 -07:00
|
|
|
|
|
|
|
// Create a file on the primary that does not exist on the standby to test that the file is removed from the manifest
|
|
|
|
storagePutP(
|
2020-10-26 10:25:16 -04:00
|
|
|
storageNewWriteP(storagePgIdxWrite(0), STRDEF(PG_PATH_BASE "/1/0"), .timeModified = backupTimeStart),
|
2020-01-26 13:19:13 -07:00
|
|
|
BUFSTRDEF("DATA"));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Set log level to warn because the following test uses multiple processes so the log order will not be deterministic
|
|
|
|
harnessLogLevelSet(logLevelWarn);
|
|
|
|
|
|
|
|
// Run backup but error on archive check
|
|
|
|
testBackupPqScriptP(PG_VERSION_96, backupTimeStart, .noWal = true, .backupStandby = true);
|
|
|
|
TEST_ERROR(
|
|
|
|
cmdBackup(), ArchiveTimeoutError,
|
|
|
|
"WAL segment 0000000105DA69C000000000 was not archived before the 100ms timeout\n"
|
|
|
|
"HINT: check the archive_command to ensure that all options are correct (especially --stanza).\n"
|
2020-09-03 07:49:49 -04:00
|
|
|
"HINT: check the PostgreSQL server log for errors.\n"
|
|
|
|
"HINT: run the 'start' command if the stanza was previously stopped.");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Remove halted backup so there's no resume
|
|
|
|
storagePathRemoveP(storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/20191016-042640F"), .recurse = true);
|
|
|
|
|
|
|
|
// Run backup
|
2020-03-06 14:41:03 -05:00
|
|
|
testBackupPqScriptP(PG_VERSION_96, backupTimeStart, .backupStandby = true, .walCompressType = compressTypeGz);
|
2019-12-13 17:14:26 -05:00
|
|
|
TEST_RESULT_VOID(cmdBackup(), "backup");
|
|
|
|
|
|
|
|
// Set log level back to detail
|
|
|
|
harnessLogLevelSet(logLevelDetail);
|
|
|
|
|
|
|
|
TEST_RESULT_LOG(
|
|
|
|
"P00 WARN: no prior backup exists, incr backup has been changed to full");
|
|
|
|
|
2020-03-18 10:10:10 -04:00
|
|
|
TEST_RESULT_STR_Z_KEYRPL(
|
2019-12-13 17:14:26 -05:00
|
|
|
testBackupValidate(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/latest")),
|
|
|
|
". {link, d=20191016-042640F}\n"
|
|
|
|
"pg_data {path}\n"
|
|
|
|
"pg_data/PG_VERSION {file, s=3}\n"
|
|
|
|
"pg_data/backup_label {file, s=17}\n"
|
|
|
|
"pg_data/base {path}\n"
|
|
|
|
"pg_data/base/1 {path}\n"
|
2020-03-19 13:16:05 -04:00
|
|
|
"pg_data/base/1/1 {file, s=0}\n"
|
|
|
|
"pg_data/base/1/2 {file, s=2}\n"
|
2020-03-18 13:40:16 -04:00
|
|
|
"pg_data/base/1/3 {file, s=3}\n"
|
2019-12-13 17:14:26 -05:00
|
|
|
"pg_data/global {path}\n"
|
|
|
|
"pg_data/global/pg_control {file, s=8192}\n"
|
|
|
|
"pg_data/pg_xlog {path}\n"
|
|
|
|
"pg_data/pg_xlog/0000000105DA69C000000000 {file, s=16777216}\n"
|
2020-03-18 10:10:10 -04:00
|
|
|
"pg_data/postgresql.conf {file, s=11}\n"
|
|
|
|
"--------\n"
|
|
|
|
"[backup:target]\n"
|
|
|
|
"pg_data={\"path\":\"{[path]}/pg1\",\"type\":\"path\"}\n"
|
|
|
|
"\n"
|
|
|
|
"[target:file]\n"
|
|
|
|
"pg_data/PG_VERSION={\"checksum\":\"f5b7e6d36dc0113f61b36c700817d42b96f7b037\",\"size\":3"
|
|
|
|
",\"timestamp\":1571200000}\n"
|
|
|
|
"pg_data/backup_label={\"checksum\":\"8e6f41ac87a7514be96260d65bacbffb11be77dc\",\"size\":17"
|
|
|
|
",\"timestamp\":1571200002}\n"
|
2020-03-19 13:16:05 -04:00
|
|
|
"pg_data/base/1/1={\"master\":false,\"size\":0,\"timestamp\":1571200000}\n"
|
|
|
|
"pg_data/base/1/2={\"checksum\":\"54ceb91256e8190e474aa752a6e0650a2df5ba37\",\"master\":false,\"size\":2"
|
2020-03-18 10:10:10 -04:00
|
|
|
",\"timestamp\":1571200000}\n"
|
2020-03-18 13:40:16 -04:00
|
|
|
"pg_data/base/1/3={\"checksum\":\"3c01bdbb26f358bab27f267924aa2c9a03fcfdb8\",\"master\":false,\"size\":3"
|
2020-03-18 10:10:10 -04:00
|
|
|
",\"timestamp\":1571200000}\n"
|
|
|
|
"pg_data/global/pg_control={\"size\":8192,\"timestamp\":1571200000}\n"
|
|
|
|
"pg_data/pg_xlog/0000000105DA69C000000000={\"size\":16777216,\"timestamp\":1571200002}\n"
|
|
|
|
"pg_data/postgresql.conf={\"checksum\":\"e3db315c260e79211b7b52587123b7aa060f30ab\",\"size\":11"
|
|
|
|
",\"timestamp\":1570000000}\n"
|
|
|
|
"\n"
|
|
|
|
"[target:path]\n"
|
|
|
|
"pg_data={}\n"
|
|
|
|
"pg_data/base={}\n"
|
|
|
|
"pg_data/base/1={}\n"
|
|
|
|
"pg_data/global={}\n"
|
|
|
|
"pg_data/pg_xlog={}\n",
|
2019-12-13 17:14:26 -05:00
|
|
|
"compare file list");
|
|
|
|
|
|
|
|
// Remove test files
|
2020-10-26 10:25:16 -04:00
|
|
|
storagePathRemoveP(storagePgIdxWrite(1), NULL, .recurse = true);
|
2019-12-13 17:14:26 -05:00
|
|
|
storagePathRemoveP(storagePgWrite(), STRDEF("base/1"), .recurse = true);
|
|
|
|
}
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("online 11 full backup with tablespaces and page checksums");
|
|
|
|
|
|
|
|
backupTimeStart = BACKUP_EPOCH + 2200000;
|
|
|
|
|
|
|
|
{
|
|
|
|
// Update pg_control
|
|
|
|
storagePutP(
|
|
|
|
storageNewWriteP(
|
2020-07-30 07:49:06 -04:00
|
|
|
storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strZ(pg1Path)),
|
2019-12-13 17:14:26 -05:00
|
|
|
.timeModified = backupTimeStart),
|
|
|
|
pgControlTestToBuffer(
|
|
|
|
(PgControl){
|
|
|
|
.version = PG_VERSION_11, .systemId = 1000000000000001100, .pageChecksum = true,
|
|
|
|
.walSegmentSize = 1024 * 1024}));
|
|
|
|
|
|
|
|
// Update version
|
|
|
|
storagePutP(
|
|
|
|
storageNewWriteP(storagePgWrite(), PG_FILE_PGVERSION_STR, .timeModified = backupTimeStart),
|
|
|
|
BUFSTRDEF(PG_VERSION_11_STR));
|
|
|
|
|
|
|
|
// Update wal path
|
|
|
|
storagePathRemoveP(storagePgWrite(), pgWalPath(PG_VERSION_95));
|
|
|
|
storagePathCreateP(storagePgWrite(), pgWalPath(PG_VERSION_11), .noParentCreate = true);
|
|
|
|
|
|
|
|
// Upgrade stanza
|
|
|
|
StringList *argList = strLstNew();
|
|
|
|
strLstAddZ(argList, "--" CFGOPT_STANZA "=test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
2019-12-13 17:14:26 -05:00
|
|
|
strLstAddZ(argList, "--no-" CFGOPT_ONLINE);
|
|
|
|
harnessCfgLoad(cfgCmdStanzaUpgrade, argList);
|
|
|
|
|
|
|
|
cmdStanzaUpgrade();
|
|
|
|
|
|
|
|
// Load options
|
|
|
|
argList = strLstNew();
|
|
|
|
strLstAddZ(argList, "--" CFGOPT_STANZA "=test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2019-12-13 17:14:26 -05:00
|
|
|
strLstAddZ(argList, "--" CFGOPT_TYPE "=" BACKUP_TYPE_FULL);
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRawBool(argList, cfgOptRepoHardlink, true);
|
2019-12-13 17:14:26 -05:00
|
|
|
strLstAddZ(argList, "--" CFGOPT_MANIFEST_SAVE_THRESHOLD "=1");
|
|
|
|
strLstAddZ(argList, "--" CFGOPT_ARCHIVE_COPY);
|
|
|
|
harnessCfgLoad(cfgCmdBackup, argList);
|
|
|
|
|
2020-04-21 17:55:36 -04:00
|
|
|
// Move pg1-path and put a link in its place. This tests that backup works when pg1-path is a symlink yet should be
|
|
|
|
// completely invisible in the manifest and logging.
|
2020-07-30 07:49:06 -04:00
|
|
|
TEST_SYSTEM_FMT("mv %s %s-data", strZ(pg1Path), strZ(pg1Path));
|
|
|
|
TEST_SYSTEM_FMT("ln -s %s-data %s ", strZ(pg1Path), strZ(pg1Path));
|
2020-04-21 17:55:36 -04:00
|
|
|
|
2019-12-13 17:14:26 -05:00
|
|
|
// Zeroed file which passes page checksums
|
|
|
|
Buffer *relation = bufNew(PG_PAGE_SIZE_DEFAULT);
|
|
|
|
memset(bufPtr(relation), 0, bufSize(relation));
|
|
|
|
bufUsedSet(relation, bufSize(relation));
|
|
|
|
|
2020-01-23 14:15:58 -07:00
|
|
|
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x00)) = (PageHeaderData){.pd_upper = 0};
|
|
|
|
|
2019-12-13 17:14:26 -05:00
|
|
|
storagePutP(storageNewWriteP(storagePgWrite(), STRDEF(PG_PATH_BASE "/1/1"), .timeModified = backupTimeStart), relation);
|
|
|
|
|
|
|
|
// Zeroed file which will fail on alignment
|
|
|
|
relation = bufNew(PG_PAGE_SIZE_DEFAULT + 1);
|
|
|
|
memset(bufPtr(relation), 0, bufSize(relation));
|
|
|
|
bufUsedSet(relation, bufSize(relation));
|
|
|
|
|
2020-01-23 14:15:58 -07:00
|
|
|
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x00)) = (PageHeaderData){.pd_upper = 0};
|
|
|
|
|
2019-12-13 17:14:26 -05:00
|
|
|
storagePutP(storageNewWriteP(storagePgWrite(), STRDEF(PG_PATH_BASE "/1/2"), .timeModified = backupTimeStart), relation);
|
|
|
|
|
|
|
|
// File with bad page checksums
|
|
|
|
relation = bufNew(PG_PAGE_SIZE_DEFAULT * 4);
|
|
|
|
memset(bufPtr(relation), 0, bufSize(relation));
|
2020-01-23 14:15:58 -07:00
|
|
|
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x00)) = (PageHeaderData){.pd_upper = 0xFF};
|
|
|
|
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x01)) = (PageHeaderData){.pd_upper = 0x00};
|
|
|
|
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x02)) = (PageHeaderData){.pd_upper = 0xFE};
|
|
|
|
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x03)) = (PageHeaderData){.pd_upper = 0xEF};
|
2019-12-13 17:14:26 -05:00
|
|
|
bufUsedSet(relation, bufSize(relation));
|
|
|
|
|
|
|
|
storagePutP(storageNewWriteP(storagePgWrite(), STRDEF(PG_PATH_BASE "/1/3"), .timeModified = backupTimeStart), relation);
|
2020-07-30 07:49:06 -04:00
|
|
|
const char *rel1_3Sha1 = strZ(bufHex(cryptoHashOne(HASH_TYPE_SHA1_STR, relation)));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// File with bad page checksum
|
|
|
|
relation = bufNew(PG_PAGE_SIZE_DEFAULT * 3);
|
|
|
|
memset(bufPtr(relation), 0, bufSize(relation));
|
2020-01-23 14:15:58 -07:00
|
|
|
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x00)) = (PageHeaderData){.pd_upper = 0x00};
|
|
|
|
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x01)) = (PageHeaderData){.pd_upper = 0x08};
|
|
|
|
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x02)) = (PageHeaderData){.pd_upper = 0x00};
|
2019-12-13 17:14:26 -05:00
|
|
|
bufUsedSet(relation, bufSize(relation));
|
|
|
|
|
|
|
|
storagePutP(storageNewWriteP(storagePgWrite(), STRDEF(PG_PATH_BASE "/1/4"), .timeModified = backupTimeStart), relation);
|
2020-07-30 07:49:06 -04:00
|
|
|
const char *rel1_4Sha1 = strZ(bufHex(cryptoHashOne(HASH_TYPE_SHA1_STR, relation)));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Add a tablespace
|
|
|
|
storagePathCreateP(storagePgWrite(), STRDEF(PG_PATH_PGTBLSPC));
|
|
|
|
THROW_ON_SYS_ERROR(
|
2020-07-30 07:49:06 -04:00
|
|
|
symlink("../../pg1-tblspc/32768", strZ(storagePathP(storagePg(), STRDEF(PG_PATH_PGTBLSPC "/32768")))) == -1,
|
2019-12-13 17:14:26 -05:00
|
|
|
FileOpenError, "unable to create symlink");
|
|
|
|
|
|
|
|
storagePutP(
|
|
|
|
storageNewWriteP(
|
2020-09-18 16:55:26 -04:00
|
|
|
storageTest,
|
|
|
|
strNewFmt("pg1-tblspc/32768/%s/1/5", strZ(pgTablespaceId(PG_VERSION_11, pgCatalogTestVersion(PG_VERSION_11)))),
|
2019-12-13 17:14:26 -05:00
|
|
|
.timeModified = backupTimeStart),
|
|
|
|
NULL);
|
|
|
|
|
|
|
|
// Disable storageFeatureSymLink so tablespace (and latest) symlinks will not be created
|
|
|
|
((Storage *)storageRepoWrite())->interface.feature ^= 1 << storageFeatureSymLink;
|
|
|
|
|
|
|
|
// Disable storageFeatureHardLink so hardlinks will not be created
|
|
|
|
((Storage *)storageRepoWrite())->interface.feature ^= 1 << storageFeatureHardLink;
|
|
|
|
|
|
|
|
// Run backup
|
2020-03-06 14:41:03 -05:00
|
|
|
testBackupPqScriptP(PG_VERSION_11, backupTimeStart, .walCompressType = compressTypeGz, .walTotal = 3);
|
2019-12-13 17:14:26 -05:00
|
|
|
TEST_RESULT_VOID(cmdBackup(), "backup");
|
|
|
|
|
|
|
|
// Reset storage features
|
|
|
|
((Storage *)storageRepoWrite())->interface.feature |= 1 << storageFeatureSymLink;
|
|
|
|
((Storage *)storageRepoWrite())->interface.feature |= 1 << storageFeatureHardLink;
|
|
|
|
|
|
|
|
TEST_RESULT_LOG(
|
|
|
|
"P00 INFO: execute non-exclusive pg_start_backup(): backup begins after the next regular checkpoint completes\n"
|
|
|
|
"P00 INFO: backup start archive = 0000000105DB5DE000000000, lsn = 5db5de0/0\n"
|
|
|
|
"P01 INFO: backup file {[path]}/pg1/base/1/3 (32KB, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P00 WARN: invalid page checksums found in file {[path]}/pg1/base/1/3 at pages 0, 2-3\n"
|
|
|
|
"P01 INFO: backup file {[path]}/pg1/base/1/4 (24KB, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P00 WARN: invalid page checksum found in file {[path]}/pg1/base/1/4 at page 1\n"
|
|
|
|
"P01 INFO: backup file {[path]}/pg1/base/1/2 (8KB, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P00 WARN: page misalignment in file {[path]}/pg1/base/1/2: file size 8193 is not divisible by page size 8192\n"
|
|
|
|
"P01 INFO: backup file {[path]}/pg1/global/pg_control (8KB, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 INFO: backup file {[path]}/pg1/base/1/1 (8KB, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 INFO: backup file {[path]}/pg1/postgresql.conf (11B, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 INFO: backup file {[path]}/pg1/PG_VERSION (2B, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 INFO: backup file {[path]}/pg1/pg_tblspc/32768/PG_11_201809051/1/5 (0B, [PCT])\n"
|
|
|
|
"P00 INFO: full backup size = [SIZE]\n"
|
|
|
|
"P00 INFO: execute non-exclusive pg_stop_backup() and wait for all WAL segments to archive\n"
|
|
|
|
"P00 INFO: backup stop archive = 0000000105DB5DE000000002, lsn = 5db5de0/280000\n"
|
|
|
|
"P00 DETAIL: wrote 'backup_label' file returned from pg_stop_backup()\n"
|
|
|
|
"P00 INFO: check archive for segment(s) 0000000105DB5DE000000000:0000000105DB5DE000000002\n"
|
|
|
|
"P00 INFO: new backup label = 20191027-181320F");
|
|
|
|
|
2020-07-20 09:47:43 -04:00
|
|
|
TEST_RESULT_STR_KEYRPL(
|
2019-12-13 17:14:26 -05:00
|
|
|
testBackupValidate(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/20191027-181320F")),
|
2020-07-20 09:47:43 -04:00
|
|
|
strNewFmt(
|
|
|
|
"pg_data {path}\n"
|
|
|
|
"pg_data/PG_VERSION.gz {file, s=2}\n"
|
|
|
|
"pg_data/backup_label.gz {file, s=17}\n"
|
|
|
|
"pg_data/base {path}\n"
|
|
|
|
"pg_data/base/1 {path}\n"
|
|
|
|
"pg_data/base/1/1.gz {file, s=8192}\n"
|
|
|
|
"pg_data/base/1/2.gz {file, s=8193}\n"
|
|
|
|
"pg_data/base/1/3.gz {file, s=32768}\n"
|
|
|
|
"pg_data/base/1/4.gz {file, s=24576}\n"
|
|
|
|
"pg_data/global {path}\n"
|
|
|
|
"pg_data/global/pg_control.gz {file, s=8192}\n"
|
|
|
|
"pg_data/pg_tblspc {path}\n"
|
|
|
|
"pg_data/pg_wal {path}\n"
|
|
|
|
"pg_data/pg_wal/0000000105DB5DE000000000.gz {file, s=1048576}\n"
|
|
|
|
"pg_data/pg_wal/0000000105DB5DE000000001.gz {file, s=1048576}\n"
|
|
|
|
"pg_data/pg_wal/0000000105DB5DE000000002.gz {file, s=1048576}\n"
|
|
|
|
"pg_data/postgresql.conf.gz {file, s=11}\n"
|
|
|
|
"pg_tblspc {path}\n"
|
|
|
|
"pg_tblspc/32768 {path}\n"
|
|
|
|
"pg_tblspc/32768/PG_11_201809051 {path}\n"
|
|
|
|
"pg_tblspc/32768/PG_11_201809051/1 {path}\n"
|
|
|
|
"pg_tblspc/32768/PG_11_201809051/1/5.gz {file, s=0}\n"
|
|
|
|
"--------\n"
|
|
|
|
"[backup:target]\n"
|
|
|
|
"pg_data={\"path\":\"{[path]}/pg1\",\"type\":\"path\"}\n"
|
|
|
|
"pg_tblspc/32768={\"path\":\"../../pg1-tblspc/32768\",\"tablespace-id\":\"32768\""
|
|
|
|
",\"tablespace-name\":\"tblspc32768\",\"type\":\"link\"}\n"
|
|
|
|
"\n"
|
|
|
|
"[target:file]\n"
|
|
|
|
"pg_data/PG_VERSION={\"checksum\":\"17ba0791499db908433b80f37c5fbc89b870084b\",\"size\":2"
|
|
|
|
",\"timestamp\":1572200000}\n"
|
|
|
|
"pg_data/backup_label={\"checksum\":\"8e6f41ac87a7514be96260d65bacbffb11be77dc\",\"size\":17"
|
|
|
|
",\"timestamp\":1572200002}\n"
|
|
|
|
"pg_data/base/1/1={\"checksum\":\"0631457264ff7f8d5fb1edc2c0211992a67c73e6\",\"checksum-page\":true"
|
|
|
|
",\"master\":false,\"size\":8192,\"timestamp\":1572200000}\n"
|
|
|
|
"pg_data/base/1/2={\"checksum\":\"8beb58e08394fe665fb04a17b4003faa3802760b\",\"checksum-page\":false"
|
|
|
|
",\"master\":false,\"size\":8193,\"timestamp\":1572200000}\n"
|
|
|
|
"pg_data/base/1/3={\"checksum\":\"%s\",\"checksum-page\":false,\"checksum-page-error\":[0,[2,3]]"
|
|
|
|
",\"master\":false,\"size\":32768,\"timestamp\":1572200000}\n"
|
|
|
|
"pg_data/base/1/4={\"checksum\":\"%s\",\"checksum-page\":false,\"checksum-page-error\":[1],\"master\":false"
|
|
|
|
",\"size\":24576,\"timestamp\":1572200000}\n"
|
|
|
|
"pg_data/global/pg_control={\"size\":8192,\"timestamp\":1572200000}\n"
|
|
|
|
"pg_data/pg_wal/0000000105DB5DE000000000={\"size\":1048576,\"timestamp\":1572200002}\n"
|
|
|
|
"pg_data/pg_wal/0000000105DB5DE000000001={\"size\":1048576,\"timestamp\":1572200002}\n"
|
|
|
|
"pg_data/pg_wal/0000000105DB5DE000000002={\"size\":1048576,\"timestamp\":1572200002}\n"
|
|
|
|
"pg_data/postgresql.conf={\"checksum\":\"e3db315c260e79211b7b52587123b7aa060f30ab\",\"size\":11"
|
|
|
|
",\"timestamp\":1570000000}\n"
|
|
|
|
"pg_tblspc/32768/PG_11_201809051/1/5={\"checksum-page\":true,\"master\":false,\"size\":0"
|
|
|
|
",\"timestamp\":1572200000}\n"
|
|
|
|
"\n"
|
|
|
|
"[target:link]\n"
|
|
|
|
"pg_data/pg_tblspc/32768={\"destination\":\"../../pg1-tblspc/32768\"}\n"
|
|
|
|
"\n"
|
|
|
|
"[target:path]\n"
|
|
|
|
"pg_data={}\n"
|
|
|
|
"pg_data/base={}\n"
|
|
|
|
"pg_data/base/1={}\n"
|
|
|
|
"pg_data/global={}\n"
|
|
|
|
"pg_data/pg_tblspc={}\n"
|
|
|
|
"pg_data/pg_wal={}\n"
|
|
|
|
"pg_tblspc={}\n"
|
|
|
|
"pg_tblspc/32768={}\n"
|
|
|
|
"pg_tblspc/32768/PG_11_201809051={}\n"
|
|
|
|
"pg_tblspc/32768/PG_11_201809051/1={}\n",
|
|
|
|
rel1_3Sha1, rel1_4Sha1),
|
2019-12-13 17:14:26 -05:00
|
|
|
"compare file list");
|
|
|
|
|
|
|
|
// Remove test files
|
|
|
|
storagePathRemoveP(storagePgWrite(), STRDEF("base/1"), .recurse = true);
|
|
|
|
}
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("error when pg_control not present");
|
|
|
|
|
|
|
|
backupTimeStart = BACKUP_EPOCH + 2300000;
|
|
|
|
|
|
|
|
{
|
|
|
|
// Load options
|
|
|
|
StringList *argList = strLstNew();
|
|
|
|
strLstAddZ(argList, "--" CFGOPT_STANZA "=test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2019-12-13 17:14:26 -05:00
|
|
|
strLstAddZ(argList, "--" CFGOPT_TYPE "=" BACKUP_TYPE_INCR);
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRawBool(argList, cfgOptRepoHardlink, true);
|
2019-12-13 17:14:26 -05:00
|
|
|
harnessCfgLoad(cfgCmdBackup, argList);
|
|
|
|
|
|
|
|
// Run backup
|
|
|
|
testBackupPqScriptP(PG_VERSION_11, backupTimeStart, .errorAfterStart = true);
|
|
|
|
TEST_ERROR(
|
|
|
|
cmdBackup(), FileMissingError,
|
|
|
|
"pg_control must be present in all online backups\n"
|
|
|
|
"HINT: is something wrong with the clock or filesystem timestamps?");
|
|
|
|
|
|
|
|
// Check log
|
|
|
|
TEST_RESULT_LOG(
|
|
|
|
"P00 INFO: last backup label = 20191027-181320F, version = " PROJECT_VERSION "\n"
|
|
|
|
"P00 INFO: execute non-exclusive pg_start_backup(): backup begins after the next regular checkpoint completes\n"
|
|
|
|
"P00 INFO: backup start archive = 0000000105DB764000000000, lsn = 5db7640/0");
|
|
|
|
|
|
|
|
// Remove partial backup so it won't be resumed (since it errored before any checksums were written)
|
|
|
|
storagePathRemoveP(
|
|
|
|
storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/20191027-181320F_20191028-220000I"), .recurse = true);
|
|
|
|
}
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("online 11 incr backup with tablespaces");
|
|
|
|
|
|
|
|
backupTimeStart = BACKUP_EPOCH + 2400000;
|
|
|
|
|
|
|
|
{
|
|
|
|
// Load options
|
|
|
|
StringList *argList = strLstNew();
|
|
|
|
strLstAddZ(argList, "--" CFGOPT_STANZA "=test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2019-12-13 17:14:26 -05:00
|
|
|
strLstAddZ(argList, "--" CFGOPT_TYPE "=" BACKUP_TYPE_INCR);
|
|
|
|
strLstAddZ(argList, "--" CFGOPT_DELTA);
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRawBool(argList, cfgOptRepoHardlink, true);
|
2019-12-13 17:14:26 -05:00
|
|
|
harnessCfgLoad(cfgCmdBackup, argList);
|
|
|
|
|
|
|
|
// Update pg_control timestamp
|
|
|
|
THROW_ON_SYS_ERROR(
|
|
|
|
utime(
|
2020-07-30 07:49:06 -04:00
|
|
|
strZ(storagePathP(storagePg(), STRDEF("global/pg_control"))),
|
2019-12-13 17:14:26 -05:00
|
|
|
&(struct utimbuf){.actime = backupTimeStart, .modtime = backupTimeStart}) != 0, FileWriteError,
|
|
|
|
"unable to set time");
|
|
|
|
|
2020-01-21 10:29:46 -07:00
|
|
|
// Run backup. Make sure that the timeline selected converts to hexdecimal that can't be interpreted as decimal.
|
|
|
|
testBackupPqScriptP(PG_VERSION_11, backupTimeStart, .timeline = 0x2C);
|
2019-12-13 17:14:26 -05:00
|
|
|
TEST_RESULT_VOID(cmdBackup(), "backup");
|
|
|
|
|
|
|
|
TEST_RESULT_LOG(
|
|
|
|
"P00 INFO: last backup label = 20191027-181320F, version = " PROJECT_VERSION "\n"
|
|
|
|
"P00 INFO: execute non-exclusive pg_start_backup(): backup begins after the next regular checkpoint completes\n"
|
2020-01-21 10:29:46 -07:00
|
|
|
"P00 INFO: backup start archive = 0000002C05DB8EB000000000, lsn = 5db8eb0/0\n"
|
|
|
|
"P00 WARN: a timeline switch has occurred since the 20191027-181320F backup, enabling delta checksum\n"
|
2020-06-16 13:20:01 -04:00
|
|
|
" HINT: this is normal after restoring from backup or promoting a standby.\n"
|
2019-12-13 17:14:26 -05:00
|
|
|
"P01 DETAIL: match file from prior backup {[path]}/pg1/global/pg_control (8KB, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 DETAIL: match file from prior backup {[path]}/pg1/postgresql.conf (11B, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 DETAIL: match file from prior backup {[path]}/pg1/PG_VERSION (2B, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P00 DETAIL: hardlink pg_data/PG_VERSION to 20191027-181320F\n"
|
|
|
|
"P00 DETAIL: hardlink pg_data/global/pg_control to 20191027-181320F\n"
|
|
|
|
"P00 DETAIL: hardlink pg_data/postgresql.conf to 20191027-181320F\n"
|
|
|
|
"P00 DETAIL: hardlink pg_tblspc/32768/PG_11_201809051/1/5 to 20191027-181320F\n"
|
|
|
|
"P00 INFO: incr backup size = [SIZE]\n"
|
|
|
|
"P00 INFO: execute non-exclusive pg_stop_backup() and wait for all WAL segments to archive\n"
|
2020-01-21 10:29:46 -07:00
|
|
|
"P00 INFO: backup stop archive = 0000002C05DB8EB000000000, lsn = 5db8eb0/80000\n"
|
2019-12-13 17:14:26 -05:00
|
|
|
"P00 DETAIL: wrote 'backup_label' file returned from pg_stop_backup()\n"
|
2020-01-21 10:29:46 -07:00
|
|
|
"P00 INFO: check archive for segment(s) 0000002C05DB8EB000000000:0000002C05DB8EB000000000\n"
|
2019-12-13 17:14:26 -05:00
|
|
|
"P00 INFO: new backup label = 20191027-181320F_20191030-014640I");
|
|
|
|
|
2020-03-18 10:10:10 -04:00
|
|
|
TEST_RESULT_STR_Z_KEYRPL(
|
2019-12-13 17:14:26 -05:00
|
|
|
testBackupValidate(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/latest")),
|
|
|
|
". {link, d=20191027-181320F_20191030-014640I}\n"
|
|
|
|
"pg_data {path}\n"
|
|
|
|
"pg_data/PG_VERSION.gz {file, s=2}\n"
|
|
|
|
"pg_data/backup_label.gz {file, s=17}\n"
|
|
|
|
"pg_data/base {path}\n"
|
|
|
|
"pg_data/global {path}\n"
|
|
|
|
"pg_data/global/pg_control.gz {file, s=8192}\n"
|
|
|
|
"pg_data/pg_tblspc {path}\n"
|
|
|
|
"pg_data/pg_tblspc/32768 {link, d=../../pg_tblspc/32768}\n"
|
|
|
|
"pg_data/pg_wal {path}\n"
|
|
|
|
"pg_data/postgresql.conf.gz {file, s=11}\n"
|
|
|
|
"pg_tblspc {path}\n"
|
|
|
|
"pg_tblspc/32768 {path}\n"
|
|
|
|
"pg_tblspc/32768/PG_11_201809051 {path}\n"
|
|
|
|
"pg_tblspc/32768/PG_11_201809051/1 {path}\n"
|
2020-03-18 10:10:10 -04:00
|
|
|
"pg_tblspc/32768/PG_11_201809051/1/5.gz {file, s=0}\n"
|
|
|
|
"--------\n"
|
|
|
|
"[backup:target]\n"
|
|
|
|
"pg_data={\"path\":\"{[path]}/pg1\",\"type\":\"path\"}\n"
|
2020-11-09 16:26:43 -05:00
|
|
|
"pg_tblspc/32768={\"path\":\"../../pg1-tblspc/32768\",\"tablespace-id\":\"32768\""
|
|
|
|
",\"tablespace-name\":\"tblspc32768\",\"type\":\"link\"}\n"
|
2020-03-18 10:10:10 -04:00
|
|
|
"\n"
|
|
|
|
"[target:file]\n"
|
|
|
|
"pg_data/PG_VERSION={\"checksum\":\"17ba0791499db908433b80f37c5fbc89b870084b\",\"reference\":\"20191027-181320F\""
|
|
|
|
",\"size\":2,\"timestamp\":1572200000}\n"
|
|
|
|
"pg_data/backup_label={\"checksum\":\"8e6f41ac87a7514be96260d65bacbffb11be77dc\",\"size\":17"
|
|
|
|
",\"timestamp\":1572400002}\n"
|
|
|
|
"pg_data/global/pg_control={\"reference\":\"20191027-181320F\",\"size\":8192,\"timestamp\":1572400000}\n"
|
|
|
|
"pg_data/postgresql.conf={\"checksum\":\"e3db315c260e79211b7b52587123b7aa060f30ab\""
|
|
|
|
",\"reference\":\"20191027-181320F\",\"size\":11,\"timestamp\":1570000000}\n"
|
|
|
|
"pg_tblspc/32768/PG_11_201809051/1/5={\"checksum-page\":true,\"master\":false,\"reference\":\"20191027-181320F\""
|
|
|
|
",\"size\":0,\"timestamp\":1572200000}\n"
|
|
|
|
"\n"
|
|
|
|
"[target:link]\n"
|
|
|
|
"pg_data/pg_tblspc/32768={\"destination\":\"../../pg1-tblspc/32768\"}\n"
|
|
|
|
"\n"
|
|
|
|
"[target:path]\n"
|
|
|
|
"pg_data={}\n"
|
|
|
|
"pg_data/base={}\n"
|
|
|
|
"pg_data/global={}\n"
|
|
|
|
"pg_data/pg_tblspc={}\n"
|
|
|
|
"pg_data/pg_wal={}\n"
|
|
|
|
"pg_tblspc={}\n"
|
|
|
|
"pg_tblspc/32768={}\n"
|
|
|
|
"pg_tblspc/32768/PG_11_201809051={}\n"
|
|
|
|
"pg_tblspc/32768/PG_11_201809051/1={}\n",
|
2019-12-13 17:14:26 -05:00
|
|
|
"compare file list");
|
|
|
|
|
|
|
|
// Remove test files
|
|
|
|
storagePathRemoveP(storagePgWrite(), STRDEF("base/1"), .recurse = true);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-07-25 14:34:16 -04:00
|
|
|
FUNCTION_HARNESS_RESULT_VOID();
|
|
|
|
}
|