2019-07-25 14:34:16 -04:00
|
|
|
/***********************************************************************************************************************************
Test Backup Command
***********************************************************************************************************************************/
|
2019-12-13 17:14:26 -05:00
|
|
|
#include "command/stanza/create.h"
|
|
|
|
#include "command/stanza/upgrade.h"
|
2020-03-18 10:10:10 -04:00
|
|
|
#include "common/crypto/hash.h"
|
2019-07-25 14:34:16 -04:00
|
|
|
#include "common/io/bufferWrite.h"
|
2020-05-18 19:11:26 -04:00
|
|
|
#include "postgres/interface/static.vendor.h"
|
2019-07-25 14:34:16 -04:00
|
|
|
#include "storage/helper.h"
|
|
|
|
#include "storage/posix/storage.h"
|
|
|
|
|
|
|
|
#include "common/harnessConfig.h"
|
2021-05-17 07:20:28 -04:00
|
|
|
#include "common/harnessPostgres.h"
|
2019-12-13 17:14:26 -05:00
|
|
|
#include "common/harnessPq.h"
|
2021-09-24 17:40:31 -04:00
|
|
|
#include "common/harnessPack.h"
|
2021-05-21 12:45:00 -04:00
|
|
|
#include "common/harnessProtocol.h"
|
2021-03-11 14:40:14 -05:00
|
|
|
#include "common/harnessStorage.h"
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
/***********************************************************************************************************************************
Get a list of all files in the backup and a redacted version of the manifest that can be tested against a static string
***********************************************************************************************************************************/
|
|
|
|
typedef struct TestBackupValidateCallbackData
|
|
|
|
{
|
|
|
|
const Storage *storage; // Storage object when needed (e.g. fileCompressed = true)
|
|
|
|
const String *path; // Subpath when storage is specified
|
Pack manifest file structs to save memory.
Manifests with a very large number of files can use a considerable amount of memory. There are a lot of zeroes in the data so it can be stored more efficiently by using base-128 varint encoding for the integers and storing the strings in the same allocation.
The downside is that the data needs to be unpacked in order to be used, but in most cases this seems fast enough (about 10% slower than before) except for saving the manifest, which is 10% slower up to 10 million files and then gets about 5x slower by 100 million (two minutes on my M1 Mac). Profiling does not show this slowdown so I wonder if this is related to the change in memory layout. Curiously, the function that increased most was jsonFromStrInternal(), which was not modified. That gives more weight to the idea that there is some kind of memory issue going on here and one hopes that servers would be less affected. Either way, they largest use cases we have seen are for about 6 million files so if we can improve that case I believe we will be better off.
Further analysis showed that most of the time was taken up writing the size and timestamp fields, which makes almost no sense. The same amount of time was used if they were hard-coded to 0, which points to some odd memory issue on the M1 architecture.
This change has been planned for a while, but the particular impetus at this time is that small file support requires additional fields that would increase manifest memory usage by about 20%, even if the feature is not used.
Note that the Pack code has been updated to use the new varint encoder, but the decoder remains separate because it needs to fetch one byte at a time.
2022-01-21 17:05:07 -05:00
|
|
|
Manifest *manifest; // Manifest to check for files/links/paths
|
2020-03-18 10:10:10 -04:00
|
|
|
const ManifestData *manifestData; // Manifest data
|
2019-12-13 17:14:26 -05:00
|
|
|
String *content; // String where content should be added
|
|
|
|
} TestBackupValidateCallbackData;
|
|
|
|
|
2021-10-18 12:22:48 -04:00
|
|
|
static void
|
2019-12-13 17:14:26 -05:00
|
|
|
testBackupValidateCallback(void *callbackData, const StorageInfo *info)
|
|
|
|
{
|
|
|
|
TestBackupValidateCallbackData *data = callbackData;
|
|
|
|
|
|
|
|
// Don't include . when it is a path (we'll still include it when it is a link so we can see the destination)
|
|
|
|
if (info->type == storageTypePath && strEq(info->name, DOT_STR))
|
|
|
|
return;
|
|
|
|
|
|
|
|
// Don't include backup.manifest or copy. We'll test that they are present elsewhere
|
|
|
|
if (info->type == storageTypeFile &&
|
|
|
|
(strEqZ(info->name, BACKUP_MANIFEST_FILE) || strEqZ(info->name, BACKUP_MANIFEST_FILE INFO_COPY_EXT)))
|
|
|
|
return;
|
|
|
|
|
|
|
|
switch (info->type)
|
|
|
|
{
|
|
|
|
case storageTypeFile:
|
|
|
|
{
|
2022-02-14 13:24:14 -06:00
|
|
|
// Test mode, user, group. These values are not in the manifest but we know what they should be based on the default
|
|
|
|
// mode and current user/group.
|
|
|
|
// ---------------------------------------------------------------------------------------------------------------------
|
|
|
|
if (info->mode != 0640)
|
|
|
|
THROW_FMT(AssertError, "'%s' mode is not 0640", strZ(info->name));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
if (!strEq(info->user, TEST_USER_STR))
|
|
|
|
THROW_FMT(AssertError, "'%s' user should be '" TEST_USER "'", strZ(info->name));
|
|
|
|
|
|
|
|
if (!strEq(info->group, TEST_GROUP_STR))
|
|
|
|
THROW_FMT(AssertError, "'%s' group should be '" TEST_GROUP "'", strZ(info->name));
|
|
|
|
|
|
|
|
// Build file list (needed because bundles can contain multiple files)
|
2020-03-18 10:10:10 -04:00
|
|
|
// ---------------------------------------------------------------------------------------------------------------------
|
2022-02-14 13:24:14 -06:00
|
|
|
List *const fileList = lstNewP(sizeof(ManifestFilePack **));
|
|
|
|
bool bundle = strBeginsWithZ(info->name, "bundle/");
|
2020-03-06 14:41:03 -05:00
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
if (bundle)
|
2019-12-13 17:14:26 -05:00
|
|
|
{
|
2022-04-28 09:50:23 -04:00
|
|
|
const uint64_t bundleId = cvtZToUInt64(strZ(info->name) + sizeof("bundle"));
|
2022-02-14 13:24:14 -06:00
|
|
|
|
|
|
|
for (unsigned int fileIdx = 0; fileIdx < manifestFileTotal(data->manifest); fileIdx++)
|
|
|
|
{
|
|
|
|
ManifestFilePack **const filePack = lstGet(data->manifest->pub.fileList, fileIdx);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
if (manifestFileUnpack(data->manifest, *filePack).bundleId == bundleId)
|
|
|
|
lstAdd(fileList, &filePack);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
const String *manifestName = info->name;
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
if (data->manifestData->backupOptionCompressType != compressTypeNone)
|
|
|
|
{
|
|
|
|
manifestName = strSubN(
|
|
|
|
info->name, 0, strSize(info->name) - strSize(compressExtStr(data->manifestData->backupOptionCompressType)));
|
|
|
|
}
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
ManifestFilePack **const filePack = manifestFilePackFindInternal(data->manifest, manifestName);
|
|
|
|
lstAdd(fileList, &filePack);
|
|
|
|
}
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
// Check files
|
2020-03-18 10:10:10 -04:00
|
|
|
// ---------------------------------------------------------------------------------------------------------------------
|
2022-02-14 13:24:14 -06:00
|
|
|
for (unsigned int fileIdx = 0; fileIdx < lstSize(fileList); fileIdx++)
|
|
|
|
{
|
|
|
|
ManifestFilePack **const filePack = *(ManifestFilePack ***)lstGet(fileList, fileIdx);
|
|
|
|
ManifestFile file = manifestFileUnpack(data->manifest, *filePack);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
if (bundle)
|
|
|
|
strCatFmt(data->content, "%s/%s {file", strZ(info->name), strZ(file.name));
|
|
|
|
else
|
|
|
|
strCatFmt(data->content, "%s {file", strZ(info->name));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
// Calculate checksum/size and decompress if needed
|
|
|
|
// -----------------------------------------------------------------------------------------------------------------
|
|
|
|
StorageRead *read = storageNewReadP(
|
|
|
|
data->storage, strNewFmt("%s/%s", strZ(data->path), strZ(info->name)), .offset = file.bundleOffset,
|
|
|
|
.limit = VARUINT64(file.sizeRepo));
|
2020-03-18 10:10:10 -04:00
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
if (data->manifestData->backupOptionCompressType != compressTypeNone)
|
|
|
|
{
|
|
|
|
ioFilterGroupAdd(
|
|
|
|
ioReadFilterGroup(storageReadIo(read)), decompressFilter(data->manifestData->backupOptionCompressType));
|
|
|
|
}
|
2020-03-18 10:10:10 -04:00
|
|
|
|
2022-05-25 15:27:53 -04:00
|
|
|
ioFilterGroupAdd(ioReadFilterGroup(storageReadIo(read)), cryptoHashNew(hashTypeSha1));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
uint64_t size = bufUsed(storageGetP(read));
|
|
|
|
const String *checksum = pckReadStrP(
|
|
|
|
ioFilterGroupResultP(ioReadFilterGroup(storageReadIo(read)), CRYPTO_HASH_FILTER_TYPE));
|
2020-03-18 10:10:10 -04:00
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
strCatFmt(data->content, ", s=%" PRIu64, size);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
if (!strEqZ(checksum, file.checksumSha1))
|
|
|
|
THROW_FMT(AssertError, "'%s' checksum does match manifest", strZ(file.name));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
// Test size and repo-size. If compressed then set the repo-size to size so it will not be in test output. Even the
|
|
|
|
// same compression algorithm can give slightly different results based on the version so repo-size is not
|
|
|
|
// deterministic for compression.
|
|
|
|
// -----------------------------------------------------------------------------------------------------------------
|
|
|
|
if (size != file.size)
|
|
|
|
THROW_FMT(AssertError, "'%s' size does match manifest", strZ(file.name));
|
|
|
|
|
|
|
|
// Repo size can only be compared to file size when not bundled
|
|
|
|
if (!bundle)
|
|
|
|
{
|
|
|
|
if (info->size != file.sizeRepo)
|
|
|
|
THROW_FMT(AssertError, "'%s' repo size does match manifest", strZ(file.name));
|
|
|
|
}
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
if (data->manifestData->backupOptionCompressType != compressTypeNone)
|
|
|
|
file.sizeRepo = file.size;
|
|
|
|
|
|
|
|
// Bundle id/offset are too noisy so remove them. They are checked size/checksum and listed with the files.
|
|
|
|
// -----------------------------------------------------------------------------------------------------------------
|
|
|
|
file.bundleId = 0;
|
|
|
|
file.bundleOffset = 0;
|
|
|
|
|
|
|
|
// pg_control and WAL headers have different checksums depending on cpu architecture so remove the checksum from the
|
|
|
|
// test output.
|
|
|
|
// -----------------------------------------------------------------------------------------------------------------
|
|
|
|
if (strEqZ(file.name, MANIFEST_TARGET_PGDATA "/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL) ||
|
|
|
|
strBeginsWith(
|
|
|
|
file.name, strNewFmt(MANIFEST_TARGET_PGDATA "/%s/", strZ(pgWalPath(data->manifestData->pgVersion)))))
|
|
|
|
{
|
|
|
|
file.checksumSha1[0] = '\0';
|
|
|
|
}
|
|
|
|
|
|
|
|
strCatZ(data->content, "}\n");
|
|
|
|
|
|
|
|
// Update changes to manifest file
|
|
|
|
manifestFilePackUpdate(data->manifest, filePack, &file);
|
|
|
|
}
|
Pack manifest file structs to save memory.
Manifests with a very large number of files can use a considerable amount of memory. There are a lot of zeroes in the data so it can be stored more efficiently by using base-128 varint encoding for the integers and storing the strings in the same allocation.
The downside is that the data needs to be unpacked in order to be used, but in most cases this seems fast enough (about 10% slower than before) except for saving the manifest, which is 10% slower up to 10 million files and then gets about 5x slower by 100 million (two minutes on my M1 Mac). Profiling does not show this slowdown so I wonder if this is related to the change in memory layout. Curiously, the function that increased most was jsonFromStrInternal(), which was not modified. That gives more weight to the idea that there is some kind of memory issue going on here and one hopes that servers would be less affected. Either way, they largest use cases we have seen are for about 6 million files so if we can improve that case I believe we will be better off.
Further analysis showed that most of the time was taken up writing the size and timestamp fields, which makes almost no sense. The same amount of time was used if they were hard-coded to 0, which points to some odd memory issue on the M1 architecture.
This change has been planned for a while, but the particular impetus at this time is that small file support requires additional fields that would increase manifest memory usage by about 20%, even if the feature is not used.
Note that the Pack code has been updated to use the new varint encoder, but the decoder remains separate because it needs to fetch one byte at a time.
2022-01-21 17:05:07 -05:00
|
|
|
|
2019-12-13 17:14:26 -05:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case storageTypeLink:
|
2022-02-14 13:24:14 -06:00
|
|
|
strCatFmt(data->content, "%s {link, d=%s}\n", strZ(info->name), strZ(info->linkDestination));
|
2019-12-13 17:14:26 -05:00
|
|
|
break;
|
|
|
|
|
|
|
|
case storageTypePath:
|
|
|
|
{
|
2022-02-14 13:24:14 -06:00
|
|
|
strCatFmt(data->content, "%s {path", strZ(info->name));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Check against the manifest
|
2020-03-18 10:10:10 -04:00
|
|
|
// ---------------------------------------------------------------------------------------------------------------------
|
2022-02-14 13:24:14 -06:00
|
|
|
if (!strEq(info->name, STRDEF("bundle")))
|
|
|
|
manifestPathFind(data->manifest, info->name);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2020-03-18 10:10:10 -04:00
|
|
|
// Test mode, user, group. These values are not in the manifest but we know what they should be based on the default
|
|
|
|
// mode and current user/group.
|
2019-12-13 17:14:26 -05:00
|
|
|
if (info->mode != 0750)
|
2020-07-30 07:49:06 -04:00
|
|
|
THROW_FMT(AssertError, "'%s' mode is not 00750", strZ(info->name));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-05-22 09:30:54 -04:00
|
|
|
if (!strEq(info->user, TEST_USER_STR))
|
|
|
|
THROW_FMT(AssertError, "'%s' user should be '" TEST_USER "'", strZ(info->name));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-05-22 09:30:54 -04:00
|
|
|
if (!strEq(info->group, TEST_GROUP_STR))
|
|
|
|
THROW_FMT(AssertError, "'%s' group should be '" TEST_GROUP "'", strZ(info->name));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
strCatZ(data->content, "}\n");
|
2019-12-13 17:14:26 -05:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case storageTypeSpecial:
|
2020-07-30 07:49:06 -04:00
|
|
|
THROW_FMT(AssertError, "unexpected special file '%s'", strZ(info->name));
|
2019-12-13 17:14:26 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
static String *
|
|
|
|
testBackupValidate(const Storage *storage, const String *path)
|
|
|
|
{
|
|
|
|
FUNCTION_HARNESS_BEGIN();
|
|
|
|
FUNCTION_HARNESS_PARAM(STORAGE, storage);
|
|
|
|
FUNCTION_HARNESS_PARAM(STRING, path);
|
|
|
|
FUNCTION_HARNESS_END();
|
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
ASSERT(storage != NULL);
|
|
|
|
ASSERT(path != NULL);
|
|
|
|
|
2021-05-21 17:36:43 -04:00
|
|
|
String *result = strNew();
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
MEM_CONTEXT_TEMP_BEGIN()
|
|
|
|
{
|
|
|
|
// Build a list of files in the backup path and verify against the manifest
|
2020-03-18 10:10:10 -04:00
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
2020-07-30 07:49:06 -04:00
|
|
|
Manifest *manifest = manifestLoadFile(storage, strNewFmt("%s/" BACKUP_MANIFEST_FILE, strZ(path)), cipherTypeNone, NULL);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
TestBackupValidateCallbackData callbackData =
|
|
|
|
{
|
|
|
|
.storage = storage,
|
|
|
|
.path = path,
|
|
|
|
.content = result,
|
|
|
|
.manifest = manifest,
|
2020-03-18 10:10:10 -04:00
|
|
|
.manifestData = manifestData(manifest),
|
2019-12-13 17:14:26 -05:00
|
|
|
};
|
|
|
|
|
|
|
|
storageInfoListP(storage, path, testBackupValidateCallback, &callbackData, .recurse = true, .sortOrder = sortOrderAsc);
|
2020-03-18 10:10:10 -04:00
|
|
|
|
|
|
|
// Make sure both backup.manifest files exist since we skipped them in the callback above
|
2020-07-30 07:49:06 -04:00
|
|
|
if (!storageExistsP(storage, strNewFmt("%s/" BACKUP_MANIFEST_FILE, strZ(path))))
|
2020-03-18 10:10:10 -04:00
|
|
|
THROW(AssertError, BACKUP_MANIFEST_FILE " is missing");
|
|
|
|
|
2020-07-30 07:49:06 -04:00
|
|
|
if (!storageExistsP(storage, strNewFmt("%s/" BACKUP_MANIFEST_FILE INFO_COPY_EXT, strZ(path))))
|
2020-03-18 10:10:10 -04:00
|
|
|
THROW(AssertError, BACKUP_MANIFEST_FILE INFO_COPY_EXT " is missing");
|
|
|
|
|
|
|
|
// Output the manifest to a string and exclude sections that don't need validation. Note that each of these sections should
|
|
|
|
// be considered from automatic validation but adding them to the output will make the tests too noisy. One good technique
|
|
|
|
// would be to remove it from the output only after validation so new values will cause changes in the output.
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
Buffer *manifestSaveBuffer = bufNew(0);
|
|
|
|
manifestSave(manifest, ioBufferWriteNew(manifestSaveBuffer));
|
|
|
|
|
2021-05-21 17:36:43 -04:00
|
|
|
String *manifestEdit = strNew();
|
2020-03-18 10:10:10 -04:00
|
|
|
StringList *manifestLine = strLstNewSplitZ(strTrim(strNewBuf(manifestSaveBuffer)), "\n");
|
|
|
|
bool bSkipSection = false;
|
|
|
|
|
|
|
|
for (unsigned int lineIdx = 0; lineIdx < strLstSize(manifestLine); lineIdx++)
|
|
|
|
{
|
|
|
|
const String *line = strTrim(strLstGet(manifestLine, lineIdx));
|
|
|
|
|
|
|
|
if (strChr(line, '[') == 0)
|
|
|
|
{
|
|
|
|
const String *section = strSubN(line, 1, strSize(line) - 2);
|
|
|
|
|
2022-04-25 09:06:26 -04:00
|
|
|
if (strEqZ(section, INFO_SECTION_BACKREST) ||
|
|
|
|
strEqZ(section, MANIFEST_SECTION_BACKUP) ||
|
|
|
|
strEqZ(section, MANIFEST_SECTION_BACKUP_DB) ||
|
|
|
|
strEqZ(section, MANIFEST_SECTION_BACKUP_OPTION) ||
|
|
|
|
strEqZ(section, MANIFEST_SECTION_DB) ||
|
|
|
|
strEqZ(section, MANIFEST_SECTION_TARGET_FILE_DEFAULT) ||
|
|
|
|
strEqZ(section, MANIFEST_SECTION_TARGET_LINK_DEFAULT) ||
|
|
|
|
strEqZ(section, MANIFEST_SECTION_TARGET_PATH_DEFAULT))
|
2020-03-18 10:10:10 -04:00
|
|
|
{
|
|
|
|
bSkipSection = true;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
bSkipSection = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!bSkipSection)
|
2020-07-30 07:49:06 -04:00
|
|
|
strCatFmt(manifestEdit, "%s\n", strZ(line));
|
2020-03-18 10:10:10 -04:00
|
|
|
}
|
|
|
|
|
2020-07-30 07:49:06 -04:00
|
|
|
strCatFmt(result, "--------\n%s\n", strZ(strTrim(manifestEdit)));
|
2019-12-13 17:14:26 -05:00
|
|
|
}
|
|
|
|
MEM_CONTEXT_TEMP_END();
|
|
|
|
|
2021-03-10 18:42:22 -05:00
|
|
|
FUNCTION_HARNESS_RETURN(STRING, result);
|
2019-12-13 17:14:26 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
/***********************************************************************************************************************************
Generate pq scripts for versions of PostgreSQL
***********************************************************************************************************************************/
|
|
|
|
typedef struct TestBackupPqScriptParam
|
|
|
|
{
|
|
|
|
VAR_PARAM_HEADER;
|
|
|
|
bool startFast;
|
|
|
|
bool backupStandby;
|
|
|
|
bool errorAfterStart;
|
|
|
|
bool noWal; // Don't write test WAL segments
|
2021-11-18 16:18:10 -05:00
|
|
|
bool noPriorWal; // Don't write prior test WAL segments
|
|
|
|
bool noArchiveCheck; // Do not check archive
|
2020-03-06 14:41:03 -05:00
|
|
|
CompressType walCompressType; // Compress type for the archive files
|
2019-12-13 17:14:26 -05:00
|
|
|
unsigned int walTotal; // Total WAL to write
|
2020-01-21 10:29:46 -07:00
|
|
|
unsigned int timeline; // Timeline to use for WAL files
|
2019-12-13 17:14:26 -05:00
|
|
|
} TestBackupPqScriptParam;
|
|
|
|
|
2021-03-11 14:11:21 -05:00
|
|
|
// Convenience wrapper that packs the variadic optional parameters into a TestBackupPqScriptParam struct
#define testBackupPqScriptP(pgVersion, backupStartTime, ...) \
    testBackupPqScript(pgVersion, backupStartTime, (TestBackupPqScriptParam){VAR_PARAM_INIT, __VA_ARGS__})
|
|
|
|
|
|
|
|
static void
|
|
|
|
testBackupPqScript(unsigned int pgVersion, time_t backupTimeStart, TestBackupPqScriptParam param)
|
|
|
|
{
|
2021-05-22 09:30:54 -04:00
|
|
|
const char *pg1Path = TEST_PATH "/pg1";
|
|
|
|
const char *pg2Path = TEST_PATH "/pg2";
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2020-01-21 10:29:46 -07:00
|
|
|
// If no timeline specified then use timeline 1
|
|
|
|
param.timeline = param.timeline == 0 ? 1 : param.timeline;
|
|
|
|
|
2019-12-13 17:14:26 -05:00
|
|
|
// Read pg_control to get info about the cluster
|
|
|
|
PgControl pgControl = pgControlFromFile(storagePg());
|
|
|
|
|
|
|
|
// Set archive timeout really small to save time on errors
|
2020-12-09 08:59:51 -05:00
|
|
|
cfgOptionSet(cfgOptArchiveTimeout, cfgSourceParam, varNewInt64(100));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-12-07 09:21:07 -05:00
|
|
|
// Set LSN and WAL start/stop
|
2019-12-13 17:14:26 -05:00
|
|
|
uint64_t lsnStart = ((uint64_t)backupTimeStart & 0xFFFFFF00) << 28;
|
|
|
|
uint64_t lsnStop =
|
|
|
|
lsnStart + ((param.walTotal == 0 ? 0 : param.walTotal - 1) * pgControl.walSegmentSize) + (pgControl.walSegmentSize / 2);
|
|
|
|
|
2021-11-18 16:18:10 -05:00
|
|
|
const char *walSegmentPrior = strZ(
|
|
|
|
pgLsnToWalSegment(param.timeline, lsnStart - pgControl.walSegmentSize, pgControl.walSegmentSize));
|
2020-07-30 07:49:06 -04:00
|
|
|
const char *lsnStartStr = strZ(pgLsnToStr(lsnStart));
|
|
|
|
const char *walSegmentStart = strZ(pgLsnToWalSegment(param.timeline, lsnStart, pgControl.walSegmentSize));
|
|
|
|
const char *lsnStopStr = strZ(pgLsnToStr(lsnStop));
|
|
|
|
const char *walSegmentStop = strZ(pgLsnToWalSegment(param.timeline, lsnStop, pgControl.walSegmentSize));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-12-07 09:21:07 -05:00
|
|
|
// Save pg_control with updated info
|
|
|
|
pgControl.checkpoint = lsnStart;
|
|
|
|
pgControl.timeline = param.timeline;
|
|
|
|
|
|
|
|
HRN_STORAGE_PUT(
|
|
|
|
storagePgIdxWrite(0), PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, hrnPgControlToBuffer(pgControl),
|
|
|
|
.timeModified = backupTimeStart);
|
|
|
|
|
2021-11-30 13:23:11 -05:00
|
|
|
// Update pg_control on primary with the backup time
|
|
|
|
HRN_PG_CONTROL_TIME(storagePgIdxWrite(0), backupTimeStart);
|
|
|
|
|
2019-12-13 17:14:26 -05:00
|
|
|
// Write WAL segments to the archive
|
|
|
|
// -----------------------------------------------------------------------------------------------------------------------------
|
2021-11-18 16:18:10 -05:00
|
|
|
if (!param.noPriorWal)
|
2019-12-13 17:14:26 -05:00
|
|
|
{
|
|
|
|
InfoArchive *infoArchive = infoArchiveLoadFile(storageRepo(), INFO_ARCHIVE_PATH_FILE_STR, cipherTypeNone, NULL);
|
|
|
|
const String *archiveId = infoArchiveId(infoArchive);
|
2020-01-21 10:29:46 -07:00
|
|
|
StringList *walSegmentList = pgLsnRangeToWalSegmentList(
|
2021-11-18 16:18:10 -05:00
|
|
|
pgControl.version, param.timeline, lsnStart - pgControl.walSegmentSize,
|
|
|
|
param.noWal ? lsnStart - pgControl.walSegmentSize : lsnStop,
|
|
|
|
pgControl.walSegmentSize);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
Buffer *walBuffer = bufNew((size_t)pgControl.walSegmentSize);
|
|
|
|
bufUsedSet(walBuffer, bufSize(walBuffer));
|
|
|
|
memset(bufPtr(walBuffer), 0, bufSize(walBuffer));
|
2021-05-17 07:20:28 -04:00
|
|
|
hrnPgWalToBuffer((PgWal){.version = pgControl.version, .systemId = pgControl.systemId}, walBuffer);
|
2022-05-25 15:27:53 -04:00
|
|
|
const String *walChecksum = bufHex(cryptoHashOne(hashTypeSha1, walBuffer));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
for (unsigned int walSegmentIdx = 0; walSegmentIdx < strLstSize(walSegmentList); walSegmentIdx++)
|
|
|
|
{
|
|
|
|
StorageWrite *write = storageNewWriteP(
|
|
|
|
storageRepoWrite(),
|
|
|
|
strNewFmt(
|
2020-07-30 07:49:06 -04:00
|
|
|
STORAGE_REPO_ARCHIVE "/%s/%s-%s%s", strZ(archiveId), strZ(strLstGet(walSegmentList, walSegmentIdx)),
|
|
|
|
strZ(walChecksum), strZ(compressExtStr(param.walCompressType))));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2020-03-06 14:41:03 -05:00
|
|
|
if (param.walCompressType != compressTypeNone)
|
|
|
|
ioFilterGroupAdd(ioWriteFilterGroup(storageWriteIo(write)), compressFilter(param.walCompressType, 1));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
storagePutP(write, walBuffer);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// -----------------------------------------------------------------------------------------------------------------------------
|
|
|
|
if (pgVersion == PG_VERSION_95)
|
|
|
|
{
|
|
|
|
ASSERT(!param.backupStandby);
|
|
|
|
ASSERT(!param.errorAfterStart);
|
|
|
|
|
2021-11-18 16:18:10 -05:00
|
|
|
if (param.noArchiveCheck)
|
2019-12-13 17:14:26 -05:00
|
|
|
{
|
2021-11-18 16:18:10 -05:00
|
|
|
harnessPqScriptSet((HarnessPq [])
|
|
|
|
{
|
|
|
|
// Connect to primary
|
|
|
|
HRNPQ_MACRO_OPEN_GE_92(1, "dbname='postgres' port=5432", PG_VERSION_95, pg1Path, false, NULL, NULL),
|
|
|
|
|
|
|
|
// Get start time
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-11-18 16:18:10 -05:00
|
|
|
// Start backup
|
|
|
|
HRNPQ_MACRO_ADVISORY_LOCK(1, true),
|
|
|
|
HRNPQ_MACRO_IS_IN_BACKUP(1, false),
|
2022-01-06 15:34:04 -05:00
|
|
|
HRNPQ_MACRO_START_BACKUP_LE_95(1, param.startFast, lsnStartStr, walSegmentStart),
|
2021-11-18 16:18:10 -05:00
|
|
|
HRNPQ_MACRO_DATABASE_LIST_1(1, "test1"),
|
|
|
|
HRNPQ_MACRO_TABLESPACE_LIST_0(1),
|
|
|
|
|
|
|
|
// Get copy start time
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 999),
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 1000),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-12-08 10:16:41 -05:00
|
|
|
// Ping
|
|
|
|
HRNPQ_MACRO_IS_STANDBY_QUERY(1, false),
|
|
|
|
HRNPQ_MACRO_IS_STANDBY_QUERY(1, false),
|
|
|
|
|
2021-11-18 16:18:10 -05:00
|
|
|
// Stop backup
|
|
|
|
HRNPQ_MACRO_STOP_BACKUP_LE_95(1, lsnStopStr, walSegmentStop),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-11-18 16:18:10 -05:00
|
|
|
// Get stop time
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 2000),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-11-18 16:18:10 -05:00
|
|
|
HRNPQ_MACRO_DONE()
|
|
|
|
});
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
harnessPqScriptSet((HarnessPq [])
|
|
|
|
{
|
|
|
|
// Connect to primary
|
|
|
|
HRNPQ_MACRO_OPEN_GE_92(1, "dbname='postgres' port=5432", PG_VERSION_95, pg1Path, false, NULL, NULL),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-11-18 16:18:10 -05:00
|
|
|
// Get start time
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-11-18 16:18:10 -05:00
|
|
|
// Start backup
|
|
|
|
HRNPQ_MACRO_ADVISORY_LOCK(1, true),
|
|
|
|
HRNPQ_MACRO_IS_IN_BACKUP(1, false),
|
|
|
|
HRNPQ_MACRO_CURRENT_WAL_LE_96(1, walSegmentPrior),
|
2022-01-06 15:34:04 -05:00
|
|
|
HRNPQ_MACRO_START_BACKUP_LE_95(1, param.startFast, lsnStartStr, walSegmentStart),
|
2021-11-18 16:18:10 -05:00
|
|
|
HRNPQ_MACRO_DATABASE_LIST_1(1, "test1"),
|
|
|
|
HRNPQ_MACRO_TABLESPACE_LIST_0(1),
|
|
|
|
|
|
|
|
// Get copy start time
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 999),
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 1000),
|
|
|
|
|
2021-12-08 10:16:41 -05:00
|
|
|
// Ping
|
|
|
|
HRNPQ_MACRO_IS_STANDBY_QUERY(1, false),
|
|
|
|
HRNPQ_MACRO_IS_STANDBY_QUERY(1, false),
|
|
|
|
|
2021-11-18 16:18:10 -05:00
|
|
|
// Stop backup
|
|
|
|
HRNPQ_MACRO_STOP_BACKUP_LE_95(1, lsnStopStr, walSegmentStop),
|
|
|
|
|
|
|
|
// Get stop time
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 2000),
|
|
|
|
|
|
|
|
HRNPQ_MACRO_DONE()
|
|
|
|
});
|
|
|
|
}
|
2019-12-13 17:14:26 -05:00
|
|
|
}
|
|
|
|
// -----------------------------------------------------------------------------------------------------------------------------
|
|
|
|
else if (pgVersion == PG_VERSION_96)
|
|
|
|
{
|
|
|
|
ASSERT(param.backupStandby);
|
|
|
|
ASSERT(!param.errorAfterStart);
|
2021-11-18 16:18:10 -05:00
|
|
|
ASSERT(!param.noArchiveCheck);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-12-07 09:21:07 -05:00
|
|
|
// Save pg_control with updated info
|
|
|
|
HRN_STORAGE_PUT(storagePgIdxWrite(1), PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, hrnPgControlToBuffer(pgControl));
|
|
|
|
|
2021-11-18 16:18:10 -05:00
|
|
|
if (param.noPriorWal)
|
2019-12-13 17:14:26 -05:00
|
|
|
{
|
2021-11-18 16:18:10 -05:00
|
|
|
harnessPqScriptSet((HarnessPq [])
|
|
|
|
{
|
|
|
|
// Connect to primary
|
|
|
|
HRNPQ_MACRO_OPEN_GE_96(1, "dbname='postgres' port=5432", PG_VERSION_96, pg1Path, false, NULL, NULL),
|
|
|
|
|
|
|
|
// Connect to standby
|
|
|
|
HRNPQ_MACRO_OPEN_GE_96(2, "dbname='postgres' port=5433", PG_VERSION_96, pg2Path, true, NULL, NULL),
|
|
|
|
|
|
|
|
// Get start time
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000),
|
|
|
|
|
|
|
|
// Start backup
|
|
|
|
HRNPQ_MACRO_ADVISORY_LOCK(1, true),
|
|
|
|
HRNPQ_MACRO_CURRENT_WAL_LE_96(1, walSegmentPrior),
|
|
|
|
HRNPQ_MACRO_START_BACKUP_96(1, true, lsnStartStr, walSegmentStart),
|
|
|
|
HRNPQ_MACRO_DATABASE_LIST_1(1, "test1"),
|
|
|
|
HRNPQ_MACRO_TABLESPACE_LIST_0(1),
|
|
|
|
|
|
|
|
// Wait for standby to sync
|
|
|
|
HRNPQ_MACRO_REPLAY_WAIT_96(2, lsnStartStr),
|
|
|
|
|
|
|
|
HRNPQ_MACRO_DONE()
|
|
|
|
});
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
harnessPqScriptSet((HarnessPq [])
|
|
|
|
{
|
|
|
|
// Connect to primary
|
|
|
|
HRNPQ_MACRO_OPEN_GE_96(1, "dbname='postgres' port=5432", PG_VERSION_96, pg1Path, false, NULL, NULL),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-11-18 16:18:10 -05:00
|
|
|
// Connect to standby
|
|
|
|
HRNPQ_MACRO_OPEN_GE_96(2, "dbname='postgres' port=5433", PG_VERSION_96, pg2Path, true, NULL, NULL),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-11-18 16:18:10 -05:00
|
|
|
// Get start time
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-11-18 16:18:10 -05:00
|
|
|
// Start backup
|
|
|
|
HRNPQ_MACRO_ADVISORY_LOCK(1, true),
|
|
|
|
HRNPQ_MACRO_CURRENT_WAL_LE_96(1, walSegmentPrior),
|
|
|
|
HRNPQ_MACRO_START_BACKUP_96(1, true, lsnStartStr, walSegmentStart),
|
|
|
|
HRNPQ_MACRO_DATABASE_LIST_1(1, "test1"),
|
|
|
|
HRNPQ_MACRO_TABLESPACE_LIST_0(1),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-11-18 16:18:10 -05:00
|
|
|
// Wait for standby to sync
|
|
|
|
HRNPQ_MACRO_REPLAY_WAIT_96(2, lsnStartStr),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-11-18 16:18:10 -05:00
|
|
|
// Get copy start time
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 999),
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 1000),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-12-08 10:16:41 -05:00
|
|
|
// Ping
|
|
|
|
HRNPQ_MACRO_IS_STANDBY_QUERY(1, false),
|
|
|
|
HRNPQ_MACRO_IS_STANDBY_QUERY(2, true),
|
|
|
|
HRNPQ_MACRO_IS_STANDBY_QUERY(1, false),
|
|
|
|
HRNPQ_MACRO_IS_STANDBY_QUERY(2, true),
|
|
|
|
|
2021-11-18 16:18:10 -05:00
|
|
|
// Stop backup
|
|
|
|
HRNPQ_MACRO_STOP_BACKUP_96(1, lsnStopStr, walSegmentStop, false),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-11-18 16:18:10 -05:00
|
|
|
// Get stop time
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 2000),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-11-18 16:18:10 -05:00
|
|
|
HRNPQ_MACRO_DONE()
|
|
|
|
});
|
|
|
|
}
|
2019-12-13 17:14:26 -05:00
|
|
|
}
|
|
|
|
// -----------------------------------------------------------------------------------------------------------------------------
|
|
|
|
else if (pgVersion == PG_VERSION_11)
|
|
|
|
{
|
|
|
|
ASSERT(!param.backupStandby);
|
2021-11-18 16:18:10 -05:00
|
|
|
ASSERT(!param.noArchiveCheck);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
if (param.errorAfterStart)
|
|
|
|
{
|
|
|
|
harnessPqScriptSet((HarnessPq [])
|
|
|
|
{
|
|
|
|
// Connect to primary
|
2020-06-25 08:02:48 -04:00
|
|
|
HRNPQ_MACRO_OPEN_GE_96(1, "dbname='postgres' port=5432", PG_VERSION_11, pg1Path, false, NULL, NULL),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Get start time
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000),
|
|
|
|
|
|
|
|
// Start backup
|
|
|
|
HRNPQ_MACRO_ADVISORY_LOCK(1, true),
|
2021-11-18 16:18:10 -05:00
|
|
|
HRNPQ_MACRO_CURRENT_WAL_GE_10(1, walSegmentPrior),
|
2019-12-13 17:14:26 -05:00
|
|
|
HRNPQ_MACRO_START_BACKUP_GE_10(1, param.startFast, lsnStartStr, walSegmentStart),
|
|
|
|
HRNPQ_MACRO_DATABASE_LIST_1(1, "test1"),
|
|
|
|
HRNPQ_MACRO_TABLESPACE_LIST_1(1, 32768, "tblspc32768"),
|
|
|
|
|
|
|
|
// Get copy start time
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 999),
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 1000),
|
|
|
|
|
|
|
|
HRNPQ_MACRO_DONE()
|
|
|
|
});
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
harnessPqScriptSet((HarnessPq [])
|
|
|
|
{
|
|
|
|
// Connect to primary
|
2020-06-25 08:02:48 -04:00
|
|
|
HRNPQ_MACRO_OPEN_GE_96(1, "dbname='postgres' port=5432", PG_VERSION_11, pg1Path, false, NULL, NULL),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Get start time
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000),
|
|
|
|
|
|
|
|
// Start backup
|
|
|
|
HRNPQ_MACRO_ADVISORY_LOCK(1, true),
|
2021-11-18 16:18:10 -05:00
|
|
|
HRNPQ_MACRO_CURRENT_WAL_GE_10(1, walSegmentStart),
|
2019-12-13 17:14:26 -05:00
|
|
|
HRNPQ_MACRO_START_BACKUP_GE_10(1, param.startFast, lsnStartStr, walSegmentStart),
|
2021-11-18 16:18:10 -05:00
|
|
|
|
|
|
|
// Switch WAL segment so it can be checked
|
|
|
|
HRNPQ_MACRO_CREATE_RESTORE_POINT(1, "X/X"),
|
|
|
|
HRNPQ_MACRO_WAL_SWITCH(1, "wal", walSegmentStart),
|
|
|
|
|
|
|
|
// Get database and tablespace list
|
2021-07-15 17:00:20 -04:00
|
|
|
HRNPQ_MACRO_DATABASE_LIST_1(1, "test1"),
|
2019-12-13 17:14:26 -05:00
|
|
|
HRNPQ_MACRO_TABLESPACE_LIST_1(1, 32768, "tblspc32768"),
|
|
|
|
|
|
|
|
// Get copy start time
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 999),
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 1000),
|
|
|
|
|
2021-12-08 10:16:41 -05:00
|
|
|
// Ping
|
|
|
|
HRNPQ_MACRO_IS_STANDBY_QUERY(1, false),
|
|
|
|
HRNPQ_MACRO_IS_STANDBY_QUERY(1, false),
|
|
|
|
|
2019-12-13 17:14:26 -05:00
|
|
|
// Stop backup
|
2021-11-15 14:32:22 -05:00
|
|
|
HRNPQ_MACRO_STOP_BACKUP_GE_10(1, lsnStopStr, walSegmentStop, true),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Get stop time
|
|
|
|
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 2000),
|
|
|
|
|
|
|
|
HRNPQ_MACRO_DONE()
|
|
|
|
});
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
THROW_FMT(AssertError, "unsupported test version %u", pgVersion); // {uncoverable - no invalid versions in tests}
|
|
|
|
};
|
2019-07-25 14:34:16 -04:00
|
|
|
|
2022-05-04 12:52:05 -04:00
|
|
|
/***********************************************************************************************************************************
Wrap cmdBackup() with lock acquire and release
***********************************************************************************************************************************/
void testCmdBackup(void)
{
    FUNCTION_HARNESS_VOID();

    // Acquire the backup lock first (failOnNoLock = true), mirroring what the real command dispatcher does before cmdBackup() runs
    lockAcquire(TEST_PATH_STR, cfgOptionStr(cfgOptStanza), cfgOptionStr(cfgOptExecId), lockTypeBackup, 0, true);

    TRY_BEGIN()
    {
        cmdBackup();
    }
    FINALLY()
    {
        // Always release the lock, even when cmdBackup() throws, so subsequent tests in this process can reacquire it
        lockRelease(true);
    }
    TRY_END();

    FUNCTION_HARNESS_RETURN_VOID();
}
|
|
|
|
|
2019-07-25 14:34:16 -04:00
|
|
|
/***********************************************************************************************************************************
|
|
|
|
Test Run
|
|
|
|
***********************************************************************************************************************************/
|
2021-10-18 12:22:48 -04:00
|
|
|
static void
|
2019-07-25 14:34:16 -04:00
|
|
|
testRun(void)
|
|
|
|
{
|
|
|
|
FUNCTION_HARNESS_VOID();
|
|
|
|
|
2021-05-21 12:45:00 -04:00
|
|
|
// Install local command handler shim
|
|
|
|
static const ProtocolServerHandler testLocalHandlerList[] = {PROTOCOL_SERVER_HANDLER_BACKUP_LIST};
|
2022-04-07 19:00:15 -04:00
|
|
|
hrnProtocolLocalShimInstall(testLocalHandlerList, LENGTH_OF(testLocalHandlerList));
|
2021-05-21 12:45:00 -04:00
|
|
|
|
2019-12-13 17:14:26 -05:00
|
|
|
// The tests expect the timezone to be UTC
|
|
|
|
setenv("TZ", "UTC", true);
|
|
|
|
|
2021-05-22 09:30:54 -04:00
|
|
|
Storage *storageTest = storagePosixNewP(TEST_PATH_STR, .write = true);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2022-02-18 17:50:05 -06:00
|
|
|
// *****************************************************************************************************************************
|
|
|
|
if (testBegin("backupRegExp()"))
|
|
|
|
{
|
|
|
|
const String *full = STRDEF("20181119-152138F");
|
|
|
|
const String *incr = STRDEF("20181119-152138F_20181119-152152I");
|
|
|
|
const String *diff = STRDEF("20181119-152138F_20181119-152152D");
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("regular expression - error");
|
|
|
|
|
|
|
|
TEST_ERROR(
|
|
|
|
backupRegExpP(0),
|
|
|
|
AssertError, "assertion 'param.full || param.differential || param.incremental' failed");
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("regular expression - match full");
|
|
|
|
|
|
|
|
String *filter = backupRegExpP(.full = true);
|
|
|
|
TEST_RESULT_STR_Z(filter, "^[0-9]{8}\\-[0-9]{6}F$", "full backup regex with anchors");
|
|
|
|
TEST_RESULT_BOOL(regExpMatchOne(filter, incr), false, "does not exactly match incr");
|
|
|
|
TEST_RESULT_BOOL(regExpMatchOne(filter, diff), false, "does not exactly match diff");
|
|
|
|
TEST_RESULT_BOOL(regExpMatchOne(filter, full), true, "exactly matches full");
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("regular expression - match full, incremental");
|
|
|
|
|
|
|
|
filter = backupRegExpP(.full = true, .incremental = true);
|
|
|
|
|
|
|
|
TEST_RESULT_STR_Z(
|
|
|
|
filter, "^[0-9]{8}\\-[0-9]{6}F(\\_[0-9]{8}\\-[0-9]{6}I){0,1}$", "full and optional incr backup regex with anchors");
|
|
|
|
TEST_RESULT_BOOL(regExpMatchOne(filter, incr), true, "match incr");
|
|
|
|
TEST_RESULT_BOOL(regExpMatchOne(filter, diff), false, "does not match diff");
|
|
|
|
TEST_RESULT_BOOL(regExpMatchOne(filter, full), true, "match full");
|
|
|
|
TEST_RESULT_BOOL(
|
|
|
|
regExpMatchOne(
|
|
|
|
filter, STRDEF("12341234-123123F_12341234-123123IG")), false, "does not match with trailing character");
|
|
|
|
TEST_RESULT_BOOL(
|
|
|
|
regExpMatchOne(
|
|
|
|
filter, STRDEF("A12341234-123123F_12341234-123123I")), false, "does not match with leading character");
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("regular expression - match full, differential");
|
|
|
|
|
|
|
|
filter = backupRegExpP(.full = true, .differential = true);
|
|
|
|
|
|
|
|
TEST_RESULT_STR_Z(
|
|
|
|
filter, "^[0-9]{8}\\-[0-9]{6}F(\\_[0-9]{8}\\-[0-9]{6}D){0,1}$", "full and optional diff backup regex with anchors");
|
|
|
|
TEST_RESULT_BOOL(regExpMatchOne(filter, incr), false, "does not match incr");
|
|
|
|
TEST_RESULT_BOOL(regExpMatchOne(filter, diff), true, "match diff");
|
|
|
|
TEST_RESULT_BOOL(regExpMatchOne(filter, full), true, "match full");
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("regular expression - match full, incremental, differential");
|
|
|
|
|
|
|
|
filter = backupRegExpP(.full = true, .incremental = true, .differential = true);
|
|
|
|
|
|
|
|
TEST_RESULT_STR_Z(
|
|
|
|
filter, "^[0-9]{8}\\-[0-9]{6}F(\\_[0-9]{8}\\-[0-9]{6}(D|I)){0,1}$",
|
|
|
|
"full, optional diff and incr backup regex with anchors");
|
|
|
|
TEST_RESULT_BOOL(regExpMatchOne(filter, incr), true, "match incr");
|
|
|
|
TEST_RESULT_BOOL(regExpMatchOne(filter, diff), true, "match diff");
|
|
|
|
TEST_RESULT_BOOL(regExpMatchOne(filter, full), true, "match full");
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("regular expression - match incremental, differential without end anchor");
|
|
|
|
|
|
|
|
filter = backupRegExpP(.incremental = true, .differential = true, .noAnchorEnd = true);
|
|
|
|
|
|
|
|
TEST_RESULT_STR_Z(filter, "^[0-9]{8}\\-[0-9]{6}F\\_[0-9]{8}\\-[0-9]{6}(D|I)", "diff and incr backup regex with anchors");
|
|
|
|
TEST_RESULT_BOOL(regExpMatchOne(filter, incr), true, "match incr");
|
|
|
|
TEST_RESULT_BOOL(regExpMatchOne(filter, diff), true, "match diff");
|
|
|
|
TEST_RESULT_BOOL(regExpMatchOne(filter, full), false, "does not match full");
|
|
|
|
TEST_RESULT_BOOL(
|
|
|
|
regExpMatchOne(
|
|
|
|
filter, STRDEF("A12341234-123123F_12341234-123123I")), false, "does not match with leading character");
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("regular expression - match incremental");
|
|
|
|
|
|
|
|
filter = backupRegExpP(.incremental = true);
|
|
|
|
|
|
|
|
TEST_RESULT_STR_Z(filter, "^[0-9]{8}\\-[0-9]{6}F\\_[0-9]{8}\\-[0-9]{6}I$", "incr backup regex with anchors");
|
|
|
|
TEST_RESULT_BOOL(regExpMatchOne(filter, incr), true, "match incr");
|
|
|
|
TEST_RESULT_BOOL(regExpMatchOne(filter, diff), false, "does not match diff");
|
|
|
|
TEST_RESULT_BOOL(regExpMatchOne(filter, full), false, "does not match full");
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("regular expression - match differential");
|
|
|
|
|
|
|
|
filter = backupRegExpP(.differential = true);
|
|
|
|
|
|
|
|
TEST_RESULT_STR_Z(filter, "^[0-9]{8}\\-[0-9]{6}F\\_[0-9]{8}\\-[0-9]{6}D$", "diff backup regex with anchors");
|
|
|
|
TEST_RESULT_BOOL(regExpMatchOne(filter, incr), false, "does not match incr");
|
|
|
|
TEST_RESULT_BOOL(regExpMatchOne(filter, diff), true, "match diff");
|
|
|
|
TEST_RESULT_BOOL(regExpMatchOne(filter, full), false, "does not match full");
|
|
|
|
}
|
|
|
|
|
|
|
|
// *****************************************************************************************************************************
|
|
|
|
if (testBegin("PageChecksum"))
|
|
|
|
{
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("segment page default");
|
|
|
|
|
|
|
|
TEST_RESULT_UINT(PG_SEGMENT_PAGE_DEFAULT, 131072, "check pages per segment");
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
2022-02-23 12:05:53 -06:00
|
|
|
TEST_TITLE("two misaligned buffers in a row");
|
2022-02-18 17:50:05 -06:00
|
|
|
|
2022-02-23 12:05:53 -06:00
|
|
|
Buffer *buffer = bufNew(513);
|
2022-02-18 17:50:05 -06:00
|
|
|
bufUsedSet(buffer, bufSize(buffer));
|
|
|
|
memset(bufPtr(buffer), 0, bufSize(buffer));
|
2022-02-23 12:05:53 -06:00
|
|
|
|
2022-02-18 17:50:05 -06:00
|
|
|
*(PageHeaderData *)(bufPtr(buffer) + (PG_PAGE_SIZE_DEFAULT * 0x00)) = (PageHeaderData){.pd_upper = 0};
|
|
|
|
|
2022-02-23 12:05:53 -06:00
|
|
|
Buffer *bufferOut = bufNew(513);
|
2022-02-18 17:50:05 -06:00
|
|
|
IoWrite *write = ioBufferWriteNew(bufferOut);
|
|
|
|
ioFilterGroupAdd(
|
|
|
|
ioWriteFilterGroup(write),
|
2022-02-23 12:05:53 -06:00
|
|
|
pageChecksumNewPack(ioFilterParamList(pageChecksumNew(0, PG_SEGMENT_PAGE_DEFAULT, STRDEF(BOGUS_STR)))));
|
2022-02-18 17:50:05 -06:00
|
|
|
ioWriteOpen(write);
|
|
|
|
ioWrite(write, buffer);
|
2022-02-23 12:05:53 -06:00
|
|
|
TEST_ERROR(ioWrite(write, buffer), AssertError, "should not be possible to see two misaligned pages in a row");
|
2022-02-18 17:50:05 -06:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
2022-02-23 12:05:53 -06:00
|
|
|
TEST_TITLE("retry a page with an invalid checksum");
|
2022-02-18 17:50:05 -06:00
|
|
|
|
2022-02-23 12:05:53 -06:00
|
|
|
// Write to file with valid checksums
|
|
|
|
buffer = bufNew(PG_PAGE_SIZE_DEFAULT * 4);
|
2022-02-18 17:50:05 -06:00
|
|
|
memset(bufPtr(buffer), 0, bufSize(buffer));
|
|
|
|
bufUsedSet(buffer, bufSize(buffer));
|
|
|
|
|
2022-02-23 12:05:53 -06:00
|
|
|
*(PageHeaderData *)(bufPtr(buffer) + (PG_PAGE_SIZE_DEFAULT * 0x00)) = (PageHeaderData){.pd_upper = 0x00};
|
|
|
|
*(PageHeaderData *)(bufPtr(buffer) + (PG_PAGE_SIZE_DEFAULT * 0x01)) = (PageHeaderData){.pd_upper = 0xFF};
|
|
|
|
((PageHeaderData *)(bufPtr(buffer) + (PG_PAGE_SIZE_DEFAULT * 0x01)))->pd_checksum = pgPageChecksum(
|
|
|
|
bufPtr(buffer) + (PG_PAGE_SIZE_DEFAULT * 0x01), 1);
|
|
|
|
*(PageHeaderData *)(bufPtr(buffer) + (PG_PAGE_SIZE_DEFAULT * 0x02)) = (PageHeaderData){.pd_upper = 0x00};
|
|
|
|
*(PageHeaderData *)(bufPtr(buffer) + (PG_PAGE_SIZE_DEFAULT * 0x03)) = (PageHeaderData){.pd_upper = 0xFE};
|
|
|
|
((PageHeaderData *)(bufPtr(buffer) + (PG_PAGE_SIZE_DEFAULT * 0x03)))->pd_checksum = pgPageChecksum(
|
|
|
|
bufPtr(buffer) + (PG_PAGE_SIZE_DEFAULT * 0x03), 3);
|
2022-02-18 17:50:05 -06:00
|
|
|
|
2022-02-23 12:05:53 -06:00
|
|
|
HRN_STORAGE_PUT(storageTest, "relation", buffer);
|
2022-02-18 17:50:05 -06:00
|
|
|
|
2022-02-23 12:05:53 -06:00
|
|
|
// Now break the checksum to force a retry
|
|
|
|
((PageHeaderData *)(bufPtr(buffer) + (PG_PAGE_SIZE_DEFAULT * 0x01)))->pd_checksum = 0;
|
|
|
|
((PageHeaderData *)(bufPtr(buffer) + (PG_PAGE_SIZE_DEFAULT * 0x03)))->pd_checksum = 0;
|
2022-02-18 17:50:05 -06:00
|
|
|
|
|
|
|
write = ioBufferWriteNew(bufferOut);
|
2022-02-23 12:05:53 -06:00
|
|
|
ioFilterGroupAdd(
|
|
|
|
ioWriteFilterGroup(write), pageChecksumNew(0, PG_SEGMENT_PAGE_DEFAULT, storagePathP(storageTest, STRDEF("relation"))));
|
2022-02-18 17:50:05 -06:00
|
|
|
ioWriteOpen(write);
|
|
|
|
ioWrite(write, buffer);
|
|
|
|
ioWriteClose(write);
|
|
|
|
|
|
|
|
TEST_RESULT_STR_Z(
|
|
|
|
hrnPackToStr(ioFilterGroupResultPackP(ioWriteFilterGroup(write), PAGE_CHECKSUM_FILTER_TYPE)),
|
2022-02-23 12:05:53 -06:00
|
|
|
"2:bool:true, 3:bool:true", "valid on retry");
|
2022-02-18 17:50:05 -06:00
|
|
|
}
|
|
|
|
|
2019-07-25 14:34:16 -04:00
|
|
|
// *****************************************************************************************************************************
|
|
|
|
if (testBegin("segmentNumber()"))
|
|
|
|
{
|
2022-01-09 10:11:00 -05:00
|
|
|
TEST_RESULT_UINT(segmentNumber(STRDEF("999")), 0, "No segment number");
|
|
|
|
TEST_RESULT_UINT(segmentNumber(STRDEF("999.123")), 123, "Segment number");
|
2019-07-25 14:34:16 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// *****************************************************************************************************************************
|
2021-05-21 12:45:00 -04:00
|
|
|
if (testBegin("backupFile()"))
|
2019-07-25 14:34:16 -04:00
|
|
|
{
|
2022-01-09 10:11:00 -05:00
|
|
|
const String *pgFile = STRDEF("testfile");
|
|
|
|
const String *missingFile = STRDEF("missing");
|
|
|
|
const String *backupLabel = STRDEF("20190718-155825F");
|
|
|
|
const String *backupPathFile = strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(backupLabel), strZ(pgFile));
|
|
|
|
BackupFileResult result = {0};
|
|
|
|
|
2019-07-25 14:34:16 -04:00
|
|
|
// Load Parameters
|
|
|
|
StringList *argList = strLstNew();
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoPath, TEST_PATH "/repo");
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptPgPath, TEST_PATH "/pg");
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2021-06-01 09:03:44 -04:00
|
|
|
HRN_CFG_LOAD(cfgCmdBackup, argList);
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
// Create the pg path
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PATH_CREATE(storagePgWrite(), NULL, .mode = 0700);
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_TITLE("pg file missing - ignoreMissing=true");
|
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
List *fileList = lstNewP(sizeof(BackupFile));
|
|
|
|
|
|
|
|
BackupFile file =
|
|
|
|
{
|
|
|
|
.pgFile = missingFile,
|
|
|
|
.pgFileIgnoreMissing = true,
|
|
|
|
.pgFileSize = 0,
|
|
|
|
.pgFileCopyExactSize = true,
|
|
|
|
.pgFileChecksum = NULL,
|
|
|
|
.pgFileChecksumPage = false,
|
|
|
|
.manifestFile = missingFile,
|
|
|
|
.manifestFileHasReference = false,
|
|
|
|
};
|
|
|
|
|
|
|
|
lstAdd(fileList, &file);
|
|
|
|
|
|
|
|
const String *repoFile = strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(backupLabel), strZ(file.manifestFile));
|
|
|
|
|
2019-07-25 14:34:16 -04:00
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
2022-02-14 13:24:14 -06:00
|
|
|
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeNone, 1, false, cipherTypeNone, NULL, fileList), 0),
|
2019-07-25 14:34:16 -04:00
|
|
|
"pg file missing, ignoreMissing=true, no delta");
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_RESULT_UINT(result.copySize + result.repoSize, 0, "copy/repo size 0");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultSkip, "skip file");
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_TITLE("pg file missing - ignoreMissing=false");
|
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
fileList = lstNewP(sizeof(BackupFile));
|
|
|
|
|
|
|
|
file = (BackupFile)
|
|
|
|
{
|
|
|
|
.pgFile = missingFile,
|
|
|
|
.pgFileIgnoreMissing = false,
|
|
|
|
.pgFileSize = 0,
|
|
|
|
.pgFileCopyExactSize = true,
|
|
|
|
.pgFileChecksum = NULL,
|
|
|
|
.pgFileChecksumPage = false,
|
|
|
|
.manifestFile = missingFile,
|
|
|
|
.manifestFileHasReference = false,
|
|
|
|
};
|
|
|
|
|
|
|
|
lstAdd(fileList, &file);
|
|
|
|
|
2021-05-22 09:30:54 -04:00
|
|
|
TEST_ERROR(
|
2022-02-14 13:24:14 -06:00
|
|
|
backupFile(repoFile, compressTypeNone, 1, false, cipherTypeNone, NULL, fileList), FileMissingError,
|
|
|
|
"unable to open missing file '" TEST_PATH "/pg/missing' for read");
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
// Create a pg file to backup
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PUT_Z(storagePgWrite(), strZ(pgFile), "atestfile");
|
2019-07-25 14:34:16 -04:00
|
|
|
|
2021-07-15 17:00:20 -04:00
|
|
|
// Remove repo file
|
|
|
|
HRN_STORAGE_REMOVE(storageRepoWrite(), strZ(backupPathFile));
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_TITLE("test pagechecksum while db file grows");
|
2020-03-19 13:16:05 -04:00
|
|
|
|
|
|
|
// Increase the file size but most of the following tests will still treat the file as size 9. This tests the common case
|
|
|
|
// where a file grows while a backup is running.
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PUT_Z(storagePgWrite(), strZ(pgFile), "atestfile###");
|
2020-03-19 13:16:05 -04:00
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
fileList = lstNewP(sizeof(BackupFile));
|
|
|
|
|
|
|
|
file = (BackupFile)
|
|
|
|
{
|
|
|
|
.pgFile = pgFile,
|
|
|
|
.pgFileIgnoreMissing = false,
|
|
|
|
.pgFileSize = 9,
|
|
|
|
.pgFileCopyExactSize = true,
|
|
|
|
.pgFileChecksum = NULL,
|
|
|
|
.pgFileChecksumPage = true,
|
|
|
|
.manifestFile = pgFile,
|
|
|
|
.manifestFileHasReference = false,
|
|
|
|
};
|
|
|
|
|
|
|
|
lstAdd(fileList, &file);
|
|
|
|
|
|
|
|
repoFile = strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(backupLabel), strZ(file.manifestFile));
|
|
|
|
|
2019-07-25 14:34:16 -04:00
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
2022-02-14 13:24:14 -06:00
|
|
|
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeNone, 1, false, cipherTypeNone, NULL, fileList), 0),
|
2019-07-25 14:34:16 -04:00
|
|
|
"file checksummed with pageChecksum enabled");
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_RESULT_UINT(result.copySize, 9, "copy=pgFile size");
|
|
|
|
TEST_RESULT_UINT(result.repoSize, 9, "repo=pgFile size");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultCopy, "copy file");
|
|
|
|
TEST_RESULT_STR_Z(result.copyChecksum, "9bc8ab2dda60ef4beed07d1e19ce0676d5edde67", "copy checksum matches");
|
2021-09-24 17:40:31 -04:00
|
|
|
TEST_RESULT_STR_Z(hrnPackToStr(result.pageChecksumResult), "2:bool:false, 3:bool:false", "pageChecksumResult");
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_STORAGE_EXISTS(storageRepoWrite(), strZ(backupPathFile), .remove = true, .comment = "check exists in repo, remove");
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
2022-02-23 12:05:53 -06:00
|
|
|
TEST_TITLE("pgFileSize, ignoreMissing=false, backupLabel, pgFileChecksumPage");
|
2021-05-21 12:45:00 -04:00
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
fileList = lstNewP(sizeof(BackupFile));
|
|
|
|
|
|
|
|
file = (BackupFile)
|
|
|
|
{
|
|
|
|
.pgFile = pgFile,
|
|
|
|
.pgFileIgnoreMissing = false,
|
|
|
|
.pgFileSize = 8,
|
|
|
|
.pgFileCopyExactSize = false,
|
|
|
|
.pgFileChecksum = NULL,
|
|
|
|
.pgFileChecksumPage = true,
|
|
|
|
.manifestFile = pgFile,
|
|
|
|
.manifestFileHasReference = false,
|
|
|
|
};
|
|
|
|
|
|
|
|
lstAdd(fileList, &file);
|
|
|
|
|
2021-05-21 12:45:00 -04:00
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
2022-02-14 13:24:14 -06:00
|
|
|
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeNone, 1, false, cipherTypeNone, NULL, fileList), 0),
|
2021-05-21 12:45:00 -04:00
|
|
|
"backup file");
|
|
|
|
TEST_RESULT_UINT(result.copySize, 12, "copy size");
|
|
|
|
TEST_RESULT_UINT(result.repoSize, 12, "repo size");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultCopy, "copy file");
|
|
|
|
TEST_RESULT_STR_Z(result.copyChecksum, "c3ae4687ea8ccd47bfdb190dbe7fd3b37545fdb9", "checksum");
|
2021-09-24 17:40:31 -04:00
|
|
|
TEST_RESULT_STR_Z(hrnPackToStr(result.pageChecksumResult), "2:bool:false, 3:bool:false", "page checksum");
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_STORAGE_GET(storageRepo(), strZ(backupPathFile), "atestfile###");
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_TITLE("file exists in repo and db, checksum match - NOOP");
|
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
fileList = lstNewP(sizeof(BackupFile));
|
|
|
|
|
|
|
|
file = (BackupFile)
|
|
|
|
{
|
|
|
|
.pgFile = pgFile,
|
|
|
|
.pgFileIgnoreMissing = false,
|
|
|
|
.pgFileSize = 9,
|
|
|
|
.pgFileCopyExactSize = true,
|
|
|
|
.pgFileChecksum = STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"),
|
|
|
|
.pgFileChecksumPage = false,
|
|
|
|
.manifestFile = pgFile,
|
|
|
|
.manifestFileHasReference = true,
|
|
|
|
};
|
|
|
|
|
|
|
|
lstAdd(fileList, &file);
|
|
|
|
|
2021-07-15 17:00:20 -04:00
|
|
|
// File exists in repo and db, pg checksum match, delta set, ignoreMissing false, hasReference - NOOP
|
2019-07-25 14:34:16 -04:00
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
2022-02-14 13:24:14 -06:00
|
|
|
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeNone, 1, true, cipherTypeNone, NULL, fileList), 0),
|
2019-07-25 14:34:16 -04:00
|
|
|
"file in db and repo, checksum equal, no ignoreMissing, no pageChecksum, delta, hasReference");
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_RESULT_UINT(result.copySize, 9, "copy size set");
|
|
|
|
TEST_RESULT_UINT(result.repoSize, 0, "repo size not set since already exists in repo");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultNoOp, "noop file");
|
|
|
|
TEST_RESULT_STR_Z(result.copyChecksum, "9bc8ab2dda60ef4beed07d1e19ce0676d5edde67", "copy checksum matches");
|
|
|
|
TEST_RESULT_PTR(result.pageChecksumResult, NULL, "page checksum result is NULL");
|
|
|
|
TEST_STORAGE_GET(storageRepo(), strZ(backupPathFile), "atestfile###", .comment = "file not modified");
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_TITLE("file exists in repo and db, checksum mismatch - COPY");
|
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
fileList = lstNewP(sizeof(BackupFile));
|
|
|
|
|
|
|
|
file = (BackupFile)
|
|
|
|
{
|
|
|
|
.pgFile = pgFile,
|
|
|
|
.pgFileIgnoreMissing = false,
|
|
|
|
.pgFileSize = 9,
|
|
|
|
.pgFileCopyExactSize = true,
|
|
|
|
.pgFileChecksum = STRDEF("1234567890123456789012345678901234567890"),
|
|
|
|
.pgFileChecksumPage = false,
|
|
|
|
.manifestFile = pgFile,
|
|
|
|
.manifestFileHasReference = true,
|
|
|
|
};
|
|
|
|
|
|
|
|
lstAdd(fileList, &file);
|
|
|
|
|
2019-07-25 14:34:16 -04:00
|
|
|
// File exists in repo and db, pg checksum mismatch, delta set, ignoreMissing false, hasReference - COPY
|
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
2022-02-14 13:24:14 -06:00
|
|
|
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeNone, 1, true, cipherTypeNone, NULL, fileList), 0),
|
2019-07-25 14:34:16 -04:00
|
|
|
"file in db and repo, pg checksum not equal, no ignoreMissing, no pageChecksum, delta, hasReference");
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_RESULT_UINT(result.copySize, 9, "copy 9 bytes");
|
|
|
|
TEST_RESULT_UINT(result.repoSize, 9, "repo=copy size");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultCopy, "copy file");
|
|
|
|
TEST_RESULT_STR_Z(result.copyChecksum, "9bc8ab2dda60ef4beed07d1e19ce0676d5edde67", "copy checksum for file size 9");
|
|
|
|
TEST_RESULT_PTR(result.pageChecksumResult, NULL, "page checksum result is NULL");
|
|
|
|
TEST_STORAGE_GET(storageRepo(), strZ(backupPathFile), "atestfile", .comment = "9 bytes copied");
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_TITLE("file exists in repo and pg, copy only exact file even if size passed is greater - COPY");
|
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
fileList = lstNewP(sizeof(BackupFile));
|
|
|
|
|
|
|
|
file = (BackupFile)
|
|
|
|
{
|
|
|
|
.pgFile = pgFile,
|
|
|
|
.pgFileIgnoreMissing = false,
|
|
|
|
.pgFileSize = 9999999,
|
|
|
|
.pgFileCopyExactSize = true,
|
|
|
|
.pgFileChecksum = STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"),
|
|
|
|
.pgFileChecksumPage = false,
|
|
|
|
.manifestFile = pgFile,
|
|
|
|
.manifestFileHasReference = true,
|
|
|
|
};
|
|
|
|
|
|
|
|
lstAdd(fileList, &file);
|
|
|
|
|
2021-07-15 17:00:20 -04:00
|
|
|
// File exists in repo and pg, pg checksum same, pg size passed is different, delta set, ignoreMissing false, hasReference
|
2019-07-25 14:34:16 -04:00
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
2022-02-14 13:24:14 -06:00
|
|
|
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeNone, 1, true, cipherTypeNone, NULL, fileList), 0),
|
2019-07-25 14:34:16 -04:00
|
|
|
"db & repo file, pg checksum same, pg size different, no ignoreMissing, no pageChecksum, delta, hasReference");
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_RESULT_UINT(result.copySize, 12, "copy=pgFile size");
|
|
|
|
TEST_RESULT_UINT(result.repoSize, 12, "repo=pgFile size");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultCopy, "copy file");
|
|
|
|
TEST_RESULT_STR_Z(result.copyChecksum, "c3ae4687ea8ccd47bfdb190dbe7fd3b37545fdb9", "copy checksum updated");
|
|
|
|
TEST_RESULT_PTR(result.pageChecksumResult, NULL, "page checksum result is NULL");
|
|
|
|
TEST_STORAGE_GET(storageRepo(), strZ(backupPathFile), "atestfile###", .comment = "confirm contents");
|
2019-07-25 14:34:16 -04:00
|
|
|
|
2019-12-07 09:48:33 -05:00
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_TITLE("resumed file is missing in repo but present in resumed manifest, file same name in repo - RECOPY");
|
2019-12-07 09:48:33 -05:00
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
fileList = lstNewP(sizeof(BackupFile));
|
|
|
|
|
|
|
|
file = (BackupFile)
|
|
|
|
{
|
|
|
|
.pgFile = pgFile,
|
|
|
|
.pgFileIgnoreMissing = false,
|
|
|
|
.pgFileSize = 9,
|
|
|
|
.pgFileCopyExactSize = true,
|
|
|
|
.pgFileChecksum = STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"),
|
|
|
|
.pgFileChecksumPage = false,
|
|
|
|
.manifestFile = STRDEF(BOGUS_STR),
|
|
|
|
.manifestFileHasReference = false,
|
|
|
|
};
|
|
|
|
|
|
|
|
lstAdd(fileList, &file);
|
|
|
|
|
|
|
|
repoFile = strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(backupLabel), strZ(file.manifestFile));
|
|
|
|
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_STORAGE_LIST(
|
|
|
|
storageRepo(), STORAGE_REPO_BACKUP "/20190718-155825F", "testfile\n", .comment = "resumed file is missing in repo");
|
2019-12-07 09:48:33 -05:00
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
2022-02-14 13:24:14 -06:00
|
|
|
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeNone, 1, true, cipherTypeNone, NULL, fileList), 0),
|
2021-07-15 17:00:20 -04:00
|
|
|
"backup 9 bytes of pgfile to file to resume in repo");
|
|
|
|
TEST_RESULT_UINT(result.copySize, 9, "copy 9 bytes");
|
|
|
|
TEST_RESULT_UINT(result.repoSize, 9, "repo=copy size");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultReCopy, "check recopy result");
|
|
|
|
TEST_RESULT_STR_Z(result.copyChecksum, "9bc8ab2dda60ef4beed07d1e19ce0676d5edde67", "copy checksum for file size 9");
|
|
|
|
TEST_RESULT_PTR(result.pageChecksumResult, NULL, "page checksum result is NULL");
|
|
|
|
TEST_STORAGE_GET(
|
|
|
|
storageRepo(), strZ(backupPathFile), "atestfile###", .comment = "existing file with same name as pgFile not modified");
|
|
|
|
TEST_STORAGE_GET(
|
|
|
|
storageRepo(), STORAGE_REPO_BACKUP "/20190718-155825F/" BOGUS_STR, "atestfile", .comment = "resumed file copied");
|
2019-12-07 09:48:33 -05:00
|
|
|
|
2019-07-25 14:34:16 -04:00
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_TITLE("file exists in repo & db, checksum not same in repo - RECOPY");
|
|
|
|
|
|
|
|
HRN_STORAGE_PUT_Z(
|
|
|
|
storageRepoWrite(), strZ(backupPathFile), "adifferentfile",
|
|
|
|
.comment = "create different file (size and checksum) with same name in repo");
|
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
fileList = lstNewP(sizeof(BackupFile));
|
|
|
|
|
|
|
|
file = (BackupFile)
|
|
|
|
{
|
|
|
|
.pgFile = pgFile,
|
|
|
|
.pgFileIgnoreMissing = false,
|
|
|
|
.pgFileSize = 9,
|
|
|
|
.pgFileCopyExactSize = true,
|
|
|
|
.pgFileChecksum = STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"),
|
|
|
|
.pgFileChecksumPage = false,
|
|
|
|
.manifestFile = pgFile,
|
|
|
|
.manifestFileHasReference = false,
|
|
|
|
};
|
|
|
|
|
|
|
|
lstAdd(fileList, &file);
|
|
|
|
|
|
|
|
repoFile = strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(backupLabel), strZ(file.manifestFile));
|
|
|
|
|
2021-07-15 17:00:20 -04:00
|
|
|
// Delta set, ignoreMissing false, no hasReference
|
2019-07-25 14:34:16 -04:00
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
2022-02-14 13:24:14 -06:00
|
|
|
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeNone, 1, true, cipherTypeNone, NULL, fileList), 0),
|
2021-07-15 17:00:20 -04:00
|
|
|
"db & repo file, pgFileMatch, repo checksum no match, no ignoreMissing, no pageChecksum, delta, no hasReference");
|
|
|
|
TEST_RESULT_UINT(result.copySize, 9, "copy 9 bytes");
|
|
|
|
TEST_RESULT_UINT(result.repoSize, 9, "repo=copy size");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultReCopy, "recopy file");
|
|
|
|
TEST_RESULT_STR_Z(result.copyChecksum, "9bc8ab2dda60ef4beed07d1e19ce0676d5edde67", "copy checksum for file size 9");
|
|
|
|
TEST_RESULT_PTR(result.pageChecksumResult, NULL, "page checksum result is NULL");
|
|
|
|
TEST_STORAGE_GET(storageRepo(), strZ(backupPathFile), "atestfile", .comment = "existing file recopied");
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_TITLE("file exists in repo but missing from db, checksum same in repo - SKIP");
|
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
fileList = lstNewP(sizeof(BackupFile));
|
|
|
|
|
|
|
|
file = (BackupFile)
|
|
|
|
{
|
|
|
|
.pgFile = missingFile,
|
|
|
|
.pgFileIgnoreMissing = true,
|
|
|
|
.pgFileSize = 9,
|
|
|
|
.pgFileCopyExactSize = true,
|
|
|
|
.pgFileChecksum = STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"),
|
|
|
|
.pgFileChecksumPage = false,
|
|
|
|
.manifestFile = pgFile,
|
|
|
|
.manifestFileHasReference = false,
|
|
|
|
};
|
|
|
|
|
|
|
|
lstAdd(fileList, &file);
|
|
|
|
|
2019-07-25 14:34:16 -04:00
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
2022-02-14 13:24:14 -06:00
|
|
|
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeNone, 1, true, cipherTypeNone, NULL, fileList), 0),
|
2021-07-15 17:00:20 -04:00
|
|
|
"file in repo only, checksum in repo equal, ignoreMissing=true, no pageChecksum, delta, no hasReference");
|
|
|
|
TEST_RESULT_UINT(result.copySize + result.repoSize, 0, "copy=repo=0 size");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultSkip, "skip file");
|
|
|
|
TEST_RESULT_PTR(result.copyChecksum, NULL, "copy checksum NULL");
|
|
|
|
TEST_RESULT_PTR(result.pageChecksumResult, NULL, "page checksum result is NULL");
|
|
|
|
TEST_STORAGE_LIST(
|
|
|
|
storageRepo(), STORAGE_REPO_BACKUP "/20190718-155825F", BOGUS_STR "\n", .comment = "file removed from repo");
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_TITLE("compression set, all other boolean parameters false - COPY");
|
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
fileList = lstNewP(sizeof(BackupFile));
|
|
|
|
|
|
|
|
file = (BackupFile)
|
|
|
|
{
|
|
|
|
.pgFile = pgFile,
|
|
|
|
.pgFileIgnoreMissing = false,
|
|
|
|
.pgFileSize = 9,
|
|
|
|
.pgFileCopyExactSize = true,
|
|
|
|
.pgFileChecksum = NULL,
|
|
|
|
.pgFileChecksumPage = false,
|
|
|
|
.manifestFile = pgFile,
|
|
|
|
.manifestFileHasReference = false,
|
|
|
|
};
|
|
|
|
|
|
|
|
lstAdd(fileList, &file);
|
|
|
|
|
|
|
|
repoFile = strNewFmt(STORAGE_REPO_BACKUP "/%s/%s.gz", strZ(backupLabel), strZ(file.manifestFile));
|
|
|
|
|
2019-07-25 14:34:16 -04:00
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
2022-02-14 13:24:14 -06:00
|
|
|
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeGz, 3, false, cipherTypeNone, NULL, fileList), 0),
|
2019-07-25 14:34:16 -04:00
|
|
|
"pg file exists, no checksum, no ignoreMissing, compression, no pageChecksum, no delta, no hasReference");
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_RESULT_UINT(result.copySize, 9, "copy=pgFile size");
|
|
|
|
TEST_RESULT_UINT(result.repoSize, 29, "repo compress size");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultCopy, "copy file");
|
|
|
|
TEST_RESULT_STR_Z(result.copyChecksum, "9bc8ab2dda60ef4beed07d1e19ce0676d5edde67", "copy checksum");
|
|
|
|
TEST_RESULT_PTR(result.pageChecksumResult, NULL, "page checksum result is NULL");
|
|
|
|
TEST_STORAGE_EXISTS(
|
2022-05-06 12:32:49 -04:00
|
|
|
storageRepo(), zNewFmt(STORAGE_REPO_BACKUP "/%s/%s.gz", strZ(backupLabel), strZ(pgFile)),
|
2021-07-15 17:00:20 -04:00
|
|
|
.comment = "copy file to repo compress success");
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_TITLE("pg and repo file exist & match, prior checksum, compression - COPY CHECKSUM");
|
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
fileList = lstNewP(sizeof(BackupFile));
|
|
|
|
|
|
|
|
file = (BackupFile)
|
|
|
|
{
|
|
|
|
.pgFile = pgFile,
|
|
|
|
.pgFileIgnoreMissing = false,
|
|
|
|
.pgFileSize = 9,
|
|
|
|
.pgFileCopyExactSize = true,
|
|
|
|
.pgFileChecksum = STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"),
|
|
|
|
.pgFileChecksumPage = false,
|
|
|
|
.manifestFile = pgFile,
|
|
|
|
.manifestFileHasReference = false,
|
|
|
|
};
|
|
|
|
|
|
|
|
lstAdd(fileList, &file);
|
|
|
|
|
2019-07-25 14:34:16 -04:00
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
2022-02-14 13:24:14 -06:00
|
|
|
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeGz, 3, false, cipherTypeNone, NULL, fileList), 0),
|
2019-07-25 14:34:16 -04:00
|
|
|
"pg file & repo exists, match, checksum, no ignoreMissing, compression, no pageChecksum, no delta, no hasReference");
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_RESULT_UINT(result.copySize, 9, "copy=pgFile size");
|
2022-02-09 09:32:23 -06:00
|
|
|
TEST_RESULT_UINT(result.repoSize, 0, "repo size not calculated");
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultChecksum, "checksum file");
|
|
|
|
TEST_RESULT_STR_Z(result.copyChecksum, "9bc8ab2dda60ef4beed07d1e19ce0676d5edde67", "compressed repo file checksum matches");
|
|
|
|
TEST_STORAGE_EXISTS(
|
2022-05-06 12:32:49 -04:00
|
|
|
storageRepo(), zNewFmt(STORAGE_REPO_BACKUP "/%s/%s.gz", strZ(backupLabel), strZ(pgFile)),
|
2021-07-15 17:00:20 -04:00
|
|
|
.comment = "compressed file exists");
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_TITLE("create a zero sized file - checksum will be set but in backupManifestUpdate it will not be copied");
|
|
|
|
|
|
|
|
// Create zero sized file in pg
|
|
|
|
HRN_STORAGE_PUT_EMPTY(storagePgWrite(), "zerofile");
|
2019-07-25 14:34:16 -04:00
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
fileList = lstNewP(sizeof(BackupFile));
|
|
|
|
|
|
|
|
file = (BackupFile)
|
|
|
|
{
|
|
|
|
.pgFile = STRDEF("zerofile"),
|
|
|
|
.pgFileIgnoreMissing = false,
|
|
|
|
.pgFileSize = 0,
|
|
|
|
.pgFileCopyExactSize = true,
|
|
|
|
.pgFileChecksum = NULL,
|
|
|
|
.pgFileChecksumPage = false,
|
|
|
|
.manifestFile = STRDEF("zerofile"),
|
|
|
|
.manifestFileHasReference = false,
|
|
|
|
};
|
|
|
|
|
|
|
|
lstAdd(fileList, &file);
|
|
|
|
|
|
|
|
repoFile = strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(backupLabel), strZ(file.manifestFile));
|
|
|
|
|
2019-07-25 14:34:16 -04:00
|
|
|
// No prior checksum, no compression, no pageChecksum, no delta, no hasReference
|
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
2022-02-14 13:24:14 -06:00
|
|
|
*(BackupFileResult *)lstGet(backupFile(repoFile, compressTypeNone, 1, false, cipherTypeNone, NULL, fileList), 0),
|
2019-07-25 14:34:16 -04:00
|
|
|
"zero-sized pg file exists, no repo file, no ignoreMissing, no pageChecksum, no delta, no hasReference");
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_RESULT_UINT(result.copySize + result.repoSize, 0, "copy=repo=pgFile size 0");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultCopy, "copy file");
|
|
|
|
TEST_RESULT_PTR_NE(result.copyChecksum, NULL, "checksum set");
|
|
|
|
TEST_RESULT_PTR(result.pageChecksumResult, NULL, "page checksum result is NULL");
|
|
|
|
TEST_STORAGE_LIST(
|
|
|
|
storageRepo(), STORAGE_REPO_BACKUP "/20190718-155825F",
|
|
|
|
BOGUS_STR "\n"
|
|
|
|
"testfile.gz\n"
|
|
|
|
"zerofile\n",
|
|
|
|
.comment = "copy zero file to repo success");
|
2019-07-25 14:34:16 -04:00
|
|
|
|
2022-01-09 10:11:00 -05:00
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("copy file to encrypted repo");
|
|
|
|
|
2019-07-25 14:34:16 -04:00
|
|
|
// Load Parameters
|
2022-01-09 10:11:00 -05:00
|
|
|
argList = strLstNew();
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoPath, TEST_PATH "/repo");
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptPgPath, TEST_PATH "/pg");
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
|
|
|
hrnCfgArgRawStrId(argList, cfgOptRepoCipherType, cipherTypeAes256Cbc);
|
|
|
|
hrnCfgEnvRawZ(cfgOptRepoCipherPass, TEST_CIPHER_PASS);
|
2021-06-01 09:03:44 -04:00
|
|
|
HRN_CFG_LOAD(cfgCmdBackup, argList);
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgEnvRemoveRaw(cfgOptRepoCipherPass);
|
2019-07-25 14:34:16 -04:00
|
|
|
|
2021-07-15 17:00:20 -04:00
|
|
|
// Create the pg path and pg file to backup
|
|
|
|
HRN_STORAGE_PUT_Z(storagePgWrite(), strZ(pgFile), "atestfile");
|
2019-07-25 14:34:16 -04:00
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
fileList = lstNewP(sizeof(BackupFile));
|
|
|
|
|
|
|
|
file = (BackupFile)
|
|
|
|
{
|
|
|
|
.pgFile = pgFile,
|
|
|
|
.pgFileIgnoreMissing = false,
|
|
|
|
.pgFileSize = 9,
|
|
|
|
.pgFileCopyExactSize = true,
|
|
|
|
.pgFileChecksum = NULL,
|
|
|
|
.pgFileChecksumPage = false,
|
|
|
|
.manifestFile = pgFile,
|
|
|
|
.manifestFileHasReference = false,
|
|
|
|
};
|
|
|
|
|
|
|
|
lstAdd(fileList, &file);
|
|
|
|
|
|
|
|
repoFile = strNewFmt(STORAGE_REPO_BACKUP "/%s/%s", strZ(backupLabel), strZ(file.manifestFile));
|
|
|
|
|
2019-07-25 14:34:16 -04:00
|
|
|
// No prior checksum, no compression, no pageChecksum, no delta, no hasReference
|
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
2022-02-14 13:24:14 -06:00
|
|
|
*(BackupFileResult *)lstGet(
|
|
|
|
backupFile(repoFile, compressTypeNone, 1, false, cipherTypeAes256Cbc, STRDEF(TEST_CIPHER_PASS), fileList), 0),
|
2019-07-25 14:34:16 -04:00
|
|
|
"pg file exists, no repo file, no ignoreMissing, no pageChecksum, no delta, no hasReference");
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_RESULT_UINT(result.copySize, 9, "copy size set");
|
|
|
|
TEST_RESULT_UINT(result.repoSize, 32, "repo size set");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultCopy, "copy file");
|
|
|
|
TEST_RESULT_STR_Z(result.copyChecksum, "9bc8ab2dda60ef4beed07d1e19ce0676d5edde67", "copy checksum");
|
|
|
|
TEST_RESULT_PTR(result.pageChecksumResult, NULL, "page checksum NULL");
|
|
|
|
TEST_STORAGE_GET(
|
|
|
|
storageRepo(), strZ(backupPathFile), "atestfile", .cipherType = cipherTypeAes256Cbc,
|
|
|
|
.comment = "copy file to encrypted repo success");
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_TITLE("delta, copy file (size mismatch) to encrypted repo");
|
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
fileList = lstNewP(sizeof(BackupFile));
|
|
|
|
|
|
|
|
file = (BackupFile)
|
|
|
|
{
|
|
|
|
.pgFile = pgFile,
|
|
|
|
.pgFileIgnoreMissing = false,
|
|
|
|
.pgFileSize = 8,
|
|
|
|
.pgFileCopyExactSize = true,
|
|
|
|
.pgFileChecksum = STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"),
|
|
|
|
.pgFileChecksumPage = false,
|
|
|
|
.manifestFile = pgFile,
|
|
|
|
.manifestFileHasReference = false,
|
|
|
|
};
|
|
|
|
|
|
|
|
lstAdd(fileList, &file);
|
|
|
|
|
2021-07-15 17:00:20 -04:00
|
|
|
// Delta but pgFile does not match size passed, prior checksum, no compression, no pageChecksum, delta, no hasReference
|
2019-07-25 14:34:16 -04:00
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
2022-02-14 13:24:14 -06:00
|
|
|
*(BackupFileResult *)lstGet(
|
|
|
|
backupFile(repoFile, compressTypeNone, 1, true, cipherTypeAes256Cbc, STRDEF(TEST_CIPHER_PASS), fileList), 0),
|
2019-07-25 14:34:16 -04:00
|
|
|
"pg and repo file exists, pgFileMatch false, no ignoreMissing, no pageChecksum, delta, no hasReference");
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_RESULT_UINT(result.copySize, 8, "copy size set");
|
|
|
|
TEST_RESULT_UINT(result.repoSize, 32, "repo size set");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultCopy, "copy file");
|
|
|
|
TEST_RESULT_STR_Z(result.copyChecksum, "acc972a8319d4903b839c64ec217faa3e77b4fcb", "copy checksum for size passed");
|
|
|
|
TEST_RESULT_PTR(result.pageChecksumResult, NULL, "page checksum NULL");
|
|
|
|
TEST_STORAGE_GET(
|
|
|
|
storageRepo(), strZ(backupPathFile), "atestfil", .cipherType = cipherTypeAes256Cbc,
|
|
|
|
.comment = "delta, copy file (size missmatch) to encrypted repo success");
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_TITLE("no delta, recopy (size mismatch) file to encrypted repo");
|
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
fileList = lstNewP(sizeof(BackupFile));
|
|
|
|
|
|
|
|
file = (BackupFile)
|
|
|
|
{
|
|
|
|
.pgFile = pgFile,
|
|
|
|
.pgFileIgnoreMissing = false,
|
|
|
|
.pgFileSize = 9,
|
|
|
|
.pgFileCopyExactSize = true,
|
|
|
|
.pgFileChecksum = STRDEF("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67"),
|
|
|
|
.pgFileChecksumPage = false,
|
|
|
|
.manifestFile = pgFile,
|
|
|
|
.manifestFileHasReference = false,
|
|
|
|
};
|
|
|
|
|
|
|
|
lstAdd(fileList, &file);
|
|
|
|
|
2019-07-25 14:34:16 -04:00
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
2022-02-14 13:24:14 -06:00
|
|
|
*(BackupFileResult *)lstGet(
|
|
|
|
backupFile(repoFile, compressTypeNone, 0, false, cipherTypeAes256Cbc, STRDEF(TEST_CIPHER_PASS), fileList), 0),
|
2021-07-15 17:00:20 -04:00
|
|
|
"pg and repo file exists, checksum mismatch, no ignoreMissing, no pageChecksum, no delta, no hasReference");
|
|
|
|
TEST_RESULT_UINT(result.copySize, 9, "copy size set");
|
|
|
|
TEST_RESULT_UINT(result.repoSize, 32, "repo size set");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultReCopy, "recopy file");
|
|
|
|
TEST_RESULT_STR_Z(result.copyChecksum, "9bc8ab2dda60ef4beed07d1e19ce0676d5edde67", "copy checksum");
|
|
|
|
TEST_RESULT_PTR(result.pageChecksumResult, NULL, "page checksum NULL");
|
|
|
|
TEST_STORAGE_GET(
|
|
|
|
storageRepoWrite(), strZ(backupPathFile), "atestfile", .cipherType = cipherTypeAes256Cbc,
|
|
|
|
.comment = "recopy file to encrypted repo success");
|
2019-07-25 14:34:16 -04:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_TITLE("no delta, recopy (checksum mismatch), file to encrypted repo");
|
2021-05-21 12:45:00 -04:00
|
|
|
|
2022-02-14 13:24:14 -06:00
|
|
|
fileList = lstNewP(sizeof(BackupFile));
|
|
|
|
|
|
|
|
file = (BackupFile)
|
|
|
|
{
|
|
|
|
.pgFile = pgFile,
|
|
|
|
.pgFileIgnoreMissing = false,
|
|
|
|
.pgFileSize = 9,
|
|
|
|
.pgFileCopyExactSize = true,
|
|
|
|
.pgFileChecksum = STRDEF("1234567890123456789012345678901234567890"),
|
|
|
|
.pgFileChecksumPage = false,
|
|
|
|
.manifestFile = pgFile,
|
|
|
|
.manifestFileHasReference = false,
|
|
|
|
};
|
|
|
|
|
|
|
|
lstAdd(fileList, &file);
|
|
|
|
|
2021-05-21 12:45:00 -04:00
|
|
|
TEST_ASSIGN(
|
|
|
|
result,
|
2022-02-14 13:24:14 -06:00
|
|
|
*(BackupFileResult *)lstGet(
|
|
|
|
backupFile(repoFile, compressTypeNone, 0, false, cipherTypeAes256Cbc, STRDEF(TEST_CIPHER_PASS), fileList), 0),
|
2021-05-21 12:45:00 -04:00
|
|
|
"backup file");
|
|
|
|
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_RESULT_UINT(result.copySize, 9, "copy size set");
|
|
|
|
TEST_RESULT_UINT(result.repoSize, 32, "repo size set");
|
|
|
|
TEST_RESULT_UINT(result.backupCopyResult, backupCopyResultReCopy, "recopy file");
|
|
|
|
TEST_RESULT_STR_Z(result.copyChecksum, "9bc8ab2dda60ef4beed07d1e19ce0676d5edde67", "copy checksum for size passed");
|
|
|
|
TEST_RESULT_PTR(result.pageChecksumResult, NULL, "page checksum NULL");
|
|
|
|
TEST_STORAGE_GET(
|
|
|
|
storageRepo(), strZ(backupPathFile), "atestfile",
|
|
|
|
.cipherType = cipherTypeAes256Cbc, .comment = "recopy file to encrypted repo, success");
|
2019-07-25 14:34:16 -04:00
|
|
|
}
|
|
|
|
|
2019-12-13 17:14:26 -05:00
|
|
|
// *****************************************************************************************************************************
|
|
|
|
if (testBegin("backupLabelCreate()"))
|
|
|
|
{
|
|
|
|
StringList *argList = strLstNew();
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoPath, TEST_PATH "/repo");
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptPgPath, TEST_PATH "/pg1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2021-06-01 09:03:44 -04:00
|
|
|
HRN_CFG_LOAD(cfgCmdBackup, argList);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
time_t timestamp = 1575401652;
|
|
|
|
String *backupLabel = backupLabelFormat(backupTypeFull, NULL, timestamp);
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("assign label when no history");
|
|
|
|
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PATH_CREATE(storageRepoWrite(), STORAGE_REPO_BACKUP "/backup.history/2019");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2019-12-26 18:08:27 -07:00
|
|
|
TEST_RESULT_STR(backupLabelCreate(backupTypeFull, NULL, timestamp), backupLabel, "create label");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("assign label when history is older");
|
|
|
|
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PUT_EMPTY(
|
2022-05-06 12:32:49 -04:00
|
|
|
storageRepoWrite(),
|
|
|
|
zNewFmt(
|
|
|
|
STORAGE_REPO_BACKUP "/backup.history/2019/%s.manifest.gz",
|
|
|
|
strZ(backupLabelFormat(backupTypeFull, NULL, timestamp - 4))));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2019-12-26 18:08:27 -07:00
|
|
|
TEST_RESULT_STR(backupLabelCreate(backupTypeFull, NULL, timestamp), backupLabel, "create label");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("assign label when backup is older");
|
|
|
|
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PUT_EMPTY(
|
2022-05-06 12:32:49 -04:00
|
|
|
storageRepoWrite(), zNewFmt(STORAGE_REPO_BACKUP "/%s", strZ(backupLabelFormat(backupTypeFull, NULL, timestamp - 2))));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2019-12-26 18:08:27 -07:00
|
|
|
TEST_RESULT_STR(backupLabelCreate(backupTypeFull, NULL, timestamp), backupLabel, "create label");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("advance time when backup is same");
|
|
|
|
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PUT_EMPTY(
|
2022-05-06 12:32:49 -04:00
|
|
|
storageRepoWrite(), zNewFmt(STORAGE_REPO_BACKUP "/%s", strZ(backupLabelFormat(backupTypeFull, NULL, timestamp))));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
TEST_RESULT_STR_Z(backupLabelCreate(backupTypeFull, NULL, timestamp), "20191203-193413F", "create label");
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("error when new label is in the past even with advanced time");
|
|
|
|
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PUT_EMPTY(
|
2022-05-06 12:32:49 -04:00
|
|
|
storageRepoWrite(), zNewFmt(STORAGE_REPO_BACKUP "/%s", strZ(backupLabelFormat(backupTypeFull, NULL, timestamp + 1))));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
TEST_ERROR(
|
2022-05-05 10:19:11 -04:00
|
|
|
backupLabelCreate(backupTypeFull, NULL, timestamp), ClockError,
|
2019-12-13 17:14:26 -05:00
|
|
|
"new backup label '20191203-193413F' is not later than latest backup label '20191203-193413F'\n"
|
|
|
|
"HINT: has the timezone changed?\n"
|
|
|
|
"HINT: is there clock skew?");
|
2022-05-05 09:20:49 -04:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("error when new label is in the past even with advanced time (from history)");
|
|
|
|
|
|
|
|
HRN_STORAGE_PUT_EMPTY(
|
2022-05-06 12:32:49 -04:00
|
|
|
storageRepoWrite(),
|
|
|
|
zNewFmt(
|
|
|
|
STORAGE_REPO_BACKUP "/backup.history/2019/%s.manifest.gz",
|
|
|
|
strZ(backupLabelFormat(backupTypeFull, NULL, timestamp + 3600))));
|
2022-05-05 09:20:49 -04:00
|
|
|
|
|
|
|
TEST_ERROR(
|
2022-05-05 10:19:11 -04:00
|
|
|
backupLabelCreate(backupTypeFull, NULL, timestamp), ClockError,
|
2022-05-05 09:20:49 -04:00
|
|
|
"new backup label '20191203-193413F' is not later than latest backup label '20191203-203412F'\n"
|
|
|
|
"HINT: has the timezone changed?\n"
|
|
|
|
"HINT: is there clock skew?");
|
2019-12-13 17:14:26 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// *****************************************************************************************************************************
|
|
|
|
if (testBegin("backupInit()"))
|
|
|
|
{
|
|
|
|
// Set log level to detail
|
|
|
|
harnessLogLevelSet(logLevelDetail);
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("error when backup from standby is not supported");
|
|
|
|
|
|
|
|
StringList *argList = strLstNew();
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoPath, TEST_PATH "/repo");
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptPgPath, TEST_PATH "/pg1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawBool(argList, cfgOptBackupStandby, true);
|
2021-06-01 09:03:44 -04:00
|
|
|
HRN_CFG_LOAD(cfgCmdBackup, argList);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
TEST_ERROR(
|
2021-11-30 08:28:36 -05:00
|
|
|
backupInit(infoBackupNew(PG_VERSION_91, HRN_PG_SYSTEMID_91, hrnPgCatalogVersion(PG_VERSION_91), NULL)),
|
|
|
|
ConfigError, "option 'backup-standby' not valid for PostgreSQL < 9.2");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("warn and reset when backup from standby used in offline mode");
|
|
|
|
|
|
|
|
// Create pg_control
|
2021-11-30 13:23:11 -05:00
|
|
|
HRN_PG_CONTROL_PUT(storagePgWrite(), PG_VERSION_92);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
argList = strLstNew();
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoPath, TEST_PATH "/repo");
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptPgPath, TEST_PATH "/pg1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawBool(argList, cfgOptBackupStandby, true);
|
|
|
|
hrnCfgArgRawBool(argList, cfgOptOnline, false);
|
2021-06-01 09:03:44 -04:00
|
|
|
HRN_CFG_LOAD(cfgCmdBackup, argList);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2020-09-18 16:55:26 -04:00
|
|
|
TEST_RESULT_VOID(
|
2021-11-30 08:28:36 -05:00
|
|
|
backupInit(infoBackupNew(PG_VERSION_92, HRN_PG_SYSTEMID_92, hrnPgCatalogVersion(PG_VERSION_92), NULL)),
|
2020-09-18 16:55:26 -04:00
|
|
|
"backup init");
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_RESULT_BOOL(cfgOptionBool(cfgOptBackupStandby), false, "check backup-standby");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
TEST_RESULT_LOG(
|
|
|
|
"P00 WARN: option backup-standby is enabled but backup is offline - backups will be performed from the primary");
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("error when pg_control does not match stanza");
|
|
|
|
|
|
|
|
// Create pg_control
|
2021-11-30 13:23:11 -05:00
|
|
|
HRN_PG_CONTROL_PUT(storagePgWrite(), PG_VERSION_10);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
argList = strLstNew();
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoPath, TEST_PATH "/repo");
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptPgPath, TEST_PATH "/pg1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawBool(argList, cfgOptOnline, false);
|
2021-06-01 09:03:44 -04:00
|
|
|
HRN_CFG_LOAD(cfgCmdBackup, argList);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
TEST_ERROR(
|
2021-11-30 08:28:36 -05:00
|
|
|
backupInit(infoBackupNew(PG_VERSION_11, HRN_PG_SYSTEMID_11, hrnPgCatalogVersion(PG_VERSION_11), NULL)),
|
2020-09-18 16:55:26 -04:00
|
|
|
BackupMismatchError,
|
2021-11-30 08:28:36 -05:00
|
|
|
"PostgreSQL version 10, system-id " HRN_PG_SYSTEMID_10_Z " do not match stanza version 11, system-id"
|
|
|
|
" " HRN_PG_SYSTEMID_11_Z "\n"
|
2019-12-13 17:14:26 -05:00
|
|
|
"HINT: is this the correct stanza?");
|
|
|
|
TEST_ERROR(
|
2021-11-30 08:28:36 -05:00
|
|
|
backupInit(infoBackupNew(PG_VERSION_10, HRN_PG_SYSTEMID_11, hrnPgCatalogVersion(PG_VERSION_10), NULL)),
|
2020-09-18 16:55:26 -04:00
|
|
|
BackupMismatchError,
|
2021-11-30 08:28:36 -05:00
|
|
|
"PostgreSQL version 10, system-id " HRN_PG_SYSTEMID_10_Z " do not match stanza version 10, system-id"
|
|
|
|
" " HRN_PG_SYSTEMID_11_Z "\n"
|
2019-12-13 17:14:26 -05:00
|
|
|
"HINT: is this the correct stanza?");
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
2019-12-14 09:53:50 -05:00
|
|
|
TEST_TITLE("reset stop-auto when PostgreSQL < 9.3");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Create pg_control
|
2022-01-06 15:34:04 -05:00
|
|
|
HRN_PG_CONTROL_PUT(storagePgWrite(), PG_VERSION_90);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
argList = strLstNew();
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoPath, TEST_PATH "/repo");
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptPgPath, TEST_PATH "/pg1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawBool(argList, cfgOptOnline, false);
|
|
|
|
hrnCfgArgRawBool(argList, cfgOptStopAuto, true);
|
2021-06-01 09:03:44 -04:00
|
|
|
HRN_CFG_LOAD(cfgCmdBackup, argList);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2020-09-18 16:55:26 -04:00
|
|
|
TEST_RESULT_VOID(
|
2022-01-06 15:34:04 -05:00
|
|
|
backupInit(infoBackupNew(PG_VERSION_90, HRN_PG_SYSTEMID_90, hrnPgCatalogVersion(PG_VERSION_90), NULL)),
|
2020-09-18 16:55:26 -04:00
|
|
|
"backup init");
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_RESULT_BOOL(cfgOptionBool(cfgOptStopAuto), false, "check stop-auto");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2019-12-14 09:53:50 -05:00
|
|
|
TEST_RESULT_LOG("P00 WARN: stop-auto option is only available in PostgreSQL >= 9.3");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("reset checksum-page when the cluster does not have checksums enabled");
|
|
|
|
|
|
|
|
// Create pg_control
|
2021-11-30 13:23:11 -05:00
|
|
|
HRN_PG_CONTROL_PUT(storagePgWrite(), PG_VERSION_93);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-11-18 16:18:10 -05:00
|
|
|
// Create stanza
|
|
|
|
argList = strLstNew();
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoPath, TEST_PATH "/repo");
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptPgPath, TEST_PATH "/pg1");
|
|
|
|
hrnCfgArgRawBool(argList, cfgOptOnline, false);
|
|
|
|
HRN_CFG_LOAD(cfgCmdStanzaCreate, argList);
|
|
|
|
|
|
|
|
cmdStanzaCreate();
|
|
|
|
TEST_RESULT_LOG("P00 INFO: stanza-create for stanza 'test1' on repo1");
|
|
|
|
|
2019-12-13 17:14:26 -05:00
|
|
|
argList = strLstNew();
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoPath, TEST_PATH "/repo");
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptPgPath, TEST_PATH "/pg1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawBool(argList, cfgOptChecksumPage, true);
|
2021-06-01 09:03:44 -04:00
|
|
|
HRN_CFG_LOAD(cfgCmdBackup, argList);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
harnessPqScriptSet((HarnessPq [])
|
|
|
|
{
|
|
|
|
// Connect to primary
|
2021-07-15 17:00:20 -04:00
|
|
|
HRNPQ_MACRO_OPEN_GE_92(1, "dbname='postgres' port=5432", PG_VERSION_93, TEST_PATH "/pg1", false, NULL, NULL),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
HRNPQ_MACRO_DONE()
|
|
|
|
});
|
|
|
|
|
2020-09-18 16:55:26 -04:00
|
|
|
TEST_RESULT_VOID(
|
2021-11-30 08:28:36 -05:00
|
|
|
dbFree(
|
|
|
|
backupInit(infoBackupNew(PG_VERSION_93, HRN_PG_SYSTEMID_93, hrnPgCatalogVersion(PG_VERSION_93), NULL))->dbPrimary),
|
2020-09-18 16:55:26 -04:00
|
|
|
"backup init");
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_RESULT_BOOL(cfgOptionBool(cfgOptChecksumPage), false, "check checksum-page");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
TEST_RESULT_LOG(
|
|
|
|
"P00 WARN: checksum-page option set to true but checksums are not enabled on the cluster, resetting to false");
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("ok if cluster checksums are enabled and checksum-page is any value");
|
|
|
|
|
|
|
|
// Create pg_control with page checksums
|
2021-11-30 13:23:11 -05:00
|
|
|
HRN_PG_CONTROL_PUT(storagePgWrite(), PG_VERSION_93, .pageChecksum = true);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
argList = strLstNew();
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoPath, TEST_PATH "/repo");
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptPgPath, TEST_PATH "/pg1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawBool(argList, cfgOptChecksumPage, false);
|
2021-06-01 09:03:44 -04:00
|
|
|
HRN_CFG_LOAD(cfgCmdBackup, argList);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
harnessPqScriptSet((HarnessPq [])
|
|
|
|
{
|
|
|
|
// Connect to primary
|
2021-07-15 17:00:20 -04:00
|
|
|
HRNPQ_MACRO_OPEN_GE_92(1, "dbname='postgres' port=5432", PG_VERSION_93, TEST_PATH "/pg1", false, NULL, NULL),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
HRNPQ_MACRO_DONE()
|
|
|
|
});
|
|
|
|
|
2020-09-18 16:55:26 -04:00
|
|
|
TEST_RESULT_VOID(
|
2021-11-30 08:28:36 -05:00
|
|
|
dbFree(
|
|
|
|
backupInit(infoBackupNew(PG_VERSION_93, HRN_PG_SYSTEMID_93, hrnPgCatalogVersion(PG_VERSION_93), NULL))->dbPrimary),
|
2020-09-18 16:55:26 -04:00
|
|
|
"backup init");
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_RESULT_BOOL(cfgOptionBool(cfgOptChecksumPage), false, "check checksum-page");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Create pg_control without page checksums
|
2021-11-30 13:23:11 -05:00
|
|
|
HRN_PG_CONTROL_PUT(storagePgWrite(), PG_VERSION_93);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
harnessPqScriptSet((HarnessPq [])
|
|
|
|
{
|
|
|
|
// Connect to primary
|
2021-07-15 17:00:20 -04:00
|
|
|
HRNPQ_MACRO_OPEN_GE_92(1, "dbname='postgres' port=5432", PG_VERSION_93, TEST_PATH "/pg1", false, NULL, NULL),
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
HRNPQ_MACRO_DONE()
|
|
|
|
});
|
|
|
|
|
2020-09-18 16:55:26 -04:00
|
|
|
TEST_RESULT_VOID(
|
2021-11-30 08:28:36 -05:00
|
|
|
dbFree(
|
|
|
|
backupInit(infoBackupNew(PG_VERSION_93, HRN_PG_SYSTEMID_93, hrnPgCatalogVersion(PG_VERSION_93), NULL))->dbPrimary),
|
2020-09-18 16:55:26 -04:00
|
|
|
"backup init");
|
2021-07-15 17:00:20 -04:00
|
|
|
TEST_RESULT_BOOL(cfgOptionBool(cfgOptChecksumPage), false, "check checksum-page");
|
2019-12-13 17:14:26 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// *****************************************************************************************************************************
|
|
|
|
if (testBegin("backupTime()"))
{
    // -------------------------------------------------------------------------------------------------------------------------
    TEST_TITLE("sleep retries and stall error");

    // Create pg_control
    HRN_PG_CONTROL_PUT(storagePgWrite(), PG_VERSION_93);

    // Create stanza
    StringList *argList = strLstNew();
    hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
    hrnCfgArgRawZ(argList, cfgOptRepoPath, TEST_PATH "/repo");
    hrnCfgArgRawZ(argList, cfgOptPgPath, TEST_PATH "/pg1");
    hrnCfgArgRawBool(argList, cfgOptOnline, false);
    HRN_CFG_LOAD(cfgCmdStanzaCreate, argList);

    cmdStanzaCreate();
    TEST_RESULT_LOG("P00 INFO: stanza-create for stanza 'test1' on repo1");

    // Load backup command configuration for backupInit()/backupTime()
    argList = strLstNew();
    hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
    hrnCfgArgRawZ(argList, cfgOptRepoPath, TEST_PATH "/repo");
    hrnCfgArgRawZ(argList, cfgOptPgPath, TEST_PATH "/pg1");
    hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
    HRN_CFG_LOAD(cfgCmdBackup, argList);

    // Script the PostgreSQL connection and the sequence of time queries backupTime() will issue
    harnessPqScriptSet((HarnessPq [])
    {
        // Connect to primary
        HRNPQ_MACRO_OPEN_GE_92(1, "dbname='postgres' port=5432", PG_VERSION_93, TEST_PATH "/pg1", false, NULL, NULL),

        // Advance the time slowly to force retries (998ms -> 999ms -> 1001ms crosses the second boundary on the third query)
        HRNPQ_MACRO_TIME_QUERY(1, 1575392588998),
        HRNPQ_MACRO_TIME_QUERY(1, 1575392588999),
        HRNPQ_MACRO_TIME_QUERY(1, 1575392589001),

        // Stall time to force an error (the clock never reaches the next second in these four queries)
        HRNPQ_MACRO_TIME_QUERY(1, 1575392589998),
        HRNPQ_MACRO_TIME_QUERY(1, 1575392589997),
        HRNPQ_MACRO_TIME_QUERY(1, 1575392589998),
        HRNPQ_MACRO_TIME_QUERY(1, 1575392589999),

        HRNPQ_MACRO_DONE()
    });

    BackupData *backupData = backupInit(
        infoBackupNew(PG_VERSION_93, HRN_PG_SYSTEMID_93, hrnPgCatalogVersion(PG_VERSION_93), NULL));

    // Expected result 1575392588 is the seconds portion of the scripted millisecond timestamps above
    TEST_RESULT_INT(backupTime(backupData, true), 1575392588, "multiple tries for sleep");
    TEST_ERROR(backupTime(backupData, true), KernelError, "PostgreSQL clock has not advanced to the next second after 3 tries");

    dbFree(backupData->dbPrimary);
}
|
|
|
|
|
|
|
|
// *****************************************************************************************************************************
|
|
|
|
if (testBegin("backupResumeFind()"))
{
    // Minimal backup configuration shared by all of the resume tests below
    StringList *argList = strLstNew();
    hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
    hrnCfgArgRawZ(argList, cfgOptRepoPath, TEST_PATH "/repo");
    hrnCfgArgRawZ(argList, cfgOptPgPath, "/pg");
    hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
    hrnCfgArgRawStrId(argList, cfgOptType, backupTypeFull);
    hrnCfgArgRawBool(argList, cfgOptCompress, false);
    HRN_CFG_LOAD(cfgCmdBackup, argList);

    // -------------------------------------------------------------------------------------------------------------------------
    TEST_TITLE("cannot resume when manifest and copy are missing");

    HRN_STORAGE_PATH_CREATE(storageRepoWrite(), STORAGE_REPO_BACKUP "/20191003-105320F");

    // (Manifest *)1 is a dummy non-null pointer -- presumably backupResumeFind() rejects this path before dereferencing it.
    // NOTE(review): confirm against the backupResumeFind() implementation.
    TEST_RESULT_PTR(backupResumeFind((Manifest *)1, NULL), NULL, "find resumable backup");

    TEST_RESULT_LOG(
        "P00 WARN: backup '20191003-105320F' cannot be resumed: partially deleted by prior resume or invalid");

    // -------------------------------------------------------------------------------------------------------------------------
    TEST_TITLE("cannot resume when resume is disabled");

    HRN_STORAGE_PATH_CREATE(storageRepoWrite(), STORAGE_REPO_BACKUP "/20191003-105320F");

    cfgOptionSet(cfgOptResume, cfgSourceParam, BOOL_FALSE_VAR);

    HRN_STORAGE_PUT_EMPTY(storageRepoWrite(), STORAGE_REPO_BACKUP "/20191003-105320F/" BACKUP_MANIFEST_FILE INFO_COPY_EXT);

    TEST_RESULT_PTR(backupResumeFind((Manifest *)1, NULL), NULL, "find resumable backup");

    TEST_RESULT_LOG("P00 INFO: backup '20191003-105320F' cannot be resumed: resume is disabled");

    TEST_STORAGE_LIST_EMPTY(storageRepo(), STORAGE_REPO_BACKUP, .comment = "check backup path removed");

    // Re-enable resume for the remaining tests
    cfgOptionSet(cfgOptResume, cfgSourceParam, BOOL_TRUE_VAR);

    // -------------------------------------------------------------------------------------------------------------------------
    TEST_TITLE("cannot resume when error on manifest load");

    Manifest *manifest = NULL;

    OBJ_NEW_BEGIN(Manifest, .childQty = MEM_CONTEXT_QTY_MAX)
    {
        manifest = manifestNewInternal();
        manifest->pub.data.backupType = backupTypeFull;
        manifest->pub.data.backrestVersion = STRDEF("BOGUS");
    }
    OBJ_NEW_END();

    // "X" is not a valid manifest so loading the copy will fail
    HRN_STORAGE_PUT_Z(storageRepoWrite(), STORAGE_REPO_BACKUP "/20191003-105320F/" BACKUP_MANIFEST_FILE INFO_COPY_EXT, "X");

    TEST_RESULT_PTR(backupResumeFind(manifest, NULL), NULL, "find resumable backup");

    TEST_RESULT_LOG(
        "P00 WARN: backup '20191003-105320F' cannot be resumed: unable to read"
        " <REPO:BACKUP>/20191003-105320F/backup.manifest.copy");

    // -------------------------------------------------------------------------------------------------------------------------
    TEST_TITLE("cannot resume when pgBackRest version has changed");

    // Build a minimal manifest representing the on-disk (resumable) backup
    Manifest *manifestResume = NULL;

    OBJ_NEW_BEGIN(Manifest, .childQty = MEM_CONTEXT_QTY_MAX)
    {
        manifestResume = manifestNewInternal();
        manifestResume->pub.info = infoNew(NULL);
        manifestResume->pub.data.backupType = backupTypeFull;
        manifestResume->pub.data.backupLabel = strNewZ("20191003-105320F");
        manifestResume->pub.data.pgVersion = PG_VERSION_12;
    }
    OBJ_NEW_END();

    manifestTargetAdd(manifestResume, &(ManifestTarget){.name = MANIFEST_TARGET_PGDATA_STR, .path = STRDEF("/pg")});
    manifestPathAdd(manifestResume, &(ManifestPath){.name = MANIFEST_TARGET_PGDATA_STR});
    manifestFileAdd(manifestResume, &(ManifestFile){.name = STRDEF("pg_data/" PG_FILE_PGVERSION)});

    manifestSave(
        manifestResume,
        storageWriteIo(
            storageNewWriteP(
                storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F/" BACKUP_MANIFEST_FILE INFO_COPY_EXT))));

    // The new manifest still carries version 'BOGUS' so it cannot match the resumable manifest
    TEST_RESULT_PTR(backupResumeFind(manifest, NULL), NULL, "find resumable backup");

    TEST_RESULT_LOG(
        "P00 WARN: backup '20191003-105320F' cannot be resumed:"
        " new pgBackRest version 'BOGUS' does not match resumable pgBackRest version '" PROJECT_VERSION "'");

    TEST_STORAGE_LIST_EMPTY(storageRepo(), STORAGE_REPO_BACKUP, .comment = "check backup path removed");

    // Version now matches so subsequent tests get past the version check
    manifest->pub.data.backrestVersion = STRDEF(PROJECT_VERSION);

    // -------------------------------------------------------------------------------------------------------------------------
    TEST_TITLE("cannot resume when backup labels do not match (resumable is null)");

    manifest->pub.data.backupType = backupTypeFull;
    manifest->pub.data.backupLabelPrior = STRDEF("20191003-105320F");

    manifestSave(
        manifestResume,
        storageWriteIo(
            storageNewWriteP(
                storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F/" BACKUP_MANIFEST_FILE INFO_COPY_EXT))));

    TEST_RESULT_PTR(backupResumeFind(manifest, NULL), NULL, "find resumable backup");

    TEST_RESULT_LOG(
        "P00 WARN: backup '20191003-105320F' cannot be resumed:"
        " new prior backup label '<undef>' does not match resumable prior backup label '20191003-105320F'");

    TEST_STORAGE_LIST_EMPTY(storageRepo(), STORAGE_REPO_BACKUP, .comment = "check backup path removed");

    manifest->pub.data.backupLabelPrior = NULL;

    // -------------------------------------------------------------------------------------------------------------------------
    TEST_TITLE("cannot resume when backup labels do not match (new is null)");

    manifest->pub.data.backupType = backupTypeFull;
    manifestResume->pub.data.backupLabelPrior = STRDEF("20191003-105320F");

    manifestSave(
        manifestResume,
        storageWriteIo(
            storageNewWriteP(
                storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F/" BACKUP_MANIFEST_FILE INFO_COPY_EXT))));

    TEST_RESULT_PTR(backupResumeFind(manifest, NULL), NULL, "find resumable backup");

    TEST_RESULT_LOG(
        "P00 WARN: backup '20191003-105320F' cannot be resumed:"
        " new prior backup label '20191003-105320F' does not match resumable prior backup label '<undef>'");

    TEST_STORAGE_LIST_EMPTY(storageRepo(), STORAGE_REPO_BACKUP, .comment = "check backup path removed");

    manifestResume->pub.data.backupLabelPrior = NULL;

    // -------------------------------------------------------------------------------------------------------------------------
    TEST_TITLE("cannot resume when compression does not match");

    manifestResume->pub.data.backupOptionCompressType = compressTypeGz;

    manifestSave(
        manifestResume,
        storageWriteIo(
            storageNewWriteP(
                storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F/" BACKUP_MANIFEST_FILE INFO_COPY_EXT))));

    TEST_RESULT_PTR(backupResumeFind(manifest, NULL), NULL, "find resumable backup");

    TEST_RESULT_LOG(
        "P00 WARN: backup '20191003-105320F' cannot be resumed:"
        " new compression 'none' does not match resumable compression 'gz'");

    TEST_STORAGE_LIST_EMPTY(storageRepo(), STORAGE_REPO_BACKUP, .comment = "check backup path removed");

    manifestResume->pub.data.backupOptionCompressType = compressTypeNone;
}
|
|
|
|
|
|
|
|
// *****************************************************************************************************************************
|
|
|
|
if (testBegin("backupJobResult()"))
{
    // Set log level to detail
    harnessLogLevelSet(logLevelDetail);

    // -------------------------------------------------------------------------------------------------------------------------
    TEST_TITLE("report job error");

    // A job with an error set should have its error rethrown by backupJobResult()
    ProtocolParallelJob *job = protocolParallelJobNew(VARSTRDEF("key"), protocolCommandNew(strIdFromZ("x")));
    protocolParallelJobErrorSet(job, errorTypeCode(&AssertError), STRDEF("error message"));

    unsigned int currentPercentComplete = 0;

    TEST_ERROR(
        backupJobResult((Manifest *)1, NULL, storageTest, strLstNew(), job, false, 0, NULL, &currentPercentComplete),
        AssertError, "error message");

    // -------------------------------------------------------------------------------------------------------------------------
    TEST_TITLE("report host/100% progress on noop result");

    // Create job that skips file
    job = protocolParallelJobNew(VARSTRDEF("pg_data/test"), protocolCommandNew(strIdFromZ("x")));

    // Write fields in the order backupJobResult() unpacks them: file name, copy result, then sizes/checksums
    // (field meanings inferred from the write order -- confirm against the backup file protocol)
    PackWrite *const resultPack = protocolPackNew();
    pckWriteStrP(resultPack, STRDEF("pg_data/test"));
    pckWriteU32P(resultPack, backupCopyResultNoOp);
    pckWriteU64P(resultPack, 0);
    pckWriteU64P(resultPack, 0);
    pckWriteStrP(resultPack, NULL);
    pckWriteStrP(resultPack, NULL);
    pckWriteEndP(resultPack);

    protocolParallelJobResultSet(job, pckReadNew(pckWriteResult(resultPack)));

    // Create manifest with file
    Manifest *manifest = NULL;

    OBJ_NEW_BEGIN(Manifest, .childQty = MEM_CONTEXT_QTY_MAX)
    {
        manifest = manifestNewInternal();
        manifestFileAdd(manifest, &(ManifestFile){.name = STRDEF("pg_data/test")});
    }
    OBJ_NEW_END();

    uint64_t sizeProgress = 0;
    currentPercentComplete = 4567;

    // The backup lock must be held while the job result is processed
    TEST_RESULT_VOID(
        lockAcquire(TEST_PATH_STR, cfgOptionStr(cfgOptStanza), cfgOptionStr(cfgOptExecId), lockTypeBackup, 0, true),
        "acquire backup lock");
    TEST_RESULT_VOID(
        backupJobResult(manifest, STRDEF("host"), storageTest, strLstNew(), job, false, 0, &sizeProgress,
        &currentPercentComplete), "log noop result");
    TEST_RESULT_VOID(lockRelease(true), "release backup lock");

    // The host name appears in the log message and progress is reported as 100%
    TEST_RESULT_LOG("P00 DETAIL: match file from prior backup host:" TEST_PATH "/test (0B, 100.00%)");
}
|
|
|
|
|
|
|
|
// Offline tests should only be used to test offline functionality and errors easily tested in offline mode
|
|
|
|
// *****************************************************************************************************************************
|
|
|
|
if (testBegin("cmdBackup() offline"))
{
    // Offline (--no-online) cmdBackup() tests: postmaster check, full/incr/diff sequencing, and multi-repo selection

    // Set log level to detail
    harnessLogLevelSet(logLevelDetail);

    // Replace backup labels since the times are not deterministic
    hrnLogReplaceAdd("[0-9]{8}-[0-9]{6}F_[0-9]{8}-[0-9]{6}I", NULL, "INCR", true);
    hrnLogReplaceAdd("[0-9]{8}-[0-9]{6}F_[0-9]{8}-[0-9]{6}D", NULL, "DIFF", true);
    hrnLogReplaceAdd("[0-9]{8}-[0-9]{6}F", NULL, "FULL", true);

    // Create stanza
    StringList *argList = strLstNew();
    hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
    hrnCfgArgRawZ(argList, cfgOptRepoPath, TEST_PATH "/repo");
    hrnCfgArgRawZ(argList, cfgOptPgPath, TEST_PATH "/pg1");
    hrnCfgArgRawBool(argList, cfgOptOnline, false);
    HRN_CFG_LOAD(cfgCmdStanzaCreate, argList);

    // Create pg_control
    HRN_PG_CONTROL_PUT(storagePgWrite(), PG_VERSION_90);

    cmdStanzaCreate();
    TEST_RESULT_LOG("P00 INFO: stanza-create for stanza 'test1' on repo1");

    // -------------------------------------------------------------------------------------------------------------------------
    TEST_TITLE("error when pg appears to be running");

    argList = strLstNew();
    hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
    hrnCfgArgRawZ(argList, cfgOptRepoPath, TEST_PATH "/repo");
    hrnCfgArgRawZ(argList, cfgOptPgPath, TEST_PATH "/pg1");
    hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
    hrnCfgArgRawBool(argList, cfgOptOnline, false);
    HRN_CFG_LOAD(cfgCmdBackup, argList);

    // A postmaster pid file makes PostgreSQL look like it is running
    HRN_STORAGE_PUT_Z(storagePgWrite(), PG_FILE_POSTMTRPID, "PID");

    TEST_ERROR(
        testCmdBackup(), PgRunningError,
        "--no-online passed but " PG_FILE_POSTMTRPID " exists - looks like " PG_NAME " is running. Shut down " PG_NAME " and"
        " try again, or use --force.");

    TEST_RESULT_LOG("P00 WARN: no prior backup exists, incr backup has been changed to full");

    // -------------------------------------------------------------------------------------------------------------------------
    TEST_TITLE("offline full backup");

    argList = strLstNew();
    hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
    hrnCfgArgRawZ(argList, cfgOptRepoPath, TEST_PATH "/repo");
    hrnCfgArgRawZ(argList, cfgOptPgPath, TEST_PATH "/pg1");
    hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
    hrnCfgArgRawBool(argList, cfgOptOnline, false);
    hrnCfgArgRawBool(argList, cfgOptCompress, false);
    hrnCfgArgRawBool(argList, cfgOptForce, true);
    HRN_CFG_LOAD(cfgCmdBackup, argList);

    HRN_STORAGE_PUT_Z(storagePgWrite(), "postgresql.conf", "CONFIGSTUFF");

    TEST_RESULT_VOID(testCmdBackup(), "backup");

    // The pg_control checksum differs by architecture so it is supplied as a format parameter
    TEST_RESULT_LOG_FMT(
        "P00 WARN: no prior backup exists, incr backup has been changed to full\n"
        "P00 WARN: --no-online passed and " PG_FILE_POSTMTRPID " exists but --force was passed so backup will continue though"
        " it looks like " PG_NAME " is running and the backup will probably not be consistent\n"
        "P01 DETAIL: backup file " TEST_PATH "/pg1/global/pg_control (8KB, 99.86%%) checksum %s\n"
        "P01 DETAIL: backup file " TEST_PATH "/pg1/postgresql.conf (11B, 100.00%%) checksum"
        " e3db315c260e79211b7b52587123b7aa060f30ab\n"
        "P00 INFO: new backup label = [FULL-1]\n"
        "P00 INFO: full backup size = 8KB, file total = 2",
        TEST_64BIT() ?
            (TEST_BIG_ENDIAN() ? "ec84602c8b4f62bd0ef10bd3dfcb04c3b3ce4a35" : "b7ec43e4646f5d06c95881df0c572630a1221377") :
            "f21ff9abdcd1ec2f600d4ee8e5792c9b61eb2e37");

    // Make pg no longer appear to be running
    HRN_STORAGE_REMOVE(storagePgWrite(), PG_FILE_POSTMTRPID, .errorOnMissing = true);

    // -------------------------------------------------------------------------------------------------------------------------
    TEST_TITLE("error when no files have changed");

    argList = strLstNew();
    hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
    hrnCfgArgRawZ(argList, cfgOptRepoPath, TEST_PATH "/repo");
    hrnCfgArgRawZ(argList, cfgOptPgPath, TEST_PATH "/pg1");
    hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
    hrnCfgArgRawBool(argList, cfgOptOnline, false);
    hrnCfgArgRawBool(argList, cfgOptCompress, true);
    hrnCfgArgRawStrId(argList, cfgOptType, backupTypeDiff);
    HRN_CFG_LOAD(cfgCmdBackup, argList);

    TEST_ERROR(testCmdBackup(), FileMissingError, "no files have changed since the last backup - this seems unlikely");

    TEST_RESULT_LOG(
        "P00 INFO: last backup label = [FULL-1], version = " PROJECT_VERSION "\n"
        "P00 WARN: diff backup cannot alter compress-type option to 'gz', reset to value in [FULL-1]");

    // -------------------------------------------------------------------------------------------------------------------------
    TEST_TITLE("offline incr backup to test unresumable backup");

    argList = strLstNew();
    hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
    hrnCfgArgRawZ(argList, cfgOptRepoPath, TEST_PATH "/repo");
    hrnCfgArgRawZ(argList, cfgOptPgPath, TEST_PATH "/pg1");
    hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
    hrnCfgArgRawBool(argList, cfgOptOnline, false);
    hrnCfgArgRawBool(argList, cfgOptCompress, false);
    hrnCfgArgRawBool(argList, cfgOptChecksumPage, true);
    hrnCfgArgRawStrId(argList, cfgOptType, backupTypeIncr);
    HRN_CFG_LOAD(cfgCmdBackup, argList);

    HRN_STORAGE_PUT_Z(storagePgWrite(), PG_FILE_PGVERSION, "VER");

    TEST_RESULT_VOID(testCmdBackup(), "backup");

    TEST_RESULT_LOG(
        "P00 INFO: last backup label = [FULL-1], version = " PROJECT_VERSION "\n"
        "P00 WARN: incr backup cannot alter 'checksum-page' option to 'true', reset to 'false' from [FULL-1]\n"
        "P00 WARN: backup '[DIFF-1]' cannot be resumed: new backup type 'incr' does not match resumable backup type 'diff'\n"
        "P01 DETAIL: backup file " TEST_PATH "/pg1/PG_VERSION (3B, 100.00%) checksum c8663c2525f44b6d9c687fbceb4aafc63ed8b451\n"
        "P00 DETAIL: reference pg_data/global/pg_control to [FULL-1]\n"
        "P00 DETAIL: reference pg_data/postgresql.conf to [FULL-1]\n"
        "P00 INFO: new backup label = [INCR-1]\n"
        "P00 INFO: incr backup size = 3B, file total = 3");

    // -------------------------------------------------------------------------------------------------------------------------
    TEST_TITLE("offline diff backup to test prior backup must be full");

    argList = strLstNew();
    hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
    hrnCfgArgRawZ(argList, cfgOptRepoPath, TEST_PATH "/repo");
    hrnCfgArgRawZ(argList, cfgOptPgPath, TEST_PATH "/pg1");
    hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
    hrnCfgArgRawBool(argList, cfgOptOnline, false);
    hrnCfgArgRawBool(argList, cfgOptCompress, false);
    hrnCfgArgRawStrId(argList, cfgOptType, backupTypeDiff);
    HRN_CFG_LOAD(cfgCmdBackup, argList);

    // Align to the start of a second before rewriting PG_VERSION so its timestamp differs from the prior backup
    // (assumed intent -- confirm)
    sleepMSec(MSEC_PER_SEC - (timeMSec() % MSEC_PER_SEC));
    HRN_STORAGE_PUT_Z(storagePgWrite(), PG_FILE_PGVERSION, "VR2");

    TEST_RESULT_VOID(testCmdBackup(), "backup");

    TEST_RESULT_LOG(
        "P00 INFO: last backup label = [FULL-1], version = " PROJECT_VERSION "\n"
        "P01 DETAIL: backup file " TEST_PATH "/pg1/PG_VERSION (3B, 100.00%) checksum 6f1894088c578e4f0b9888e8e8a997d93cbbc0c5\n"
        "P00 DETAIL: reference pg_data/global/pg_control to [FULL-1]\n"
        "P00 DETAIL: reference pg_data/postgresql.conf to [FULL-1]\n"
        "P00 INFO: new backup label = [DIFF-2]\n"
        "P00 INFO: diff backup size = 3B, file total = 3");

    // -------------------------------------------------------------------------------------------------------------------------
    TEST_TITLE("only repo2 configured");

    // Create stanza on a second repo
    argList = strLstNew();
    hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
    hrnCfgArgKeyRawZ(argList, cfgOptRepoPath, 2, TEST_PATH "/repo2");
    hrnCfgArgKeyRawStrId(argList, cfgOptRepoCipherType, 2, cipherTypeAes256Cbc);
    hrnCfgEnvKeyRawZ(cfgOptRepoCipherPass, 2, TEST_CIPHER_PASS);
    hrnCfgArgRawZ(argList, cfgOptPgPath, TEST_PATH "/pg1");
    hrnCfgArgRawBool(argList, cfgOptOnline, false);
    HRN_CFG_LOAD(cfgCmdStanzaCreate, argList);

    cmdStanzaCreate();
    TEST_RESULT_LOG("P00 INFO: stanza-create for stanza 'test1' on repo2");

    // Set log level to warn
    harnessLogLevelSet(logLevelWarn);

    // With repo2 the only repo configured, ensure it is chosen by confirming diff is changed to full due to no prior backups
    hrnCfgArgKeyRawZ(argList, cfgOptRepoRetentionFull, 2, "1");
    hrnCfgArgRawStrId(argList, cfgOptType, backupTypeDiff);
    HRN_CFG_LOAD(cfgCmdBackup, argList);

    TEST_RESULT_VOID(testCmdBackup(), "backup");

    TEST_RESULT_LOG("P00 WARN: no prior backup exists, diff backup has been changed to full");

    // -------------------------------------------------------------------------------------------------------------------------
    TEST_TITLE("multi-repo");

    // Set log level to detail
    harnessLogLevelSet(logLevelDetail);

    // Add repo1 to the configuration
    hrnCfgArgKeyRawZ(argList, cfgOptRepoPath, 1, TEST_PATH "/repo");
    hrnCfgArgKeyRawZ(argList, cfgOptRepoRetentionFull, 1, "1");
    HRN_CFG_LOAD(cfgCmdBackup, argList);

    TEST_RESULT_VOID(testCmdBackup(), "backup");

    TEST_RESULT_LOG(
        "P00 INFO: repo option not specified, defaulting to repo1\n"
        "P00 INFO: last backup label = [FULL-1], version = " PROJECT_VERSION "\n"
        "P00 WARN: diff backup cannot alter compress-type option to 'gz', reset to value in [FULL-1]\n"
        "P01 DETAIL: backup file " TEST_PATH "/pg1/PG_VERSION (3B, 100.00%) checksum 6f1894088c578e4f0b9888e8e8a997d93cbbc0c5\n"
        "P00 DETAIL: reference pg_data/global/pg_control to [FULL-1]\n"
        "P00 DETAIL: reference pg_data/postgresql.conf to [FULL-1]\n"
        "P00 INFO: new backup label = [DIFF-3]\n"
        "P00 INFO: diff backup size = 3B, file total = 3");

    // -------------------------------------------------------------------------------------------------------------------------
    TEST_TITLE("multi-repo - specify repo");

    hrnCfgArgRawZ(argList, cfgOptRepo, "2");

    HRN_CFG_LOAD(cfgCmdBackup, argList);

    HRN_STORAGE_PUT_Z(storagePgWrite(), PG_FILE_PGVERSION, "VER");

    // STRDEF() since the path is a compile-time constant -- strNewFmt() allocated and formatted needlessly
    unsigned int backupCount = strLstSize(storageListP(storageRepoIdx(1), STRDEF(STORAGE_PATH_BACKUP "/test1")));

    TEST_RESULT_VOID(testCmdBackup(), "backup");

    TEST_RESULT_LOG(
        "P00 INFO: last backup label = [FULL-2], version = " PROJECT_VERSION "\n"
        "P01 DETAIL: backup file " TEST_PATH "/pg1/PG_VERSION (3B, 100.00%) checksum c8663c2525f44b6d9c687fbceb4aafc63ed8b451\n"
        "P00 DETAIL: reference pg_data/global/pg_control to [FULL-2]\n"
        "P00 DETAIL: reference pg_data/postgresql.conf to [FULL-2]\n"
        "P00 INFO: new backup label = [DIFF-4]\n"
        "P00 INFO: diff backup size = 3B, file total = 3");
    TEST_RESULT_UINT(
        strLstSize(storageListP(storageRepoIdx(1), STRDEF(STORAGE_PATH_BACKUP "/test1"))), backupCount + 1,
        "new backup repo2");

    // Cleanup
    hrnCfgEnvKeyRemoveRaw(cfgOptRepoCipherPass, 2);
    harnessLogLevelReset();
}
|
|
|
|
|
|
|
|
// *****************************************************************************************************************************
|
|
|
|
if (testBegin("cmdBackup() online"))
|
|
|
|
{
|
2021-05-22 09:30:54 -04:00
|
|
|
const String *pg1Path = STRDEF(TEST_PATH "/pg1");
|
|
|
|
const String *repoPath = STRDEF(TEST_PATH "/repo");
|
|
|
|
const String *pg2Path = STRDEF(TEST_PATH "/pg2");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Set log level to detail
|
|
|
|
harnessLogLevelSet(logLevelDetail);
|
|
|
|
|
|
|
|
// Replace percent complete and backup size since they can cause a lot of churn when files are added/removed
|
2022-03-06 14:01:24 -05:00
|
|
|
hrnLogReplaceAdd(", [0-9]{1,3}.[0-9]{1,2}%\\)", "[0-9].+%", "PCT", false);
|
2019-12-13 17:14:26 -05:00
|
|
|
hrnLogReplaceAdd(" backup size = [0-9]+[A-Z]+", "[^ ]+$", "SIZE", false);
|
|
|
|
|
|
|
|
// Replace checksums since they can differ between architectures (e.g. 32/64 bit)
|
|
|
|
hrnLogReplaceAdd("\\) checksum [a-f0-9]{40}", "[a-f0-9]{40}$", "SHA1", false);
|
|
|
|
|
|
|
|
// Backup start time epoch. The idea is to not have backup times (and therefore labels) ever change. Each backup added
|
|
|
|
// should be separated by 100,000 seconds (1,000,000 after stanza-upgrade) but after the initial assignments this will only
|
|
|
|
// be possible at the beginning and the end, so new backups added in the middle will average the start times of the prior
|
|
|
|
// and next backup to get their start time. Backups added to the beginning of the test will need to subtract from the
|
|
|
|
// epoch.
|
|
|
|
#define BACKUP_EPOCH 1570000000
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("online 9.5 resume uncompressed full backup");
|
|
|
|
|
|
|
|
time_t backupTimeStart = BACKUP_EPOCH;
|
|
|
|
|
|
|
|
{
|
|
|
|
// Create stanza
|
|
|
|
StringList *argList = strLstNew();
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawBool(argList, cfgOptOnline, false);
|
2021-06-01 09:03:44 -04:00
|
|
|
HRN_CFG_LOAD(cfgCmdStanzaCreate, argList);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-07-15 17:00:20 -04:00
|
|
|
// Create pg_control
|
2021-11-30 13:23:11 -05:00
|
|
|
HRN_PG_CONTROL_PUT(storagePgWrite(), PG_VERSION_95);
|
2021-07-15 17:00:20 -04:00
|
|
|
|
2019-12-13 17:14:26 -05:00
|
|
|
cmdStanzaCreate();
|
2021-05-22 14:09:45 -04:00
|
|
|
TEST_RESULT_LOG("P00 INFO: stanza-create for stanza 'test1' on repo1");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Load options
|
|
|
|
argList = strLstNew();
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2021-05-03 12:15:39 -04:00
|
|
|
hrnCfgArgRawStrId(argList, cfgOptType, backupTypeFull);
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawBool(argList, cfgOptStopAuto, true);
|
|
|
|
hrnCfgArgRawBool(argList, cfgOptCompress, false);
|
|
|
|
hrnCfgArgRawBool(argList, cfgOptArchiveCheck, false);
|
2021-06-01 09:03:44 -04:00
|
|
|
HRN_CFG_LOAD(cfgCmdBackup, argList);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Add files
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PUT_Z(storagePgWrite(), "postgresql.conf", "CONFIGSTUFF", .timeModified = backupTimeStart);
|
|
|
|
HRN_STORAGE_PUT_Z(storagePgWrite(), PG_FILE_PGVERSION, PG_VERSION_95_STR, .timeModified = backupTimeStart);
|
|
|
|
HRN_STORAGE_PATH_CREATE(storagePgWrite(), strZ(pgWalPath(PG_VERSION_95)), .noParentCreate = true);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Create a backup manifest that looks like a halted backup manifest
|
2020-09-18 16:55:26 -04:00
|
|
|
Manifest *manifestResume = manifestNewBuild(
|
2022-02-14 13:24:14 -06:00
|
|
|
storagePg(), PG_VERSION_95, hrnPgCatalogVersion(PG_VERSION_95), true, false, false, NULL, NULL);
|
2019-12-13 17:14:26 -05:00
|
|
|
ManifestData *manifestResumeData = (ManifestData *)manifestData(manifestResume);
|
|
|
|
|
|
|
|
manifestResumeData->backupType = backupTypeFull;
|
|
|
|
const String *resumeLabel = backupLabelCreate(backupTypeFull, NULL, backupTimeStart);
|
|
|
|
manifestBackupLabelSet(manifestResume, resumeLabel);
|
|
|
|
|
|
|
|
// Copy a file to be resumed that has not changed in the repo
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_COPY(
|
|
|
|
storagePg(), PG_FILE_PGVERSION, storageRepoWrite(),
|
2022-05-06 12:32:49 -04:00
|
|
|
zNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/PG_VERSION", strZ(resumeLabel)));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
Pack manifest file structs to save memory.
Manifests with a very large number of files can use a considerable amount of memory. There are a lot of zeroes in the data so it can be stored more efficiently by using base-128 varint encoding for the integers and storing the strings in the same allocation.
The downside is that the data needs to be unpacked in order to be used, but in most cases this seems fast enough (about 10% slower than before) except for saving the manifest, which is 10% slower up to 10 million files and then gets about 5x slower by 100 million (two minutes on my M1 Mac). Profiling does not show this slowdown so I wonder if this is related to the change in memory layout. Curiously, the function that increased most was jsonFromStrInternal(), which was not modified. That gives more weight to the idea that there is some kind of memory issue going on here and one hopes that servers would be less affected. Either way, they largest use cases we have seen are for about 6 million files so if we can improve that case I believe we will be better off.
Further analysis showed that most of the time was taken up writing the size and timestamp fields, which makes almost no sense. The same amount of time was used if they were hard-coded to 0, which points to some odd memory issue on the M1 architecture.
This change has been planned for a while, but the particular impetus at this time is that small file support requires additional fields that would increase manifest memory usage by about 20%, even if the feature is not used.
Note that the Pack code has been updated to use the new varint encoder, but the decoder remains separate because it needs to fetch one byte at a time.
2022-01-21 17:05:07 -05:00
|
|
|
ManifestFilePack **const filePack = manifestFilePackFindInternal(manifestResume, STRDEF("pg_data/PG_VERSION"));
|
2022-01-24 16:21:07 -05:00
|
|
|
ManifestFile file = manifestFileUnpack(manifestResume, *filePack);
|
Pack manifest file structs to save memory.
Manifests with a very large number of files can use a considerable amount of memory. There are a lot of zeroes in the data so it can be stored more efficiently by using base-128 varint encoding for the integers and storing the strings in the same allocation.
The downside is that the data needs to be unpacked in order to be used, but in most cases this seems fast enough (about 10% slower than before) except for saving the manifest, which is 10% slower up to 10 million files and then gets about 5x slower by 100 million (two minutes on my M1 Mac). Profiling does not show this slowdown so I wonder if this is related to the change in memory layout. Curiously, the function that increased most was jsonFromStrInternal(), which was not modified. That gives more weight to the idea that there is some kind of memory issue going on here and one hopes that servers would be less affected. Either way, they largest use cases we have seen are for about 6 million files so if we can improve that case I believe we will be better off.
Further analysis showed that most of the time was taken up writing the size and timestamp fields, which makes almost no sense. The same amount of time was used if they were hard-coded to 0, which points to some odd memory issue on the M1 architecture.
This change has been planned for a while, but the particular impetus at this time is that small file support requires additional fields that would increase manifest memory usage by about 20%, even if the feature is not used.
Note that the Pack code has been updated to use the new varint encoder, but the decoder remains separate because it needs to fetch one byte at a time.
2022-01-21 17:05:07 -05:00
|
|
|
|
|
|
|
strcpy(file.checksumSha1, "06d06bb31b570b94d7b4325f511f853dbe771c21");
|
|
|
|
|
|
|
|
manifestFilePackUpdate(manifestResume, filePack, &file);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Save the resume manifest
|
|
|
|
manifestSave(
|
|
|
|
manifestResume,
|
|
|
|
storageWriteIo(
|
|
|
|
storageNewWriteP(
|
|
|
|
storageRepoWrite(),
|
2020-07-30 07:49:06 -04:00
|
|
|
strNewFmt(STORAGE_REPO_BACKUP "/%s/" BACKUP_MANIFEST_FILE INFO_COPY_EXT, strZ(resumeLabel)))));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Run backup
|
2021-11-18 16:18:10 -05:00
|
|
|
testBackupPqScriptP(PG_VERSION_95, backupTimeStart, .noArchiveCheck = true, .noWal = true);
|
2022-05-04 12:52:05 -04:00
|
|
|
|
|
|
|
TEST_RESULT_VOID(testCmdBackup(), "backup");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
TEST_RESULT_LOG(
|
|
|
|
"P00 INFO: execute exclusive pg_start_backup(): backup begins after the next regular checkpoint completes\n"
|
|
|
|
"P00 INFO: backup start archive = 0000000105D944C000000000, lsn = 5d944c0/0\n"
|
|
|
|
"P00 WARN: resumable backup 20191002-070640F of same type exists -- remove invalid files and resume\n"
|
2021-07-09 13:50:35 -04:00
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/global/pg_control (8KB, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/postgresql.conf (11B, [PCT]) checksum [SHA1]\n"
|
2021-05-22 09:30:54 -04:00
|
|
|
"P01 DETAIL: checksum resumed file " TEST_PATH "/pg1/PG_VERSION (3B, [PCT]) checksum [SHA1]\n"
|
2019-12-13 17:14:26 -05:00
|
|
|
"P00 INFO: execute exclusive pg_stop_backup() and wait for all WAL segments to archive\n"
|
|
|
|
"P00 INFO: backup stop archive = 0000000105D944C000000000, lsn = 5d944c0/800000\n"
|
2021-08-11 13:39:36 -04:00
|
|
|
"P00 INFO: new backup label = 20191002-070640F\n"
|
|
|
|
"P00 INFO: full backup size = [SIZE], file total = 3");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-07-15 13:19:49 -04:00
|
|
|
TEST_RESULT_STR_Z(
|
2019-12-13 17:14:26 -05:00
|
|
|
testBackupValidate(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/latest")),
|
|
|
|
". {link, d=20191002-070640F}\n"
|
|
|
|
"pg_data {path}\n"
|
|
|
|
"pg_data/PG_VERSION {file, s=3}\n"
|
|
|
|
"pg_data/global {path}\n"
|
|
|
|
"pg_data/global/pg_control {file, s=8192}\n"
|
|
|
|
"pg_data/pg_xlog {path}\n"
|
2020-03-18 10:10:10 -04:00
|
|
|
"pg_data/postgresql.conf {file, s=11}\n"
|
|
|
|
"--------\n"
|
|
|
|
"[backup:target]\n"
|
2021-05-22 09:30:54 -04:00
|
|
|
"pg_data={\"path\":\"" TEST_PATH "/pg1\",\"type\":\"path\"}\n"
|
2020-03-18 10:10:10 -04:00
|
|
|
"\n"
|
|
|
|
"[target:file]\n"
|
|
|
|
"pg_data/PG_VERSION={\"checksum\":\"06d06bb31b570b94d7b4325f511f853dbe771c21\",\"size\":3"
|
|
|
|
",\"timestamp\":1570000000}\n"
|
|
|
|
"pg_data/global/pg_control={\"size\":8192,\"timestamp\":1570000000}\n"
|
|
|
|
"pg_data/postgresql.conf={\"checksum\":\"e3db315c260e79211b7b52587123b7aa060f30ab\",\"size\":11"
|
|
|
|
",\"timestamp\":1570000000}\n"
|
|
|
|
"\n"
|
|
|
|
"[target:path]\n"
|
|
|
|
"pg_data={}\n"
|
|
|
|
"pg_data/global={}\n"
|
|
|
|
"pg_data/pg_xlog={}\n",
|
2019-12-13 17:14:26 -05:00
|
|
|
"compare file list");
|
|
|
|
}
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("online resumed compressed 9.5 full backup");
|
|
|
|
|
|
|
|
// Backup start time
|
|
|
|
backupTimeStart = BACKUP_EPOCH + 100000;
|
|
|
|
|
|
|
|
{
|
|
|
|
// Load options
|
|
|
|
StringList *argList = strLstNew();
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2021-05-03 12:15:39 -04:00
|
|
|
hrnCfgArgRawStrId(argList, cfgOptType, backupTypeFull);
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawBool(argList, cfgOptStopAuto, true);
|
|
|
|
hrnCfgArgRawBool(argList, cfgOptArchiveCopy, true);
|
2021-06-01 09:03:44 -04:00
|
|
|
HRN_CFG_LOAD(cfgCmdBackup, argList);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Create a backup manifest that looks like a halted backup manifest
|
2020-09-18 16:55:26 -04:00
|
|
|
Manifest *manifestResume = manifestNewBuild(
|
2022-02-14 13:24:14 -06:00
|
|
|
storagePg(), PG_VERSION_95, hrnPgCatalogVersion(PG_VERSION_95), true, false, false, NULL, NULL);
|
2019-12-13 17:14:26 -05:00
|
|
|
ManifestData *manifestResumeData = (ManifestData *)manifestData(manifestResume);
|
|
|
|
|
|
|
|
manifestResumeData->backupType = backupTypeFull;
|
2020-03-06 14:41:03 -05:00
|
|
|
manifestResumeData->backupOptionCompressType = compressTypeGz;
|
2019-12-13 17:14:26 -05:00
|
|
|
const String *resumeLabel = backupLabelCreate(backupTypeFull, NULL, backupTimeStart);
|
|
|
|
manifestBackupLabelSet(manifestResume, resumeLabel);
|
|
|
|
|
|
|
|
// File exists in cluster and repo but not in the resume manifest
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PUT_Z(storagePgWrite(), "not-in-resume", "TEST", .timeModified = backupTimeStart);
|
|
|
|
HRN_STORAGE_PUT_EMPTY(
|
2022-05-06 12:32:49 -04:00
|
|
|
storageRepoWrite(), zNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/not-in-resume.gz", strZ(resumeLabel)));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Remove checksum from file so it won't be resumed
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PUT_EMPTY(
|
2022-05-06 12:32:49 -04:00
|
|
|
storageRepoWrite(), zNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/global/pg_control.gz", strZ(resumeLabel)));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
Pack manifest file structs to save memory.
Manifests with a very large number of files can use a considerable amount of memory. There are a lot of zeroes in the data so it can be stored more efficiently by using base-128 varint encoding for the integers and storing the strings in the same allocation.
The downside is that the data needs to be unpacked in order to be used, but in most cases this seems fast enough (about 10% slower than before) except for saving the manifest, which is 10% slower up to 10 million files and then gets about 5x slower by 100 million (two minutes on my M1 Mac). Profiling does not show this slowdown so I wonder if this is related to the change in memory layout. Curiously, the function that increased most was jsonFromStrInternal(), which was not modified. That gives more weight to the idea that there is some kind of memory issue going on here and one hopes that servers would be less affected. Either way, they largest use cases we have seen are for about 6 million files so if we can improve that case I believe we will be better off.
Further analysis showed that most of the time was taken up writing the size and timestamp fields, which makes almost no sense. The same amount of time was used if they were hard-coded to 0, which points to some odd memory issue on the M1 architecture.
This change has been planned for a while, but the particular impetus at this time is that small file support requires additional fields that would increase manifest memory usage by about 20%, even if the feature is not used.
Note that the Pack code has been updated to use the new varint encoder, but the decoder remains separate because it needs to fetch one byte at a time.
2022-01-21 17:05:07 -05:00
|
|
|
ManifestFilePack **const filePack = manifestFilePackFindInternal(manifestResume, STRDEF("pg_data/global/pg_control"));
|
2022-01-24 16:21:07 -05:00
|
|
|
ManifestFile file = manifestFileUnpack(manifestResume, *filePack);
|
Pack manifest file structs to save memory.
Manifests with a very large number of files can use a considerable amount of memory. There are a lot of zeroes in the data so it can be stored more efficiently by using base-128 varint encoding for the integers and storing the strings in the same allocation.
The downside is that the data needs to be unpacked in order to be used, but in most cases this seems fast enough (about 10% slower than before) except for saving the manifest, which is 10% slower up to 10 million files and then gets about 5x slower by 100 million (two minutes on my M1 Mac). Profiling does not show this slowdown so I wonder if this is related to the change in memory layout. Curiously, the function that increased most was jsonFromStrInternal(), which was not modified. That gives more weight to the idea that there is some kind of memory issue going on here and one hopes that servers would be less affected. Either way, they largest use cases we have seen are for about 6 million files so if we can improve that case I believe we will be better off.
Further analysis showed that most of the time was taken up writing the size and timestamp fields, which makes almost no sense. The same amount of time was used if they were hard-coded to 0, which points to some odd memory issue on the M1 architecture.
This change has been planned for a while, but the particular impetus at this time is that small file support requires additional fields that would increase manifest memory usage by about 20%, even if the feature is not used.
Note that the Pack code has been updated to use the new varint encoder, but the decoder remains separate because it needs to fetch one byte at a time.
2022-01-21 17:05:07 -05:00
|
|
|
|
|
|
|
file.checksumSha1[0] = 0;
|
|
|
|
|
|
|
|
manifestFilePackUpdate(manifestResume, filePack, &file);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Size does not match between cluster and resume manifest
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PUT_Z(storagePgWrite(), "size-mismatch", "TEST", .timeModified = backupTimeStart);
|
|
|
|
HRN_STORAGE_PUT_EMPTY(
|
2022-05-06 12:32:49 -04:00
|
|
|
storageRepoWrite(), zNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/size-mismatch.gz", strZ(resumeLabel)));
|
2019-12-13 17:14:26 -05:00
|
|
|
manifestFileAdd(
|
2022-02-20 16:45:07 -06:00
|
|
|
manifestResume, &(ManifestFile){
|
2020-11-09 16:26:43 -05:00
|
|
|
.name = STRDEF("pg_data/size-mismatch"), .checksumSha1 = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
|
|
|
|
.size = 33});
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Time does not match between cluster and resume manifest
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PUT_Z(storagePgWrite(), "time-mismatch", "TEST", .timeModified = backupTimeStart);
|
|
|
|
HRN_STORAGE_PUT_EMPTY(
|
2022-05-06 12:32:49 -04:00
|
|
|
storageRepoWrite(), zNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/time-mismatch.gz", strZ(resumeLabel)));
|
2019-12-13 17:14:26 -05:00
|
|
|
manifestFileAdd(
|
2022-02-20 16:45:07 -06:00
|
|
|
manifestResume, &(ManifestFile){
|
2019-12-13 17:14:26 -05:00
|
|
|
.name = STRDEF("pg_data/time-mismatch"), .checksumSha1 = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", .size = 4,
|
|
|
|
.timestamp = backupTimeStart - 1});
|
|
|
|
|
|
|
|
// Size is zero in cluster and resume manifest. ??? We'd like to remove this requirement after the migration.
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PUT_EMPTY(storagePgWrite(), "zero-size", .timeModified = backupTimeStart);
|
|
|
|
HRN_STORAGE_PUT_Z(
|
2022-05-06 12:32:49 -04:00
|
|
|
storageRepoWrite(), zNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/zero-size.gz", strZ(resumeLabel)), "ZERO-SIZE");
|
2019-12-13 17:14:26 -05:00
|
|
|
manifestFileAdd(
|
2022-02-20 16:45:07 -06:00
|
|
|
manifestResume, &(ManifestFile){.name = STRDEF("pg_data/zero-size"), .size = 0, .timestamp = backupTimeStart});
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Path is not in manifest
|
2022-05-06 12:32:49 -04:00
|
|
|
HRN_STORAGE_PATH_CREATE(storageRepoWrite(), zNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/bogus_path", strZ(resumeLabel)));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// File is not in manifest
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PUT_EMPTY(
|
2022-05-06 12:32:49 -04:00
|
|
|
storageRepoWrite(), zNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/global/bogus.gz", strZ(resumeLabel)));
|
2020-03-06 14:41:03 -05:00
|
|
|
|
|
|
|
// File has incorrect compression type
|
2022-05-06 12:32:49 -04:00
|
|
|
HRN_STORAGE_PUT_EMPTY(storageRepoWrite(), zNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/global/bogus", strZ(resumeLabel)));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Save the resume manifest
|
|
|
|
manifestSave(
|
|
|
|
manifestResume,
|
|
|
|
storageWriteIo(
|
|
|
|
storageNewWriteP(
|
|
|
|
storageRepoWrite(),
|
2020-07-30 07:49:06 -04:00
|
|
|
strNewFmt(STORAGE_REPO_BACKUP "/%s/" BACKUP_MANIFEST_FILE INFO_COPY_EXT, strZ(resumeLabel)))));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Disable storageFeaturePath so paths will not be created before files are copied
|
2021-04-07 14:04:38 -04:00
|
|
|
((Storage *)storageRepoWrite())->pub.interface.feature ^= 1 << storageFeaturePath;
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Disable storageFeaturePathSync so paths will not be synced
|
2021-04-07 14:04:38 -04:00
|
|
|
((Storage *)storageRepoWrite())->pub.interface.feature ^= 1 << storageFeaturePathSync;
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Run backup
|
|
|
|
testBackupPqScriptP(PG_VERSION_95, backupTimeStart);
|
2022-05-04 12:52:05 -04:00
|
|
|
TEST_RESULT_VOID(testCmdBackup(), "backup");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Enable storage features
|
2021-04-07 14:04:38 -04:00
|
|
|
((Storage *)storageRepoWrite())->pub.interface.feature |= 1 << storageFeaturePath;
|
|
|
|
((Storage *)storageRepoWrite())->pub.interface.feature |= 1 << storageFeaturePathSync;
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
TEST_RESULT_LOG(
|
|
|
|
"P00 INFO: execute exclusive pg_start_backup(): backup begins after the next regular checkpoint completes\n"
|
|
|
|
"P00 INFO: backup start archive = 0000000105D95D3000000000, lsn = 5d95d30/0\n"
|
2021-11-18 16:18:10 -05:00
|
|
|
"P00 INFO: check archive for prior segment 0000000105D95D2F000000FF\n"
|
2019-12-13 17:14:26 -05:00
|
|
|
"P00 WARN: resumable backup 20191003-105320F of same type exists -- remove invalid files and resume\n"
|
2021-05-22 09:30:54 -04:00
|
|
|
"P00 DETAIL: remove path '" TEST_PATH "/repo/backup/test1/20191003-105320F/pg_data/bogus_path' from resumed"
|
|
|
|
" backup\n"
|
|
|
|
"P00 DETAIL: remove file '" TEST_PATH "/repo/backup/test1/20191003-105320F/pg_data/global/bogus' from resumed"
|
|
|
|
" backup (mismatched compression type)\n"
|
|
|
|
"P00 DETAIL: remove file '" TEST_PATH "/repo/backup/test1/20191003-105320F/pg_data/global/bogus.gz' from resumed"
|
|
|
|
" backup (missing in manifest)\n"
|
|
|
|
"P00 DETAIL: remove file '" TEST_PATH "/repo/backup/test1/20191003-105320F/pg_data/global/pg_control.gz' from"
|
|
|
|
" resumed backup (no checksum in resumed manifest)\n"
|
|
|
|
"P00 DETAIL: remove file '" TEST_PATH "/repo/backup/test1/20191003-105320F/pg_data/not-in-resume.gz' from resumed"
|
|
|
|
" backup (missing in resumed manifest)\n"
|
|
|
|
"P00 DETAIL: remove file '" TEST_PATH "/repo/backup/test1/20191003-105320F/pg_data/size-mismatch.gz' from resumed"
|
|
|
|
" backup (mismatched size)\n"
|
|
|
|
"P00 DETAIL: remove file '" TEST_PATH "/repo/backup/test1/20191003-105320F/pg_data/time-mismatch.gz' from resumed"
|
|
|
|
" backup (mismatched timestamp)\n"
|
|
|
|
"P00 DETAIL: remove file '" TEST_PATH "/repo/backup/test1/20191003-105320F/pg_data/zero-size.gz' from resumed"
|
|
|
|
" backup (zero size)\n"
|
2021-07-09 13:50:35 -04:00
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/global/pg_control (8KB, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/postgresql.conf (11B, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/time-mismatch (4B, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/size-mismatch (4B, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/not-in-resume (4B, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/PG_VERSION (3B, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/zero-size (0B, [PCT])\n"
|
2019-12-13 17:14:26 -05:00
|
|
|
"P00 INFO: execute exclusive pg_stop_backup() and wait for all WAL segments to archive\n"
|
|
|
|
"P00 INFO: backup stop archive = 0000000105D95D3000000000, lsn = 5d95d30/800000\n"
|
|
|
|
"P00 INFO: check archive for segment(s) 0000000105D95D3000000000:0000000105D95D3000000000\n"
|
2021-03-11 08:22:44 -05:00
|
|
|
"P00 DETAIL: copy segment 0000000105D95D3000000000 to backup\n"
|
2021-08-11 13:39:36 -04:00
|
|
|
"P00 INFO: new backup label = 20191003-105320F\n"
|
|
|
|
"P00 INFO: full backup size = [SIZE], file total = 8");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-07-15 13:19:49 -04:00
|
|
|
TEST_RESULT_STR_Z(
|
2019-12-13 17:14:26 -05:00
|
|
|
testBackupValidate(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/latest")),
|
|
|
|
". {link, d=20191003-105320F}\n"
|
|
|
|
"pg_data {path}\n"
|
|
|
|
"pg_data/PG_VERSION.gz {file, s=3}\n"
|
|
|
|
"pg_data/global {path}\n"
|
|
|
|
"pg_data/global/pg_control.gz {file, s=8192}\n"
|
|
|
|
"pg_data/not-in-resume.gz {file, s=4}\n"
|
|
|
|
"pg_data/pg_xlog {path}\n"
|
|
|
|
"pg_data/pg_xlog/0000000105D95D3000000000.gz {file, s=16777216}\n"
|
|
|
|
"pg_data/postgresql.conf.gz {file, s=11}\n"
|
|
|
|
"pg_data/size-mismatch.gz {file, s=4}\n"
|
|
|
|
"pg_data/time-mismatch.gz {file, s=4}\n"
|
2020-03-18 10:10:10 -04:00
|
|
|
"pg_data/zero-size.gz {file, s=0}\n"
|
|
|
|
"--------\n"
|
|
|
|
"[backup:target]\n"
|
2021-05-22 09:30:54 -04:00
|
|
|
"pg_data={\"path\":\"" TEST_PATH "/pg1\",\"type\":\"path\"}\n"
|
2020-03-18 10:10:10 -04:00
|
|
|
"\n"
|
|
|
|
"[target:file]\n"
|
|
|
|
"pg_data/PG_VERSION={\"checksum\":\"06d06bb31b570b94d7b4325f511f853dbe771c21\",\"size\":3"
|
|
|
|
",\"timestamp\":1570000000}\n"
|
2021-11-30 13:23:11 -05:00
|
|
|
"pg_data/global/pg_control={\"size\":8192,\"timestamp\":1570100000}\n"
|
2020-03-18 10:10:10 -04:00
|
|
|
"pg_data/not-in-resume={\"checksum\":\"984816fd329622876e14907634264e6f332e9fb3\",\"size\":4"
|
|
|
|
",\"timestamp\":1570100000}\n"
|
|
|
|
"pg_data/pg_xlog/0000000105D95D3000000000={\"size\":16777216,\"timestamp\":1570100002}\n"
|
|
|
|
"pg_data/postgresql.conf={\"checksum\":\"e3db315c260e79211b7b52587123b7aa060f30ab\",\"size\":11"
|
|
|
|
",\"timestamp\":1570000000}\n"
|
|
|
|
"pg_data/size-mismatch={\"checksum\":\"984816fd329622876e14907634264e6f332e9fb3\",\"size\":4"
|
|
|
|
",\"timestamp\":1570100000}\n"
|
|
|
|
"pg_data/time-mismatch={\"checksum\":\"984816fd329622876e14907634264e6f332e9fb3\",\"size\":4"
|
|
|
|
",\"timestamp\":1570100000}\n"
|
|
|
|
"pg_data/zero-size={\"size\":0,\"timestamp\":1570100000}\n"
|
|
|
|
"\n"
|
|
|
|
"[target:path]\n"
|
|
|
|
"pg_data={}\n"
|
|
|
|
"pg_data/global={}\n"
|
|
|
|
"pg_data/pg_xlog={}\n",
|
2019-12-13 17:14:26 -05:00
|
|
|
"compare file list");
|
|
|
|
|
|
|
|
// Remove test files
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_REMOVE(storagePgWrite(), "not-in-resume", .errorOnMissing = true);
|
|
|
|
HRN_STORAGE_REMOVE(storagePgWrite(), "size-mismatch", .errorOnMissing = true);
|
|
|
|
HRN_STORAGE_REMOVE(storagePgWrite(), "time-mismatch", .errorOnMissing = true);
|
|
|
|
HRN_STORAGE_REMOVE(storagePgWrite(), "zero-size", .errorOnMissing = true);
|
2019-12-13 17:14:26 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("online resumed compressed 9.5 diff backup");
|
|
|
|
|
|
|
|
backupTimeStart = BACKUP_EPOCH + 200000;
|
|
|
|
|
|
|
|
{
|
|
|
|
StringList *argList = strLstNew();
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2021-05-03 12:15:39 -04:00
|
|
|
hrnCfgArgRawStrId(argList, cfgOptType, backupTypeDiff);
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawBool(argList, cfgOptCompress, false);
|
|
|
|
hrnCfgArgRawBool(argList, cfgOptStopAuto, true);
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRawBool(argList, cfgOptRepoHardlink, true);
|
2021-06-01 09:03:44 -04:00
|
|
|
HRN_CFG_LOAD(cfgCmdBackup, argList);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Load the previous manifest and null out the checksum-page option to be sure it gets set to false in this backup
|
|
|
|
const String *manifestPriorFile = STRDEF(STORAGE_REPO_BACKUP "/latest/" BACKUP_MANIFEST_FILE);
|
|
|
|
Manifest *manifestPrior = manifestNewLoad(storageReadIo(storageNewReadP(storageRepo(), manifestPriorFile)));
|
|
|
|
((ManifestData *)manifestData(manifestPrior))->backupOptionChecksumPage = NULL;
|
|
|
|
manifestSave(manifestPrior, storageWriteIo(storageNewWriteP(storageRepoWrite(), manifestPriorFile)));
|
|
|
|
|
|
|
|
// Create a backup manifest that looks like a halted backup manifest
|
2020-09-18 16:55:26 -04:00
|
|
|
Manifest *manifestResume = manifestNewBuild(
|
2022-02-14 13:24:14 -06:00
|
|
|
storagePg(), PG_VERSION_95, hrnPgCatalogVersion(PG_VERSION_95), true, false, false, NULL, NULL);
|
2019-12-13 17:14:26 -05:00
|
|
|
ManifestData *manifestResumeData = (ManifestData *)manifestData(manifestResume);
|
|
|
|
|
|
|
|
manifestResumeData->backupType = backupTypeDiff;
|
|
|
|
manifestResumeData->backupLabelPrior = manifestData(manifestPrior)->backupLabel;
|
2020-03-06 14:41:03 -05:00
|
|
|
manifestResumeData->backupOptionCompressType = compressTypeGz;
|
2020-07-30 07:49:06 -04:00
|
|
|
const String *resumeLabel = backupLabelCreate(
|
|
|
|
backupTypeDiff, manifestData(manifestPrior)->backupLabel, backupTimeStart);
|
2019-12-13 17:14:26 -05:00
|
|
|
manifestBackupLabelSet(manifestResume, resumeLabel);
|
|
|
|
|
|
|
|
// Reference in manifest
|
2022-05-06 12:32:49 -04:00
|
|
|
HRN_STORAGE_PUT_EMPTY(storageRepoWrite(), zNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/PG_VERSION.gz", strZ(resumeLabel)));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Reference in resumed manifest
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PUT_EMPTY(storagePgWrite(), "resume-ref", .timeModified = backupTimeStart);
|
2022-05-06 12:32:49 -04:00
|
|
|
HRN_STORAGE_PUT_EMPTY(storageRepoWrite(), zNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/resume-ref.gz", strZ(resumeLabel)));
|
2019-12-13 17:14:26 -05:00
|
|
|
manifestFileAdd(
|
2022-02-20 16:45:07 -06:00
|
|
|
manifestResume, &(ManifestFile){.name = STRDEF("pg_data/resume-ref"), .size = 0, .reference = STRDEF("BOGUS")});
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2020-11-09 16:26:43 -05:00
|
|
|
// Time does not match between cluster and resume manifest (but resume because time is in future so delta enabled). Note
|
2019-12-13 17:14:26 -05:00
|
|
|
// also that the repo file is intenionally corrupt to generate a warning about corruption in the repository.
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PUT_Z(storagePgWrite(), "time-mismatch2", "TEST", .timeModified = backupTimeStart + 100);
|
|
|
|
HRN_STORAGE_PUT_EMPTY(
|
2022-05-06 12:32:49 -04:00
|
|
|
storageRepoWrite(), zNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/time-mismatch2.gz", strZ(resumeLabel)));
|
2019-12-13 17:14:26 -05:00
|
|
|
manifestFileAdd(
|
2022-02-20 16:45:07 -06:00
|
|
|
manifestResume, &(ManifestFile){
|
2019-12-13 17:14:26 -05:00
|
|
|
.name = STRDEF("pg_data/time-mismatch2"), .checksumSha1 = "984816fd329622876e14907634264e6f332e9fb3", .size = 4,
|
|
|
|
.timestamp = backupTimeStart});
|
|
|
|
|
|
|
|
// Links are always removed on resume
|
|
|
|
THROW_ON_SYS_ERROR(
|
|
|
|
symlink(
|
|
|
|
"..",
|
2020-07-30 07:49:06 -04:00
|
|
|
strZ(storagePathP(storageRepo(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/link", strZ(resumeLabel))))) == -1,
|
2019-12-13 17:14:26 -05:00
|
|
|
FileOpenError, "unable to create symlink");
|
|
|
|
|
|
|
|
// Special files should not be in the repo
|
2021-05-22 14:22:51 -04:00
|
|
|
HRN_SYSTEM_FMT(
|
2019-12-13 17:14:26 -05:00
|
|
|
"mkfifo -m 666 %s",
|
2020-07-30 07:49:06 -04:00
|
|
|
strZ(storagePathP(storageRepo(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/pipe", strZ(resumeLabel)))));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Save the resume manifest
|
|
|
|
manifestSave(
|
|
|
|
manifestResume,
|
|
|
|
storageWriteIo(
|
|
|
|
storageNewWriteP(
|
|
|
|
storageRepoWrite(),
|
2020-07-30 07:49:06 -04:00
|
|
|
strNewFmt(STORAGE_REPO_BACKUP "/%s/" BACKUP_MANIFEST_FILE INFO_COPY_EXT, strZ(resumeLabel)))));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Run backup
|
|
|
|
testBackupPqScriptP(PG_VERSION_95, backupTimeStart);
|
2022-05-04 12:52:05 -04:00
|
|
|
TEST_RESULT_VOID(testCmdBackup(), "backup");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Check log
|
|
|
|
TEST_RESULT_LOG(
|
|
|
|
"P00 INFO: last backup label = 20191003-105320F, version = " PROJECT_VERSION "\n"
|
2020-03-06 14:41:03 -05:00
|
|
|
"P00 WARN: diff backup cannot alter compress-type option to 'none', reset to value in 20191003-105320F\n"
|
2019-12-13 17:14:26 -05:00
|
|
|
"P00 INFO: execute exclusive pg_start_backup(): backup begins after the next regular checkpoint completes\n"
|
|
|
|
"P00 INFO: backup start archive = 0000000105D9759000000000, lsn = 5d97590/0\n"
|
2021-11-18 16:18:10 -05:00
|
|
|
"P00 INFO: check archive for prior segment 0000000105D9758F000000FF\n"
|
2022-05-18 08:48:48 -04:00
|
|
|
"P00 WARN: file 'time-mismatch2' has timestamp (1570200100) in the future (relative to copy start 1570200000),"
|
|
|
|
" enabling delta checksum\n"
|
2019-12-13 17:14:26 -05:00
|
|
|
"P00 WARN: resumable backup 20191003-105320F_20191004-144000D of same type exists"
|
|
|
|
" -- remove invalid files and resume\n"
|
2021-05-22 09:30:54 -04:00
|
|
|
"P00 DETAIL: remove file '" TEST_PATH "/repo/backup/test1/20191003-105320F_20191004-144000D/pg_data/PG_VERSION.gz'"
|
2019-12-13 17:14:26 -05:00
|
|
|
" from resumed backup (reference in manifest)\n"
|
2021-05-22 09:30:54 -04:00
|
|
|
"P00 WARN: remove special file '" TEST_PATH "/repo/backup/test1/20191003-105320F_20191004-144000D/pg_data/pipe'"
|
2019-12-13 17:14:26 -05:00
|
|
|
" from resumed backup\n"
|
2021-05-22 09:30:54 -04:00
|
|
|
"P00 DETAIL: remove file '" TEST_PATH "/repo/backup/test1/20191003-105320F_20191004-144000D/pg_data/resume-ref.gz'"
|
2019-12-13 17:14:26 -05:00
|
|
|
" from resumed backup (reference in resumed manifest)\n"
|
2021-12-07 09:21:07 -05:00
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/global/pg_control (8KB, [PCT]) checksum [SHA1]\n"
|
2021-05-22 09:30:54 -04:00
|
|
|
"P01 DETAIL: match file from prior backup " TEST_PATH "/pg1/postgresql.conf (11B, [PCT]) checksum [SHA1]\n"
|
2019-12-13 17:14:26 -05:00
|
|
|
"P00 WARN: resumed backup file pg_data/time-mismatch2 does not have expected checksum"
|
2020-11-09 16:26:43 -05:00
|
|
|
" 984816fd329622876e14907634264e6f332e9fb3. The file will be recopied and backup will continue but this may be"
|
|
|
|
" an issue unless the resumed backup path in the repository is known to be corrupted.\n"
|
2019-12-13 17:14:26 -05:00
|
|
|
" NOTE: this does not indicate a problem with the PostgreSQL page checksums.\n"
|
2021-07-09 13:50:35 -04:00
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/time-mismatch2 (4B, [PCT]) checksum [SHA1]\n"
|
2021-05-22 09:30:54 -04:00
|
|
|
"P01 DETAIL: match file from prior backup " TEST_PATH "/pg1/PG_VERSION (3B, [PCT]) checksum [SHA1]\n"
|
2021-07-09 13:50:35 -04:00
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/resume-ref (0B, [PCT])\n"
|
2019-12-13 17:14:26 -05:00
|
|
|
"P00 DETAIL: hardlink pg_data/PG_VERSION to 20191003-105320F\n"
|
|
|
|
"P00 DETAIL: hardlink pg_data/postgresql.conf to 20191003-105320F\n"
|
|
|
|
"P00 INFO: execute exclusive pg_stop_backup() and wait for all WAL segments to archive\n"
|
|
|
|
"P00 INFO: backup stop archive = 0000000105D9759000000000, lsn = 5d97590/800000\n"
|
|
|
|
"P00 INFO: check archive for segment(s) 0000000105D9759000000000:0000000105D9759000000000\n"
|
2021-08-11 13:39:36 -04:00
|
|
|
"P00 INFO: new backup label = 20191003-105320F_20191004-144000D\n"
|
|
|
|
"P00 INFO: diff backup size = [SIZE], file total = 5");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Check repo directory
|
2021-07-15 13:19:49 -04:00
|
|
|
TEST_RESULT_STR_Z(
|
2019-12-13 17:14:26 -05:00
|
|
|
testBackupValidate(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/latest")),
|
|
|
|
". {link, d=20191003-105320F_20191004-144000D}\n"
|
|
|
|
"pg_data {path}\n"
|
|
|
|
"pg_data/PG_VERSION.gz {file, s=3}\n"
|
|
|
|
"pg_data/global {path}\n"
|
|
|
|
"pg_data/global/pg_control.gz {file, s=8192}\n"
|
|
|
|
"pg_data/pg_xlog {path}\n"
|
|
|
|
"pg_data/postgresql.conf.gz {file, s=11}\n"
|
|
|
|
"pg_data/resume-ref.gz {file, s=0}\n"
|
2020-03-18 10:10:10 -04:00
|
|
|
"pg_data/time-mismatch2.gz {file, s=4}\n"
|
|
|
|
"--------\n"
|
|
|
|
"[backup:target]\n"
|
2021-05-22 09:30:54 -04:00
|
|
|
"pg_data={\"path\":\"" TEST_PATH "/pg1\",\"type\":\"path\"}\n"
|
2020-03-18 10:10:10 -04:00
|
|
|
"\n"
|
|
|
|
"[target:file]\n"
|
|
|
|
"pg_data/PG_VERSION={\"checksum\":\"06d06bb31b570b94d7b4325f511f853dbe771c21\",\"reference\":\"20191003-105320F\""
|
|
|
|
",\"size\":3,\"timestamp\":1570000000}\n"
|
2021-12-07 09:21:07 -05:00
|
|
|
"pg_data/global/pg_control={\"size\":8192,\"timestamp\":1570200000}\n"
|
2020-03-18 10:10:10 -04:00
|
|
|
"pg_data/postgresql.conf={\"checksum\":\"e3db315c260e79211b7b52587123b7aa060f30ab\""
|
|
|
|
",\"reference\":\"20191003-105320F\",\"size\":11,\"timestamp\":1570000000}\n"
|
|
|
|
"pg_data/resume-ref={\"size\":0,\"timestamp\":1570200000}\n"
|
|
|
|
"pg_data/time-mismatch2={\"checksum\":\"984816fd329622876e14907634264e6f332e9fb3\",\"size\":4"
|
|
|
|
",\"timestamp\":1570200100}\n"
|
|
|
|
"\n"
|
|
|
|
"[target:path]\n"
|
|
|
|
"pg_data={}\n"
|
|
|
|
"pg_data/global={}\n"
|
|
|
|
"pg_data/pg_xlog={}\n",
|
2019-12-13 17:14:26 -05:00
|
|
|
"compare file list");
|
|
|
|
|
|
|
|
// Remove test files
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_REMOVE(storagePgWrite(), "resume-ref", .errorOnMissing = true);
|
|
|
|
HRN_STORAGE_REMOVE(storagePgWrite(), "time-mismatch2", .errorOnMissing = true);
|
2019-12-13 17:14:26 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
2020-01-26 13:19:13 -07:00
|
|
|
TEST_TITLE("online 9.6 backup-standby full backup");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
backupTimeStart = BACKUP_EPOCH + 1200000;
|
|
|
|
|
|
|
|
{
|
|
|
|
// Update pg_control
|
2021-11-30 13:23:11 -05:00
|
|
|
HRN_PG_CONTROL_PUT(storagePgWrite(), PG_VERSION_96);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Update version
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PUT_Z(storagePgWrite(), PG_FILE_PGVERSION, PG_VERSION_96_STR, .timeModified = backupTimeStart);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Upgrade stanza
|
|
|
|
StringList *argList = strLstNew();
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawBool(argList, cfgOptOnline, false);
|
2021-06-01 09:03:44 -04:00
|
|
|
HRN_CFG_LOAD(cfgCmdStanzaUpgrade, argList);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
cmdStanzaUpgrade();
|
2021-05-22 14:09:45 -04:00
|
|
|
TEST_RESULT_LOG("P00 INFO: stanza-upgrade for stanza 'test1' on repo1");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Load options
|
|
|
|
argList = strLstNew();
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgKeyRaw(argList, cfgOptPgPath, 1, pg1Path);
|
|
|
|
hrnCfgArgKeyRaw(argList, cfgOptPgPath, 2, pg2Path);
|
|
|
|
hrnCfgArgKeyRawZ(argList, cfgOptPgPort, 2, "5433");
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawBool(argList, cfgOptCompress, false);
|
|
|
|
hrnCfgArgRawBool(argList, cfgOptBackupStandby, true);
|
|
|
|
hrnCfgArgRawBool(argList, cfgOptStartFast, true);
|
|
|
|
hrnCfgArgRawBool(argList, cfgOptArchiveCopy, true);
|
2021-06-01 09:03:44 -04:00
|
|
|
HRN_CFG_LOAD(cfgCmdBackup, argList);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-12-07 09:21:07 -05:00
|
|
|
// Add pg_control to standby
|
|
|
|
HRN_PG_CONTROL_PUT(storagePgIdxWrite(1), PG_VERSION_96);
|
|
|
|
|
2020-03-18 13:40:16 -04:00
|
|
|
// Create file to copy from the standby. This file will be zero-length on the primary and non-zero-length on the standby
|
2020-03-19 12:11:20 -04:00
|
|
|
// but no bytes will be copied.
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PUT_EMPTY(storagePgIdxWrite(0), PG_PATH_BASE "/1/1", .timeModified = backupTimeStart);
|
|
|
|
HRN_STORAGE_PUT_Z(storagePgIdxWrite(1), PG_PATH_BASE "/1/1", "1234");
|
2020-03-18 13:40:16 -04:00
|
|
|
|
|
|
|
// Create file to copy from the standby. This file will be smaller on the primary than the standby and have no common
|
|
|
|
// data in the bytes that exist on primary and standby. If the file is copied from the primary instead of the standby
|
|
|
|
// the checksum will change but not the size.
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PUT_Z(storagePgIdxWrite(0), PG_PATH_BASE "/1/2", "DA", .timeModified = backupTimeStart);
|
|
|
|
HRN_STORAGE_PUT_Z(storagePgIdxWrite(1), PG_PATH_BASE "/1/2", "5678");
|
2020-03-18 13:40:16 -04:00
|
|
|
|
|
|
|
// Create file to copy from the standby. This file will be larger on the primary than the standby and have no common
|
|
|
|
// data in the bytes that exist on primary and standby. If the file is copied from the primary instead of the standby
|
|
|
|
// the checksum and size will change.
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PUT_Z(storagePgIdxWrite(0), PG_PATH_BASE "/1/3", "TEST", .timeModified = backupTimeStart);
|
|
|
|
HRN_STORAGE_PUT_Z(storagePgIdxWrite(1), PG_PATH_BASE "/1/3", "ABC");
|
2020-01-26 13:19:13 -07:00
|
|
|
|
|
|
|
// Create a file on the primary that does not exist on the standby to test that the file is removed from the manifest
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PUT_Z(storagePgIdxWrite(0), PG_PATH_BASE "/1/0", "DATA", .timeModified = backupTimeStart);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Set log level to warn because the following test uses multiple processes so the log order will not be deterministic
|
|
|
|
harnessLogLevelSet(logLevelWarn);
|
|
|
|
|
2021-11-18 16:18:10 -05:00
|
|
|
// Run backup but error on first archive check
|
|
|
|
testBackupPqScriptP(
|
|
|
|
PG_VERSION_96, backupTimeStart, .noPriorWal = true, .backupStandby = true, .walCompressType = compressTypeGz);
|
|
|
|
TEST_ERROR(
|
2022-05-04 12:52:05 -04:00
|
|
|
testCmdBackup(), ArchiveTimeoutError,
|
2021-11-18 16:18:10 -05:00
|
|
|
"WAL segment 0000000105DA69BF000000FF was not archived before the 100ms timeout\n"
|
|
|
|
"HINT: check the archive_command to ensure that all options are correct (especially --stanza).\n"
|
|
|
|
"HINT: check the PostgreSQL server log for errors.\n"
|
|
|
|
"HINT: run the 'start' command if the stanza was previously stopped.");
|
|
|
|
|
2019-12-13 17:14:26 -05:00
|
|
|
// Run backup but error on archive check
|
2021-11-18 16:18:10 -05:00
|
|
|
testBackupPqScriptP(
|
|
|
|
PG_VERSION_96, backupTimeStart, .noWal = true, .backupStandby = true, .walCompressType = compressTypeGz);
|
2019-12-13 17:14:26 -05:00
|
|
|
TEST_ERROR(
|
2022-05-04 12:52:05 -04:00
|
|
|
testCmdBackup(), ArchiveTimeoutError,
|
2019-12-13 17:14:26 -05:00
|
|
|
"WAL segment 0000000105DA69C000000000 was not archived before the 100ms timeout\n"
|
|
|
|
"HINT: check the archive_command to ensure that all options are correct (especially --stanza).\n"
|
2020-09-03 07:49:49 -04:00
|
|
|
"HINT: check the PostgreSQL server log for errors.\n"
|
|
|
|
"HINT: run the 'start' command if the stanza was previously stopped.");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Remove halted backup so there's no resume
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PATH_REMOVE(storageRepoWrite(), STORAGE_REPO_BACKUP "/20191016-042640F", .recurse = true);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Run backup
|
2020-03-06 14:41:03 -05:00
|
|
|
testBackupPqScriptP(PG_VERSION_96, backupTimeStart, .backupStandby = true, .walCompressType = compressTypeGz);
|
2022-05-04 12:52:05 -04:00
|
|
|
TEST_RESULT_VOID(testCmdBackup(), "backup");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Set log level back to detail
|
|
|
|
harnessLogLevelSet(logLevelDetail);
|
|
|
|
|
|
|
|
TEST_RESULT_LOG(
|
|
|
|
"P00 WARN: no prior backup exists, incr backup has been changed to full");
|
|
|
|
|
2021-07-15 13:19:49 -04:00
|
|
|
TEST_RESULT_STR_Z(
|
2019-12-13 17:14:26 -05:00
|
|
|
testBackupValidate(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/latest")),
|
|
|
|
". {link, d=20191016-042640F}\n"
|
|
|
|
"pg_data {path}\n"
|
|
|
|
"pg_data/PG_VERSION {file, s=3}\n"
|
|
|
|
"pg_data/backup_label {file, s=17}\n"
|
|
|
|
"pg_data/base {path}\n"
|
|
|
|
"pg_data/base/1 {path}\n"
|
2020-03-19 13:16:05 -04:00
|
|
|
"pg_data/base/1/1 {file, s=0}\n"
|
|
|
|
"pg_data/base/1/2 {file, s=2}\n"
|
2020-03-18 13:40:16 -04:00
|
|
|
"pg_data/base/1/3 {file, s=3}\n"
|
2019-12-13 17:14:26 -05:00
|
|
|
"pg_data/global {path}\n"
|
|
|
|
"pg_data/global/pg_control {file, s=8192}\n"
|
|
|
|
"pg_data/pg_xlog {path}\n"
|
|
|
|
"pg_data/pg_xlog/0000000105DA69C000000000 {file, s=16777216}\n"
|
2020-03-18 10:10:10 -04:00
|
|
|
"pg_data/postgresql.conf {file, s=11}\n"
|
|
|
|
"--------\n"
|
|
|
|
"[backup:target]\n"
|
2021-05-22 09:30:54 -04:00
|
|
|
"pg_data={\"path\":\"" TEST_PATH "/pg1\",\"type\":\"path\"}\n"
|
2020-03-18 10:10:10 -04:00
|
|
|
"\n"
|
|
|
|
"[target:file]\n"
|
|
|
|
"pg_data/PG_VERSION={\"checksum\":\"f5b7e6d36dc0113f61b36c700817d42b96f7b037\",\"size\":3"
|
|
|
|
",\"timestamp\":1571200000}\n"
|
|
|
|
"pg_data/backup_label={\"checksum\":\"8e6f41ac87a7514be96260d65bacbffb11be77dc\",\"size\":17"
|
|
|
|
",\"timestamp\":1571200002}\n"
|
2022-01-20 14:01:10 -05:00
|
|
|
"pg_data/base/1/1={\"size\":0,\"timestamp\":1571200000}\n"
|
|
|
|
"pg_data/base/1/2={\"checksum\":\"54ceb91256e8190e474aa752a6e0650a2df5ba37\",\"size\":2,\"timestamp\":1571200000}\n"
|
|
|
|
"pg_data/base/1/3={\"checksum\":\"3c01bdbb26f358bab27f267924aa2c9a03fcfdb8\",\"size\":3,\"timestamp\":1571200000}\n"
|
2020-03-18 10:10:10 -04:00
|
|
|
"pg_data/global/pg_control={\"size\":8192,\"timestamp\":1571200000}\n"
|
|
|
|
"pg_data/pg_xlog/0000000105DA69C000000000={\"size\":16777216,\"timestamp\":1571200002}\n"
|
|
|
|
"pg_data/postgresql.conf={\"checksum\":\"e3db315c260e79211b7b52587123b7aa060f30ab\",\"size\":11"
|
|
|
|
",\"timestamp\":1570000000}\n"
|
|
|
|
"\n"
|
|
|
|
"[target:path]\n"
|
|
|
|
"pg_data={}\n"
|
|
|
|
"pg_data/base={}\n"
|
|
|
|
"pg_data/base/1={}\n"
|
|
|
|
"pg_data/global={}\n"
|
|
|
|
"pg_data/pg_xlog={}\n",
|
2019-12-13 17:14:26 -05:00
|
|
|
"compare file list");
|
|
|
|
|
|
|
|
// Remove test files
|
2021-07-19 15:20:43 -04:00
|
|
|
HRN_STORAGE_PATH_REMOVE(storagePgIdxWrite(1), NULL, .recurse = true);
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PATH_REMOVE(storagePgWrite(), "base/1", .recurse = true);
|
2019-12-13 17:14:26 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("online 11 full backup with tablespaces and page checksums");
|
|
|
|
|
|
|
|
backupTimeStart = BACKUP_EPOCH + 2200000;
|
|
|
|
|
|
|
|
{
|
|
|
|
// Update pg_control
|
2021-11-30 13:23:11 -05:00
|
|
|
HRN_PG_CONTROL_PUT(storagePgWrite(), PG_VERSION_11, .pageChecksum = true, .walSegmentSize = 1024 * 1024);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Update version
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PUT_Z(storagePgWrite(), PG_FILE_PGVERSION, PG_VERSION_11_STR, .timeModified = backupTimeStart);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Update wal path
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PATH_REMOVE(storagePgWrite(), strZ(pgWalPath(PG_VERSION_95)));
|
|
|
|
HRN_STORAGE_PATH_CREATE(storagePgWrite(), strZ(pgWalPath(PG_VERSION_11)), .noParentCreate = true);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Upgrade stanza
|
|
|
|
StringList *argList = strLstNew();
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawBool(argList, cfgOptOnline, false);
|
2021-06-01 09:03:44 -04:00
|
|
|
HRN_CFG_LOAD(cfgCmdStanzaUpgrade, argList);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
cmdStanzaUpgrade();
|
2021-05-22 14:09:45 -04:00
|
|
|
TEST_RESULT_LOG("P00 INFO: stanza-upgrade for stanza 'test1' on repo1");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Load options
|
|
|
|
argList = strLstNew();
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2021-05-03 12:15:39 -04:00
|
|
|
hrnCfgArgRawStrId(argList, cfgOptType, backupTypeFull);
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRawBool(argList, cfgOptRepoHardlink, true);
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptManifestSaveThreshold, "1");
|
|
|
|
hrnCfgArgRawBool(argList, cfgOptArchiveCopy, true);
|
2021-06-01 09:03:44 -04:00
|
|
|
HRN_CFG_LOAD(cfgCmdBackup, argList);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2020-04-21 17:55:36 -04:00
|
|
|
// Move pg1-path and put a link in its place. This tests that backup works when pg1-path is a symlink yet should be
|
|
|
|
// completely invisible in the manifest and logging.
|
2021-05-22 14:22:51 -04:00
|
|
|
HRN_SYSTEM_FMT("mv %s %s-data", strZ(pg1Path), strZ(pg1Path));
|
|
|
|
HRN_SYSTEM_FMT("ln -s %s-data %s ", strZ(pg1Path), strZ(pg1Path));
|
2020-04-21 17:55:36 -04:00
|
|
|
|
2019-12-13 17:14:26 -05:00
|
|
|
// Zeroed file which passes page checksums
|
|
|
|
Buffer *relation = bufNew(PG_PAGE_SIZE_DEFAULT);
|
|
|
|
memset(bufPtr(relation), 0, bufSize(relation));
|
|
|
|
bufUsedSet(relation, bufSize(relation));
|
|
|
|
|
2020-01-23 14:15:58 -07:00
|
|
|
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x00)) = (PageHeaderData){.pd_upper = 0};
|
|
|
|
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PUT(storagePgWrite(), PG_PATH_BASE "/1/1", relation, .timeModified = backupTimeStart);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2022-02-23 12:05:53 -06:00
|
|
|
// File which will fail on alignment
|
|
|
|
relation = bufNew(PG_PAGE_SIZE_DEFAULT + 512);
|
2019-12-13 17:14:26 -05:00
|
|
|
memset(bufPtr(relation), 0, bufSize(relation));
|
|
|
|
bufUsedSet(relation, bufSize(relation));
|
|
|
|
|
2022-02-23 12:05:53 -06:00
|
|
|
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x00)) = (PageHeaderData){.pd_upper = 0xFE};
|
|
|
|
((PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x00)))->pd_checksum = pgPageChecksum(
|
|
|
|
bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x00), 0);
|
|
|
|
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x01)) = (PageHeaderData){.pd_upper = 0xFF};
|
2020-01-23 14:15:58 -07:00
|
|
|
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PUT(storagePgWrite(), PG_PATH_BASE "/1/2", relation, .timeModified = backupTimeStart);
|
2022-05-25 15:27:53 -04:00
|
|
|
const char *rel1_2Sha1 = strZ(bufHex(cryptoHashOne(hashTypeSha1, relation)));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// File with bad page checksums
|
2022-02-23 13:17:14 -06:00
|
|
|
relation = bufNew(PG_PAGE_SIZE_DEFAULT * 5);
|
2019-12-13 17:14:26 -05:00
|
|
|
memset(bufPtr(relation), 0, bufSize(relation));
|
2020-01-23 14:15:58 -07:00
|
|
|
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x00)) = (PageHeaderData){.pd_upper = 0xFF};
|
|
|
|
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x01)) = (PageHeaderData){.pd_upper = 0x00};
|
|
|
|
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x02)) = (PageHeaderData){.pd_upper = 0xFE};
|
|
|
|
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x03)) = (PageHeaderData){.pd_upper = 0xEF};
|
2022-02-23 13:17:14 -06:00
|
|
|
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x04)) = (PageHeaderData){.pd_upper = 0x00};
|
|
|
|
(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x04))[PG_PAGE_SIZE_DEFAULT - 1] = 0xFF;
|
2019-12-13 17:14:26 -05:00
|
|
|
bufUsedSet(relation, bufSize(relation));
|
|
|
|
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PUT(storagePgWrite(), PG_PATH_BASE "/1/3", relation, .timeModified = backupTimeStart);
|
2022-05-25 15:27:53 -04:00
|
|
|
const char *rel1_3Sha1 = strZ(bufHex(cryptoHashOne(hashTypeSha1, relation)));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// File with bad page checksum
|
|
|
|
relation = bufNew(PG_PAGE_SIZE_DEFAULT * 3);
|
|
|
|
memset(bufPtr(relation), 0, bufSize(relation));
|
2020-01-23 14:15:58 -07:00
|
|
|
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x00)) = (PageHeaderData){.pd_upper = 0x00};
|
|
|
|
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x01)) = (PageHeaderData){.pd_upper = 0x08};
|
2022-02-23 12:05:53 -06:00
|
|
|
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x02)) = (PageHeaderData){.pd_upper = 0xFF};
|
|
|
|
((PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x02)))->pd_checksum = pgPageChecksum(
|
|
|
|
bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x02), 2);
|
2019-12-13 17:14:26 -05:00
|
|
|
bufUsedSet(relation, bufSize(relation));
|
|
|
|
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PUT(storagePgWrite(), PG_PATH_BASE "/1/4", relation, .timeModified = backupTimeStart);
|
2022-05-25 15:27:53 -04:00
|
|
|
const char *rel1_4Sha1 = strZ(bufHex(cryptoHashOne(hashTypeSha1, relation)));
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Add a tablespace
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PATH_CREATE(storagePgWrite(), PG_PATH_PGTBLSPC);
|
2019-12-13 17:14:26 -05:00
|
|
|
THROW_ON_SYS_ERROR(
|
2020-07-30 07:49:06 -04:00
|
|
|
symlink("../../pg1-tblspc/32768", strZ(storagePathP(storagePg(), STRDEF(PG_PATH_PGTBLSPC "/32768")))) == -1,
|
2019-12-13 17:14:26 -05:00
|
|
|
FileOpenError, "unable to create symlink");
|
|
|
|
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PUT_EMPTY(
|
|
|
|
storageTest,
|
2022-05-06 12:32:49 -04:00
|
|
|
zNewFmt("pg1-tblspc/32768/%s/1/5", strZ(pgTablespaceId(PG_VERSION_11, hrnPgCatalogVersion(PG_VERSION_11)))),
|
2021-07-15 17:00:20 -04:00
|
|
|
.timeModified = backupTimeStart);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Disable storageFeatureSymLink so tablespace (and latest) symlinks will not be created
|
2021-04-07 14:04:38 -04:00
|
|
|
((Storage *)storageRepoWrite())->pub.interface.feature ^= 1 << storageFeatureSymLink;
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Disable storageFeatureHardLink so hardlinks will not be created
|
2021-04-07 14:04:38 -04:00
|
|
|
((Storage *)storageRepoWrite())->pub.interface.feature ^= 1 << storageFeatureHardLink;
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Run backup
|
2020-03-06 14:41:03 -05:00
|
|
|
testBackupPqScriptP(PG_VERSION_11, backupTimeStart, .walCompressType = compressTypeGz, .walTotal = 3);
|
2022-05-04 12:52:05 -04:00
|
|
|
TEST_RESULT_VOID(testCmdBackup(), "backup");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Reset storage features
|
2021-04-07 14:04:38 -04:00
|
|
|
((Storage *)storageRepoWrite())->pub.interface.feature |= 1 << storageFeatureSymLink;
|
|
|
|
((Storage *)storageRepoWrite())->pub.interface.feature |= 1 << storageFeatureHardLink;
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
TEST_RESULT_LOG(
|
|
|
|
"P00 INFO: execute non-exclusive pg_start_backup(): backup begins after the next regular checkpoint completes\n"
|
|
|
|
"P00 INFO: backup start archive = 0000000105DB5DE000000000, lsn = 5db5de0/0\n"
|
2021-11-18 16:18:10 -05:00
|
|
|
"P00 INFO: check archive for segment 0000000105DB5DE000000000\n"
|
2022-02-23 13:17:14 -06:00
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/base/1/3 (40KB, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P00 WARN: invalid page checksums found in file " TEST_PATH "/pg1/base/1/3 at pages 0, 2-4\n"
|
2021-07-09 13:50:35 -04:00
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/base/1/4 (24KB, [PCT]) checksum [SHA1]\n"
|
2021-05-22 09:30:54 -04:00
|
|
|
"P00 WARN: invalid page checksum found in file " TEST_PATH "/pg1/base/1/4 at page 1\n"
|
2022-02-23 12:05:53 -06:00
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/base/1/2 (8.5KB, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P00 WARN: page misalignment in file " TEST_PATH "/pg1/base/1/2: file size 8704 is not divisible by page size"
|
2021-05-22 09:30:54 -04:00
|
|
|
" 8192\n"
|
2021-07-09 13:50:35 -04:00
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/global/pg_control (8KB, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/base/1/1 (8KB, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/postgresql.conf (11B, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/PG_VERSION (2B, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/pg_tblspc/32768/PG_11_201809051/1/5 (0B, [PCT])\n"
|
2019-12-13 17:14:26 -05:00
|
|
|
"P00 INFO: execute non-exclusive pg_stop_backup() and wait for all WAL segments to archive\n"
|
|
|
|
"P00 INFO: backup stop archive = 0000000105DB5DE000000002, lsn = 5db5de0/280000\n"
|
|
|
|
"P00 DETAIL: wrote 'backup_label' file returned from pg_stop_backup()\n"
|
2021-11-15 14:32:22 -05:00
|
|
|
"P00 DETAIL: wrote 'tablespace_map' file returned from pg_stop_backup()\n"
|
2019-12-13 17:14:26 -05:00
|
|
|
"P00 INFO: check archive for segment(s) 0000000105DB5DE000000000:0000000105DB5DE000000002\n"
|
2021-03-11 08:22:44 -05:00
|
|
|
"P00 DETAIL: copy segment 0000000105DB5DE000000000 to backup\n"
|
|
|
|
"P00 DETAIL: copy segment 0000000105DB5DE000000001 to backup\n"
|
|
|
|
"P00 DETAIL: copy segment 0000000105DB5DE000000002 to backup\n"
|
2021-08-11 13:39:36 -04:00
|
|
|
"P00 INFO: new backup label = 20191027-181320F\n"
|
2021-11-15 14:32:22 -05:00
|
|
|
"P00 INFO: full backup size = [SIZE], file total = 13");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-07-15 13:19:49 -04:00
|
|
|
TEST_RESULT_STR(
|
2019-12-13 17:14:26 -05:00
|
|
|
testBackupValidate(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/20191027-181320F")),
|
2020-07-20 09:47:43 -04:00
|
|
|
strNewFmt(
|
|
|
|
"pg_data {path}\n"
|
|
|
|
"pg_data/PG_VERSION.gz {file, s=2}\n"
|
|
|
|
"pg_data/backup_label.gz {file, s=17}\n"
|
|
|
|
"pg_data/base {path}\n"
|
|
|
|
"pg_data/base/1 {path}\n"
|
|
|
|
"pg_data/base/1/1.gz {file, s=8192}\n"
|
2022-02-23 12:05:53 -06:00
|
|
|
"pg_data/base/1/2.gz {file, s=8704}\n"
|
2022-02-23 13:17:14 -06:00
|
|
|
"pg_data/base/1/3.gz {file, s=40960}\n"
|
2020-07-20 09:47:43 -04:00
|
|
|
"pg_data/base/1/4.gz {file, s=24576}\n"
|
|
|
|
"pg_data/global {path}\n"
|
|
|
|
"pg_data/global/pg_control.gz {file, s=8192}\n"
|
|
|
|
"pg_data/pg_tblspc {path}\n"
|
|
|
|
"pg_data/pg_wal {path}\n"
|
|
|
|
"pg_data/pg_wal/0000000105DB5DE000000000.gz {file, s=1048576}\n"
|
|
|
|
"pg_data/pg_wal/0000000105DB5DE000000001.gz {file, s=1048576}\n"
|
|
|
|
"pg_data/pg_wal/0000000105DB5DE000000002.gz {file, s=1048576}\n"
|
|
|
|
"pg_data/postgresql.conf.gz {file, s=11}\n"
|
2021-11-15 14:32:22 -05:00
|
|
|
"pg_data/tablespace_map.gz {file, s=19}\n"
|
2020-07-20 09:47:43 -04:00
|
|
|
"pg_tblspc {path}\n"
|
|
|
|
"pg_tblspc/32768 {path}\n"
|
|
|
|
"pg_tblspc/32768/PG_11_201809051 {path}\n"
|
|
|
|
"pg_tblspc/32768/PG_11_201809051/1 {path}\n"
|
|
|
|
"pg_tblspc/32768/PG_11_201809051/1/5.gz {file, s=0}\n"
|
|
|
|
"--------\n"
|
|
|
|
"[backup:target]\n"
|
2021-05-22 09:30:54 -04:00
|
|
|
"pg_data={\"path\":\"" TEST_PATH "/pg1\",\"type\":\"path\"}\n"
|
2020-07-20 09:47:43 -04:00
|
|
|
"pg_tblspc/32768={\"path\":\"../../pg1-tblspc/32768\",\"tablespace-id\":\"32768\""
|
|
|
|
",\"tablespace-name\":\"tblspc32768\",\"type\":\"link\"}\n"
|
|
|
|
"\n"
|
|
|
|
"[target:file]\n"
|
|
|
|
"pg_data/PG_VERSION={\"checksum\":\"17ba0791499db908433b80f37c5fbc89b870084b\",\"size\":2"
|
|
|
|
",\"timestamp\":1572200000}\n"
|
|
|
|
"pg_data/backup_label={\"checksum\":\"8e6f41ac87a7514be96260d65bacbffb11be77dc\",\"size\":17"
|
|
|
|
",\"timestamp\":1572200002}\n"
|
|
|
|
"pg_data/base/1/1={\"checksum\":\"0631457264ff7f8d5fb1edc2c0211992a67c73e6\",\"checksum-page\":true"
|
2022-01-20 14:01:10 -05:00
|
|
|
",\"size\":8192,\"timestamp\":1572200000}\n"
|
2022-05-09 12:48:19 -04:00
|
|
|
"pg_data/base/1/2={\"checksum\":\"%s\",\"checksum-page\":false,\"size\":8704,\"timestamp\":1572200000}\n"
|
2022-02-23 13:17:14 -06:00
|
|
|
"pg_data/base/1/3={\"checksum\":\"%s\",\"checksum-page\":false,\"checksum-page-error\":[0,[2,4]]"
|
|
|
|
",\"size\":40960,\"timestamp\":1572200000}\n"
|
2022-01-20 14:01:10 -05:00
|
|
|
"pg_data/base/1/4={\"checksum\":\"%s\",\"checksum-page\":false,\"checksum-page-error\":[1],\"size\":24576"
|
|
|
|
",\"timestamp\":1572200000}\n"
|
2020-07-20 09:47:43 -04:00
|
|
|
"pg_data/global/pg_control={\"size\":8192,\"timestamp\":1572200000}\n"
|
|
|
|
"pg_data/pg_wal/0000000105DB5DE000000000={\"size\":1048576,\"timestamp\":1572200002}\n"
|
|
|
|
"pg_data/pg_wal/0000000105DB5DE000000001={\"size\":1048576,\"timestamp\":1572200002}\n"
|
|
|
|
"pg_data/pg_wal/0000000105DB5DE000000002={\"size\":1048576,\"timestamp\":1572200002}\n"
|
|
|
|
"pg_data/postgresql.conf={\"checksum\":\"e3db315c260e79211b7b52587123b7aa060f30ab\",\"size\":11"
|
|
|
|
",\"timestamp\":1570000000}\n"
|
2021-11-15 14:32:22 -05:00
|
|
|
"pg_data/tablespace_map={\"checksum\":\"87fe624d7976c2144e10afcb7a9a49b071f35e9c\",\"size\":19"
|
|
|
|
",\"timestamp\":1572200002}\n"
|
2022-01-20 14:01:10 -05:00
|
|
|
"pg_tblspc/32768/PG_11_201809051/1/5={\"checksum-page\":true,\"size\":0,\"timestamp\":1572200000}\n"
|
2020-07-20 09:47:43 -04:00
|
|
|
"\n"
|
|
|
|
"[target:link]\n"
|
|
|
|
"pg_data/pg_tblspc/32768={\"destination\":\"../../pg1-tblspc/32768\"}\n"
|
|
|
|
"\n"
|
|
|
|
"[target:path]\n"
|
|
|
|
"pg_data={}\n"
|
|
|
|
"pg_data/base={}\n"
|
|
|
|
"pg_data/base/1={}\n"
|
|
|
|
"pg_data/global={}\n"
|
|
|
|
"pg_data/pg_tblspc={}\n"
|
|
|
|
"pg_data/pg_wal={}\n"
|
|
|
|
"pg_tblspc={}\n"
|
|
|
|
"pg_tblspc/32768={}\n"
|
|
|
|
"pg_tblspc/32768/PG_11_201809051={}\n"
|
|
|
|
"pg_tblspc/32768/PG_11_201809051/1={}\n",
|
2022-05-09 12:48:19 -04:00
|
|
|
rel1_2Sha1, rel1_3Sha1, rel1_4Sha1),
|
2019-12-13 17:14:26 -05:00
|
|
|
"compare file list");
|
|
|
|
|
|
|
|
// Remove test files
|
2021-06-08 14:51:23 -04:00
|
|
|
HRN_STORAGE_REMOVE(storagePgWrite(), "base/1/2", .errorOnMissing = true);
|
|
|
|
HRN_STORAGE_REMOVE(storagePgWrite(), "base/1/3", .errorOnMissing = true);
|
|
|
|
HRN_STORAGE_REMOVE(storagePgWrite(), "base/1/4", .errorOnMissing = true);
|
2019-12-13 17:14:26 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("error when pg_control not present");
|
|
|
|
|
|
|
|
{
|
|
|
|
// Load options
|
|
|
|
StringList *argList = strLstNew();
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2021-05-03 12:15:39 -04:00
|
|
|
hrnCfgArgRawStrId(argList, cfgOptType, backupTypeIncr);
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRawBool(argList, cfgOptRepoHardlink, true);
|
2021-06-01 09:03:44 -04:00
|
|
|
HRN_CFG_LOAD(cfgCmdBackup, argList);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-11-30 13:23:11 -05:00
|
|
|
// Preserve prior timestamp on pg_control
|
|
|
|
testBackupPqScriptP(PG_VERSION_11, BACKUP_EPOCH + 2300000, .errorAfterStart = true);
|
|
|
|
HRN_PG_CONTROL_TIME(storagePg(), backupTimeStart);
|
|
|
|
|
2019-12-13 17:14:26 -05:00
|
|
|
// Run backup
|
|
|
|
TEST_ERROR(
|
2022-05-04 12:52:05 -04:00
|
|
|
testCmdBackup(), FileMissingError,
|
2019-12-13 17:14:26 -05:00
|
|
|
"pg_control must be present in all online backups\n"
|
|
|
|
"HINT: is something wrong with the clock or filesystem timestamps?");
|
|
|
|
|
|
|
|
// Check log
|
|
|
|
TEST_RESULT_LOG(
|
|
|
|
"P00 INFO: last backup label = 20191027-181320F, version = " PROJECT_VERSION "\n"
|
|
|
|
"P00 INFO: execute non-exclusive pg_start_backup(): backup begins after the next regular checkpoint completes\n"
|
2021-11-18 16:18:10 -05:00
|
|
|
"P00 INFO: backup start archive = 0000000105DB764000000000, lsn = 5db7640/0\n"
|
|
|
|
"P00 INFO: check archive for prior segment 0000000105DB763F00000FFF");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
// Remove partial backup so it won't be resumed (since it errored before any checksums were written)
|
2021-07-15 17:00:20 -04:00
|
|
|
HRN_STORAGE_PATH_REMOVE(storageRepoWrite(), STORAGE_REPO_BACKUP "/20191027-181320F_20191028-220000I", .recurse = true);
|
2019-12-13 17:14:26 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("online 11 incr backup with tablespaces");
|
|
|
|
|
|
|
|
backupTimeStart = BACKUP_EPOCH + 2400000;
|
|
|
|
|
|
|
|
{
|
|
|
|
// Load options
|
|
|
|
StringList *argList = strLstNew();
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
|
2021-01-21 15:21:50 -05:00
|
|
|
hrnCfgArgKeyRawZ(argList, cfgOptRepoPath, 1, "/repo-bogus");
|
|
|
|
hrnCfgArgKeyRaw(argList, cfgOptRepoPath, 2, repoPath);
|
|
|
|
hrnCfgArgKeyRawZ(argList, cfgOptRepoRetentionFull, 2, "1");
|
|
|
|
hrnCfgArgKeyRawBool(argList, cfgOptRepoHardlink, 2, true);
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepo, "2");
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
2021-05-03 12:15:39 -04:00
|
|
|
hrnCfgArgRawStrId(argList, cfgOptType, backupTypeIncr);
|
2021-07-15 17:00:20 -04:00
|
|
|
hrnCfgArgRawBool(argList, cfgOptDelta, true);
|
2020-10-19 14:03:48 -04:00
|
|
|
hrnCfgArgRawBool(argList, cfgOptRepoHardlink, true);
|
2021-06-01 09:03:44 -04:00
|
|
|
HRN_CFG_LOAD(cfgCmdBackup, argList);
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2020-01-21 10:29:46 -07:00
|
|
|
// Run backup. Make sure that the timeline selected converts to hexadecimal that can't be interpreted as decimal.
|
2021-11-18 16:18:10 -05:00
|
|
|
testBackupPqScriptP(PG_VERSION_11, backupTimeStart, .timeline = 0x2C, .walTotal = 2);
|
2022-05-04 12:52:05 -04:00
|
|
|
TEST_RESULT_VOID(testCmdBackup(), "backup");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
|
|
|
TEST_RESULT_LOG(
|
|
|
|
"P00 INFO: last backup label = 20191027-181320F, version = " PROJECT_VERSION "\n"
|
|
|
|
"P00 INFO: execute non-exclusive pg_start_backup(): backup begins after the next regular checkpoint completes\n"
|
2020-01-21 10:29:46 -07:00
|
|
|
"P00 INFO: backup start archive = 0000002C05DB8EB000000000, lsn = 5db8eb0/0\n"
|
2021-11-18 16:18:10 -05:00
|
|
|
"P00 INFO: check archive for segment 0000002C05DB8EB000000000\n"
|
2020-01-21 10:29:46 -07:00
|
|
|
"P00 WARN: a timeline switch has occurred since the 20191027-181320F backup, enabling delta checksum\n"
|
2020-06-16 13:20:01 -04:00
|
|
|
" HINT: this is normal after restoring from backup or promoting a standby.\n"
|
2021-12-07 09:21:07 -05:00
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/global/pg_control (8KB, [PCT]) checksum [SHA1]\n"
|
2021-05-22 09:30:54 -04:00
|
|
|
"P01 DETAIL: match file from prior backup " TEST_PATH "/pg1/base/1/1 (8KB, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 DETAIL: match file from prior backup " TEST_PATH "/pg1/postgresql.conf (11B, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 DETAIL: match file from prior backup " TEST_PATH "/pg1/PG_VERSION (2B, [PCT]) checksum [SHA1]\n"
|
2019-12-13 17:14:26 -05:00
|
|
|
"P00 DETAIL: hardlink pg_data/PG_VERSION to 20191027-181320F\n"
|
2021-03-11 14:40:14 -05:00
|
|
|
"P00 DETAIL: hardlink pg_data/base/1/1 to 20191027-181320F\n"
|
2019-12-13 17:14:26 -05:00
|
|
|
"P00 DETAIL: hardlink pg_data/postgresql.conf to 20191027-181320F\n"
|
|
|
|
"P00 DETAIL: hardlink pg_tblspc/32768/PG_11_201809051/1/5 to 20191027-181320F\n"
|
|
|
|
"P00 INFO: execute non-exclusive pg_stop_backup() and wait for all WAL segments to archive\n"
|
2021-11-18 16:18:10 -05:00
|
|
|
"P00 INFO: backup stop archive = 0000002C05DB8EB000000001, lsn = 5db8eb0/180000\n"
|
2019-12-13 17:14:26 -05:00
|
|
|
"P00 DETAIL: wrote 'backup_label' file returned from pg_stop_backup()\n"
|
2021-11-15 14:32:22 -05:00
|
|
|
"P00 DETAIL: wrote 'tablespace_map' file returned from pg_stop_backup()\n"
|
2021-11-18 16:18:10 -05:00
|
|
|
"P00 INFO: check archive for segment(s) 0000002C05DB8EB000000000:0000002C05DB8EB000000001\n"
|
2021-08-11 13:39:36 -04:00
|
|
|
"P00 INFO: new backup label = 20191027-181320F_20191030-014640I\n"
|
2021-11-15 14:32:22 -05:00
|
|
|
"P00 INFO: incr backup size = [SIZE], file total = 7");
|
2019-12-13 17:14:26 -05:00
|
|
|
|
2021-07-15 13:19:49 -04:00
|
|
|
TEST_RESULT_STR_Z(
|
2019-12-13 17:14:26 -05:00
|
|
|
testBackupValidate(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/latest")),
|
|
|
|
". {link, d=20191027-181320F_20191030-014640I}\n"
|
|
|
|
"pg_data {path}\n"
|
|
|
|
"pg_data/PG_VERSION.gz {file, s=2}\n"
|
|
|
|
"pg_data/backup_label.gz {file, s=17}\n"
|
|
|
|
"pg_data/base {path}\n"
|
2021-03-11 14:40:14 -05:00
|
|
|
"pg_data/base/1 {path}\n"
|
|
|
|
"pg_data/base/1/1.gz {file, s=8192}\n"
|
2019-12-13 17:14:26 -05:00
|
|
|
"pg_data/global {path}\n"
|
|
|
|
"pg_data/global/pg_control.gz {file, s=8192}\n"
|
|
|
|
"pg_data/pg_tblspc {path}\n"
|
|
|
|
"pg_data/pg_tblspc/32768 {link, d=../../pg_tblspc/32768}\n"
|
|
|
|
"pg_data/pg_wal {path}\n"
|
|
|
|
"pg_data/postgresql.conf.gz {file, s=11}\n"
|
2021-11-15 14:32:22 -05:00
|
|
|
"pg_data/tablespace_map.gz {file, s=19}\n"
|
2019-12-13 17:14:26 -05:00
|
|
|
"pg_tblspc {path}\n"
|
|
|
|
"pg_tblspc/32768 {path}\n"
|
|
|
|
"pg_tblspc/32768/PG_11_201809051 {path}\n"
|
|
|
|
"pg_tblspc/32768/PG_11_201809051/1 {path}\n"
|
2020-03-18 10:10:10 -04:00
|
|
|
"pg_tblspc/32768/PG_11_201809051/1/5.gz {file, s=0}\n"
|
|
|
|
"--------\n"
|
|
|
|
"[backup:target]\n"
|
2021-05-22 09:30:54 -04:00
|
|
|
"pg_data={\"path\":\"" TEST_PATH "/pg1\",\"type\":\"path\"}\n"
|
2020-11-09 16:26:43 -05:00
|
|
|
"pg_tblspc/32768={\"path\":\"../../pg1-tblspc/32768\",\"tablespace-id\":\"32768\""
|
|
|
|
",\"tablespace-name\":\"tblspc32768\",\"type\":\"link\"}\n"
|
2020-03-18 10:10:10 -04:00
|
|
|
"\n"
|
|
|
|
"[target:file]\n"
|
|
|
|
"pg_data/PG_VERSION={\"checksum\":\"17ba0791499db908433b80f37c5fbc89b870084b\",\"reference\":\"20191027-181320F\""
|
|
|
|
",\"size\":2,\"timestamp\":1572200000}\n"
|
|
|
|
"pg_data/backup_label={\"checksum\":\"8e6f41ac87a7514be96260d65bacbffb11be77dc\",\"size\":17"
|
|
|
|
",\"timestamp\":1572400002}\n"
|
2021-03-11 14:40:14 -05:00
|
|
|
"pg_data/base/1/1={\"checksum\":\"0631457264ff7f8d5fb1edc2c0211992a67c73e6\",\"checksum-page\":true"
|
2022-01-20 14:01:10 -05:00
|
|
|
",\"reference\":\"20191027-181320F\",\"size\":8192,\"timestamp\":1572200000}\n"
|
2021-12-07 09:21:07 -05:00
|
|
|
"pg_data/global/pg_control={\"size\":8192,\"timestamp\":1572400000}\n"
|
2020-03-18 10:10:10 -04:00
|
|
|
"pg_data/postgresql.conf={\"checksum\":\"e3db315c260e79211b7b52587123b7aa060f30ab\""
|
|
|
|
",\"reference\":\"20191027-181320F\",\"size\":11,\"timestamp\":1570000000}\n"
|
2021-11-15 14:32:22 -05:00
|
|
|
"pg_data/tablespace_map={\"checksum\":\"87fe624d7976c2144e10afcb7a9a49b071f35e9c\",\"size\":19"
|
|
|
|
",\"timestamp\":1572400002}\n"
|
2022-01-20 14:01:10 -05:00
|
|
|
"pg_tblspc/32768/PG_11_201809051/1/5={\"checksum-page\":true,\"reference\":\"20191027-181320F\",\"size\":0"
|
|
|
|
",\"timestamp\":1572200000}\n"
|
2020-03-18 10:10:10 -04:00
|
|
|
"\n"
|
|
|
|
"[target:link]\n"
|
|
|
|
"pg_data/pg_tblspc/32768={\"destination\":\"../../pg1-tblspc/32768\"}\n"
|
|
|
|
"\n"
|
|
|
|
"[target:path]\n"
|
|
|
|
"pg_data={}\n"
|
|
|
|
"pg_data/base={}\n"
|
2021-03-11 14:40:14 -05:00
|
|
|
"pg_data/base/1={}\n"
|
2020-03-18 10:10:10 -04:00
|
|
|
"pg_data/global={}\n"
|
|
|
|
"pg_data/pg_tblspc={}\n"
|
|
|
|
"pg_data/pg_wal={}\n"
|
|
|
|
"pg_tblspc={}\n"
|
|
|
|
"pg_tblspc/32768={}\n"
|
|
|
|
"pg_tblspc/32768/PG_11_201809051={}\n"
|
|
|
|
"pg_tblspc/32768/PG_11_201809051/1={}\n",
|
2019-12-13 17:14:26 -05:00
|
|
|
"compare file list");
|
|
|
|
}
|
2022-02-14 13:24:14 -06:00
|
|
|
|
|
|
|
// -------------------------------------------------------------------------------------------------------------------------
|
|
|
|
TEST_TITLE("online 11 full backup with tablespaces and bundles");
|
|
|
|
|
|
|
|
backupTimeStart = BACKUP_EPOCH + 2400000;
|
|
|
|
|
|
|
|
{
|
|
|
|
// Load options
|
|
|
|
StringList *argList = strLstNew();
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptStanza, "test1");
|
|
|
|
hrnCfgArgRaw(argList, cfgOptRepoPath, repoPath);
|
|
|
|
hrnCfgArgRaw(argList, cfgOptPgPath, pg1Path);
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptRepoRetentionFull, "1");
|
|
|
|
hrnCfgArgRawStrId(argList, cfgOptType, backupTypeFull);
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptManifestSaveThreshold, "1");
|
|
|
|
hrnCfgArgRawBool(argList, cfgOptArchiveCopy, true);
|
|
|
|
hrnCfgArgRawZ(argList, cfgOptBufferSize, "16K");
|
2022-03-14 17:49:52 -06:00
|
|
|
hrnCfgArgRawBool(argList, cfgOptRepoBundle, true);
|
2022-02-14 13:24:14 -06:00
|
|
|
HRN_CFG_LOAD(cfgCmdBackup, argList);
|
|
|
|
|
|
|
|
// Set to smaller values than the defaults allow
|
2022-03-14 17:49:52 -06:00
|
|
|
cfgOptionSet(cfgOptRepoBundleSize, cfgSourceParam, VARINT64(PG_PAGE_SIZE_DEFAULT));
|
|
|
|
cfgOptionSet(cfgOptRepoBundleLimit, cfgSourceParam, VARINT64(PG_PAGE_SIZE_DEFAULT));
|
2022-02-14 13:24:14 -06:00
|
|
|
|
|
|
|
// Zeroed file which passes page checksums
|
|
|
|
Buffer *relation = bufNew(PG_PAGE_SIZE_DEFAULT * 3);
|
|
|
|
memset(bufPtr(relation), 0, bufSize(relation));
|
|
|
|
bufUsedSet(relation, bufSize(relation));
|
|
|
|
|
|
|
|
HRN_STORAGE_PUT(storagePgWrite(), PG_PATH_BASE "/1/2", relation, .timeModified = backupTimeStart);
|
|
|
|
|
|
|
|
// Old files
|
|
|
|
HRN_STORAGE_PUT_Z(storagePgWrite(), "postgresql.auto.conf", "CONFIGSTUFF2", .timeModified = 1500000000);
|
|
|
|
HRN_STORAGE_PUT_Z(storagePgWrite(), "stuff.conf", "CONFIGSTUFF3", .timeModified = 1500000000);
|
|
|
|
|
|
|
|
// File that will get skipped while bundling smaller files and end up a bundle by itself
|
|
|
|
Buffer *bigish = bufNew(PG_PAGE_SIZE_DEFAULT - 1);
|
|
|
|
memset(bufPtr(bigish), 0, bufSize(bigish));
|
|
|
|
bufUsedSet(bigish, bufSize(bigish));
|
|
|
|
|
|
|
|
HRN_STORAGE_PUT(storagePgWrite(), "bigish.dat", bigish, .timeModified = 1500000001);
|
|
|
|
|
|
|
|
// Run backup
|
|
|
|
testBackupPqScriptP(PG_VERSION_11, backupTimeStart, .walCompressType = compressTypeGz, .walTotal = 2);
|
2022-05-04 12:52:05 -04:00
|
|
|
TEST_RESULT_VOID(testCmdBackup(), "backup");
|
2022-02-14 13:24:14 -06:00
|
|
|
|
|
|
|
TEST_RESULT_LOG(
|
|
|
|
"P00 INFO: execute non-exclusive pg_start_backup(): backup begins after the next regular checkpoint completes\n"
|
|
|
|
"P00 INFO: backup start archive = 0000000105DB8EB000000000, lsn = 5db8eb0/0\n"
|
|
|
|
"P00 INFO: check archive for segment 0000000105DB8EB000000000\n"
|
|
|
|
"P00 DETAIL: store zero-length file " TEST_PATH "/pg1/pg_tblspc/32768/PG_11_201809051/1/5\n"
|
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/base/1/2 (24KB, [PCT]) checksum [SHA1]\n"
|
2022-03-09 15:34:15 -06:00
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/stuff.conf (bundle 1/0, 12B, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/postgresql.auto.conf (bundle 1/32, 12B, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/postgresql.conf (bundle 1/64, 11B, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/PG_VERSION (bundle 1/95, 2B, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/bigish.dat (bundle 2/0, 8.0KB, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/base/1/1 (bundle 3/0, 8KB, [PCT]) checksum [SHA1]\n"
|
|
|
|
"P01 DETAIL: backup file " TEST_PATH "/pg1/global/pg_control (bundle 4/0, 8KB, [PCT]) checksum [SHA1]\n"
|
2022-02-14 13:24:14 -06:00
|
|
|
"P00 INFO: execute non-exclusive pg_stop_backup() and wait for all WAL segments to archive\n"
|
|
|
|
"P00 INFO: backup stop archive = 0000000105DB8EB000000001, lsn = 5db8eb0/180000\n"
|
|
|
|
"P00 DETAIL: wrote 'backup_label' file returned from pg_stop_backup()\n"
|
|
|
|
"P00 DETAIL: wrote 'tablespace_map' file returned from pg_stop_backup()\n"
|
|
|
|
"P00 INFO: check archive for segment(s) 0000000105DB8EB000000000:0000000105DB8EB000000001\n"
|
|
|
|
"P00 DETAIL: copy segment 0000000105DB8EB000000000 to backup\n"
|
|
|
|
"P00 DETAIL: copy segment 0000000105DB8EB000000001 to backup\n"
|
|
|
|
"P00 INFO: new backup label = 20191030-014640F\n"
|
|
|
|
"P00 INFO: full backup size = [SIZE], file total = 13");
|
|
|
|
|
|
|
|
TEST_RESULT_STR_Z(
|
|
|
|
testBackupValidate(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/latest")),
|
|
|
|
". {link, d=20191030-014640F}\n"
|
|
|
|
"bundle {path}\n"
|
2022-02-17 07:25:12 -06:00
|
|
|
"bundle/1/pg_data/PG_VERSION {file, s=2}\n"
|
|
|
|
"bundle/1/pg_data/postgresql.auto.conf {file, s=12}\n"
|
|
|
|
"bundle/1/pg_data/postgresql.conf {file, s=11}\n"
|
|
|
|
"bundle/1/pg_data/stuff.conf {file, s=12}\n"
|
|
|
|
"bundle/2/pg_data/bigish.dat {file, s=8191}\n"
|
|
|
|
"bundle/3/pg_data/base/1/1 {file, s=8192}\n"
|
|
|
|
"bundle/4/pg_data/global/pg_control {file, s=8192}\n"
|
2022-02-14 13:24:14 -06:00
|
|
|
"pg_data {path}\n"
|
|
|
|
"pg_data/backup_label.gz {file, s=17}\n"
|
2022-02-17 07:25:12 -06:00
|
|
|
"pg_data/base {path}\n"
|
|
|
|
"pg_data/base/1 {path}\n"
|
|
|
|
"pg_data/base/1/2.gz {file, s=24576}\n"
|
2022-02-14 13:24:14 -06:00
|
|
|
"pg_data/pg_wal {path}\n"
|
|
|
|
"pg_data/pg_wal/0000000105DB8EB000000000.gz {file, s=1048576}\n"
|
|
|
|
"pg_data/pg_wal/0000000105DB8EB000000001.gz {file, s=1048576}\n"
|
|
|
|
"pg_data/tablespace_map.gz {file, s=19}\n"
|
|
|
|
"--------\n"
|
|
|
|
"[backup:target]\n"
|
|
|
|
"pg_data={\"path\":\"" TEST_PATH "/pg1\",\"type\":\"path\"}\n"
|
|
|
|
"pg_tblspc/32768={\"path\":\"../../pg1-tblspc/32768\",\"tablespace-id\":\"32768\""
|
|
|
|
",\"tablespace-name\":\"tblspc32768\",\"type\":\"link\"}\n"
|
|
|
|
"\n"
|
|
|
|
"[target:file]\n"
|
|
|
|
"pg_data/PG_VERSION={\"checksum\":\"17ba0791499db908433b80f37c5fbc89b870084b\",\"size\":2"
|
|
|
|
",\"timestamp\":1572200000}\n"
|
|
|
|
"pg_data/backup_label={\"checksum\":\"8e6f41ac87a7514be96260d65bacbffb11be77dc\",\"size\":17"
|
|
|
|
",\"timestamp\":1572400002}\n"
|
|
|
|
"pg_data/base/1/1={\"checksum\":\"0631457264ff7f8d5fb1edc2c0211992a67c73e6\",\"checksum-page\":true,\"size\":8192"
|
|
|
|
",\"timestamp\":1572200000}\n"
|
|
|
|
"pg_data/base/1/2={\"checksum\":\"ebdd38b69cd5b9f2d00d273c981e16960fbbb4f7\",\"checksum-page\":true,\"size\":24576"
|
|
|
|
",\"timestamp\":1572400000}\n"
|
|
|
|
"pg_data/bigish.dat={\"checksum\":\"3e5175386be683d2f231f3fa3eab892a799082f7\",\"size\":8191"
|
|
|
|
",\"timestamp\":1500000001}\n"
|
|
|
|
"pg_data/global/pg_control={\"size\":8192,\"timestamp\":1572400000}\n"
|
|
|
|
"pg_data/pg_wal/0000000105DB8EB000000000={\"size\":1048576,\"timestamp\":1572400002}\n"
|
|
|
|
"pg_data/pg_wal/0000000105DB8EB000000001={\"size\":1048576,\"timestamp\":1572400002}\n"
|
|
|
|
"pg_data/postgresql.auto.conf={\"checksum\":\"e873a5cb5a67e48761e7b619c531311404facdce\",\"size\":12"
|
|
|
|
",\"timestamp\":1500000000}\n"
|
|
|
|
"pg_data/postgresql.conf={\"checksum\":\"e3db315c260e79211b7b52587123b7aa060f30ab\",\"size\":11"
|
|
|
|
",\"timestamp\":1570000000}\n"
|
|
|
|
"pg_data/stuff.conf={\"checksum\":\"55a9d0d18b77789c7722abe72aa905e2dc85bb5d\",\"size\":12"
|
|
|
|
",\"timestamp\":1500000000}\n"
|
|
|
|
"pg_data/tablespace_map={\"checksum\":\"87fe624d7976c2144e10afcb7a9a49b071f35e9c\",\"size\":19"
|
|
|
|
",\"timestamp\":1572400002}\n"
|
|
|
|
"pg_tblspc/32768/PG_11_201809051/1/5={\"checksum-page\":true,\"size\":0,\"timestamp\":1572200000}\n"
|
|
|
|
"\n"
|
|
|
|
"[target:link]\n"
|
|
|
|
"pg_data/pg_tblspc/32768={\"destination\":\"../../pg1-tblspc/32768\"}\n"
|
|
|
|
"\n"
|
|
|
|
"[target:path]\n"
|
|
|
|
"pg_data={}\n"
|
|
|
|
"pg_data/base={}\n"
|
|
|
|
"pg_data/base/1={}\n"
|
|
|
|
"pg_data/global={}\n"
|
|
|
|
"pg_data/pg_tblspc={}\n"
|
|
|
|
"pg_data/pg_wal={}\n"
|
|
|
|
"pg_tblspc={}\n"
|
|
|
|
"pg_tblspc/32768={}\n"
|
|
|
|
"pg_tblspc/32768/PG_11_201809051={}\n"
|
|
|
|
"pg_tblspc/32768/PG_11_201809051/1={}\n",
|
|
|
|
"compare file list");
|
|
|
|
}
|
2019-12-13 17:14:26 -05:00
|
|
|
}
|
|
|
|
|
2021-03-10 18:42:22 -05:00
|
|
|
FUNCTION_HARNESS_RETURN_VOID();
|
2019-07-25 14:34:16 -04:00
|
|
|
}
|