1
0
mirror of https://github.com/pgbackrest/pgbackrest.git synced 2025-11-06 08:49:29 +02:00

Add support for alternate compile-time page sizes.

Alternate page sizes can be selected at compile-time, e.g. 4096. While compile-time settings are generally not well tested by core, some established forks such as Greenplum use them.
This commit is contained in:
Viktor Kurilko
2023-12-14 19:28:52 +03:00
committed by GitHub
parent d205a61949
commit 89d5278b74
17 changed files with 449 additions and 353 deletions

View File

@@ -1,2 +1,16 @@
<release date="XXXX-XX-XX" version="2.50dev" title="Under Development">
<release-core-list>
<release-improvement-list>
<release-item>
<github-pull-request id="2197"/>
<release-item-contributor-list>
<release-item-contributor id="viktor.kurilko"/>
<release-item-reviewer id="david.steele"/>
</release-item-contributor-list>
<p>Add support for alternate compile-time page sizes.</p>
</release-item>
</release-improvement-list>
</release-core-list>
</release>

View File

@@ -999,6 +999,11 @@
<contributor-id type="github">ntrvic</contributor-id>
</contributor>
<contributor id="viktor.kurilko">
<contributor-name-display>Viktor Kurilko</contributor-name-display>
<contributor-id type="github">KnightMurloc</contributor-id>
</contributor>
<contributor id="viorel.tabara">
<contributor-name-display>Viorel Tabara</contributor-name-display>
</contributor>

View File

@@ -167,6 +167,7 @@ typedef struct BackupData
unsigned int timeline; // Primary timeline
unsigned int version; // PostgreSQL version
unsigned int walSegmentSize; // PostgreSQL wal segment size
PgPageSize pageSize; // PostgreSQL page size
} BackupData;
static BackupData *
@@ -229,6 +230,7 @@ backupInit(const InfoBackup *const infoBackup)
result->timeline = pgControl.timeline;
result->version = pgControl.version;
result->walSegmentSize = pgControl.walSegmentSize;
result->pageSize = pgControl.pageSize;
// Validate pg_control info against the stanza
if (result->version != infoPg.version || pgControl.systemId != infoPg.systemId)
@@ -1383,8 +1385,8 @@ Log the results of a job and throw errors
static void
backupJobResult(
Manifest *const manifest, const String *const host, const Storage *const storagePg, StringList *const fileRemove,
ProtocolParallelJob *const job, const bool bundle, const uint64_t sizeTotal, uint64_t *const sizeProgress,
unsigned int *const currentPercentComplete)
ProtocolParallelJob *const job, const bool bundle, const PgPageSize pageSize, const uint64_t sizeTotal,
uint64_t *const sizeProgress, unsigned int *const currentPercentComplete)
{
FUNCTION_LOG_BEGIN(logLevelDebug);
FUNCTION_LOG_PARAM(MANIFEST, manifest);
@@ -1393,6 +1395,7 @@ backupJobResult(
FUNCTION_LOG_PARAM(STRING_LIST, fileRemove);
FUNCTION_LOG_PARAM(PROTOCOL_PARALLEL_JOB, job);
FUNCTION_LOG_PARAM(BOOL, bundle);
FUNCTION_LOG_PARAM(ENUM, pageSize);
FUNCTION_LOG_PARAM(UINT64, sizeTotal);
FUNCTION_LOG_PARAM_P(UINT64, sizeProgress);
FUNCTION_LOG_PARAM_P(UINT, currentPercentComplete);
@@ -1513,7 +1516,7 @@ backupJobResult(
// ??? Update formatting after migration
LOG_WARN_FMT(
"page misalignment in file %s: file size %" PRIu64 " is not divisible by page size %u",
strZ(fileLog), copySize, PG_PAGE_SIZE_DEFAULT);
strZ(fileLog), copySize, pageSize);
}
else
{
@@ -1676,6 +1679,7 @@ typedef struct BackupJobData
RegExp *standbyExp; // Identify files that may be copied from the standby
const CipherType cipherType; // Cipher type
const String *const cipherSubPass; // Passphrase used to encrypt files in the backup
const PgPageSize pageSize; // Page size
const CompressType compressType; // Backup compression type
const int compressLevel; // Compress level if backup is compressed
const bool delta; // Is this a checksum delta backup?
@@ -1990,6 +1994,7 @@ backupJobCallback(void *const data, const unsigned int clientIdx)
pckWriteI32P(param, jobData->compressLevel);
pckWriteU64P(param, jobData->cipherSubPass == NULL ? cipherTypeNone : cipherTypeAes256Cbc);
pckWriteStrP(param, jobData->cipherSubPass);
pckWriteU32P(param, jobData->pageSize);
pckWriteStrP(param, cfgOptionStrNull(cfgOptPgVersionForce));
}
@@ -2098,6 +2103,7 @@ backupProcess(const BackupData *const backupData, Manifest *const manifest, cons
.compressLevel = cfgOptionInt(cfgOptCompressLevel),
.cipherType = cfgOptionStrId(cfgOptRepoCipherType),
.cipherSubPass = manifestCipherSubPass(manifest),
.pageSize = backupData->pageSize,
.delta = cfgOptionBool(cfgOptDelta),
.bundle = cfgOptionBool(cfgOptRepoBundle),
.bundleId = 1,
@@ -2213,7 +2219,7 @@ backupProcess(const BackupData *const backupData, Manifest *const manifest, cons
manifest,
backupStandby && protocolParallelJobProcessId(job) > 1 ? backupData->hostStandby : backupData->hostPrimary,
protocolParallelJobProcessId(job) > 1 ? storagePgIdx(pgIdx) : backupData->storagePrimary,
fileRemove, job, jobData.bundle, sizeTotal, &sizeProgress, &currentPercentComplete);
fileRemove, job, jobData.bundle, jobData.pageSize, sizeTotal, &sizeProgress, &currentPercentComplete);
}
// A keep-alive is required here for the remote holding open the backup connection

View File

@@ -20,7 +20,6 @@ Backup File
#include "common/type/convert.h"
#include "common/type/json.h"
#include "info/manifest.h"
#include "postgres/interface.h"
#include "storage/helper.h"
/***********************************************************************************************************************************
@@ -42,7 +41,7 @@ FN_EXTERN List *
backupFile(
const String *const repoFile, const uint64_t bundleId, const bool bundleRaw, const unsigned int blockIncrReference,
const CompressType repoFileCompressType, const int repoFileCompressLevel, const CipherType cipherType,
const String *const cipherPass, const String *const pgVersionForce, const List *const fileList)
const String *const cipherPass, const String *const pgVersionForce, const PgPageSize pageSize, const List *const fileList)
{
FUNCTION_LOG_BEGIN(logLevelDebug);
FUNCTION_LOG_PARAM(STRING, repoFile); // Repo file
@@ -53,6 +52,7 @@ backupFile(
FUNCTION_LOG_PARAM(INT, repoFileCompressLevel); // Compression level for repo file
FUNCTION_LOG_PARAM(STRING_ID, cipherType); // Encryption type
FUNCTION_TEST_PARAM(STRING, cipherPass); // Password to access the repo file if encrypted
FUNCTION_LOG_PARAM(ENUM, pageSize); // Page size
FUNCTION_LOG_PARAM(STRING, pgVersionForce); // Force pg version
FUNCTION_LOG_PARAM(LIST, fileList); // List of files to backup
FUNCTION_LOG_END();
@@ -60,6 +60,7 @@ backupFile(
ASSERT(repoFile != NULL);
ASSERT((cipherType == cipherTypeNone && cipherPass == NULL) || (cipherType != cipherTypeNone && cipherPass != NULL));
ASSERT(fileList != NULL && !lstEmpty(fileList));
ASSERT(pgPageSizeValid(pageSize));
// Backup file results
List *const result = lstNewP(sizeof(BackupFileResult));
@@ -219,8 +220,8 @@ backupFile(
ioFilterGroupAdd(
ioReadFilterGroup(readIo),
pageChecksumNew(
segmentNumber(file->pgFile), PG_SEGMENT_PAGE_DEFAULT, file->pgFilePageHeaderCheck,
storagePathP(storagePg(), file->pgFile)));
segmentNumber(file->pgFile), PG_SEGMENT_SIZE_DEFAULT / pageSize, pageSize,
file->pgFilePageHeaderCheck, storagePathP(storagePg(), file->pgFile)));
}
// Compress filter

View File

@@ -7,6 +7,7 @@ Backup File
#include "common/compress/helper.h"
#include "common/crypto/common.h"
#include "common/type/keyValue.h"
#include "postgres/interface.h"
/***********************************************************************************************************************************
Backup file types
@@ -62,6 +63,7 @@ typedef struct BackupFileResult
FN_EXTERN List *backupFile(
const String *repoFile, uint64_t bundleId, bool bundleRaw, unsigned int blockIncrReference, CompressType repoFileCompressType,
int repoFileCompressLevel, CipherType cipherType, const String *cipherPass, const String *pgVersionForce, const List *fileList);
int repoFileCompressLevel, CipherType cipherType, const String *cipherPass, const String *pgVersionForce, PgPageSize pageSize,
const List *fileList);
#endif

View File

@@ -10,7 +10,6 @@ Page Checksum Filter
#include "common/macro.h"
#include "common/type/json.h"
#include "common/type/object.h"
#include "postgres/interface.h"
#include "postgres/interface/static.vendor.h"
#include "storage/posix/storage.h"
@@ -20,6 +19,7 @@ Object type
typedef struct PageChecksum
{
unsigned int segmentPageTotal; // Total pages in a segment
PgPageSize pageSize; // Page size
unsigned int pageNoOffset; // Page number offset for subsequent segments
bool headerCheck; // Perform additional header checks?
const String *fileName; // Used to load the file to retry pages
@@ -62,10 +62,10 @@ pageChecksumProcess(THIS_VOID, const Buffer *const input)
ASSERT(input != NULL);
// Calculate total pages in the buffer
unsigned int pageTotal = (unsigned int)(bufUsed(input) / PG_PAGE_SIZE_DEFAULT);
unsigned int pageTotal = (unsigned int)(bufUsed(input) / this->pageSize);
// If there is a partial page make sure there is enough of it to validate the checksum
const unsigned int pageRemainder = (unsigned int)(bufUsed(input) % PG_PAGE_SIZE_DEFAULT);
const unsigned int pageRemainder = (unsigned int)(bufUsed(input) % this->pageSize);
if (pageRemainder != 0)
{
@@ -92,7 +92,7 @@ pageChecksumProcess(THIS_VOID, const Buffer *const input)
for (unsigned int pageIdx = 0; pageIdx < pageTotal; pageIdx++)
{
// Get a pointer to the page header
const PageHeaderData *const pageHeader = (const PageHeaderData *)(bufPtrConst(input) + pageIdx * PG_PAGE_SIZE_DEFAULT);
const PageHeaderData *const pageHeader = (const PageHeaderData *)(bufPtrConst(input) + pageIdx * this->pageSize);
// Block number relative to all segments in the relation
const unsigned int blockNo = this->pageNoOffset + pageIdx;
@@ -107,7 +107,7 @@ pageChecksumProcess(THIS_VOID, const Buffer *const input)
if ((this->headerCheck && pageHeader->pd_upper == 0) || (!this->headerCheck && pageHeader->pd_checksum == 0))
{
// Check that the entire page is zero
for (unsigned int pageIdx = 0; pageIdx < PG_PAGE_SIZE_DEFAULT / sizeof(size_t); pageIdx++)
for (unsigned int pageIdx = 0; pageIdx < this->pageSize / sizeof(size_t); pageIdx++)
{
if (((size_t *)pageHeader)[pageIdx] != 0)
{
@@ -125,10 +125,10 @@ pageChecksumProcess(THIS_VOID, const Buffer *const input)
if (pageValid)
{
// Make a copy of the page since it will be modified by the page checksum function
memcpy(this->pageBuffer, pageHeader, PG_PAGE_SIZE_DEFAULT);
memcpy(this->pageBuffer, pageHeader, this->pageSize);
// Continue if the checksum matches
if (pageHeader->pd_checksum == pgPageChecksum(this->pageBuffer, blockNo))
if (pageHeader->pd_checksum == pgPageChecksum(this->pageBuffer, blockNo, this->pageSize))
continue;
}
@@ -141,11 +141,11 @@ pageChecksumProcess(THIS_VOID, const Buffer *const input)
const Buffer *const pageRetry = storageGetP(
storageNewReadP(
storagePosixNewP(FSLASH_STR), this->fileName,
.offset = (blockNo % this->segmentPageTotal) * PG_PAGE_SIZE_DEFAULT,
.limit = VARUINT64(PG_PAGE_SIZE_DEFAULT)));
.offset = (uint64_t)(blockNo % this->segmentPageTotal) * this->pageSize,
.limit = VARUINT64(this->pageSize)));
// Check if the page has changed since it was last read
changed = !bufEq(pageRetry, BUF(pageHeader, PG_PAGE_SIZE_DEFAULT));
changed = !bufEq(pageRetry, BUF(pageHeader, this->pageSize));
}
MEM_CONTEXT_TEMP_END();
@@ -226,24 +226,30 @@ pageChecksumResult(THIS_VOID)
/**********************************************************************************************************************************/
FN_EXTERN IoFilter *
pageChecksumNew(
const unsigned int segmentNo, const unsigned int segmentPageTotal, const bool headerCheck, const String *const fileName)
const unsigned int segmentNo, const unsigned int segmentPageTotal, const PgPageSize pageSize, const bool headerCheck,
const String *const fileName)
{
FUNCTION_LOG_BEGIN(logLevelTrace);
FUNCTION_LOG_PARAM(UINT, segmentNo);
FUNCTION_LOG_PARAM(UINT, segmentPageTotal);
FUNCTION_LOG_PARAM(ENUM, pageSize);
FUNCTION_LOG_PARAM(BOOL, headerCheck);
FUNCTION_LOG_PARAM(STRING, fileName);
FUNCTION_LOG_END();
ASSERT(pgPageSizeValid(pageSize));
ASSERT(segmentPageTotal > 0 && segmentPageTotal % pageSize == 0);
OBJ_NEW_BEGIN(PageChecksum, .childQty = MEM_CONTEXT_QTY_MAX)
{
*this = (PageChecksum)
{
.segmentPageTotal = segmentPageTotal,
.pageSize = pageSize,
.pageNoOffset = segmentNo * segmentPageTotal,
.headerCheck = headerCheck,
.fileName = strDup(fileName),
.pageBuffer = bufPtr(bufNew(PG_PAGE_SIZE_DEFAULT)),
.pageBuffer = bufPtr(bufNew(pageSize)),
.valid = true,
.align = true,
};
@@ -259,6 +265,7 @@ pageChecksumNew(
pckWriteU32P(packWrite, segmentNo);
pckWriteU32P(packWrite, segmentPageTotal);
pckWriteU32P(packWrite, pageSize);
pckWriteBoolP(packWrite, headerCheck);
pckWriteStrP(packWrite, fileName);
pckWriteEndP(packWrite);
@@ -282,10 +289,11 @@ pageChecksumNewPack(const Pack *const paramList)
PackRead *const paramListPack = pckReadNew(paramList);
const unsigned int segmentNo = pckReadU32P(paramListPack);
const unsigned int segmentPageTotal = pckReadU32P(paramListPack);
const PgPageSize pageSize = (PgPageSize)pckReadU32P(paramListPack);
const bool headerCheck = pckReadBoolP(paramListPack);
const String *const fileName = pckReadStrP(paramListPack);
result = ioFilterMove(pageChecksumNew(segmentNo, segmentPageTotal, headerCheck, fileName), memContextPrior());
result = ioFilterMove(pageChecksumNew(segmentNo, segmentPageTotal, pageSize, headerCheck, fileName), memContextPrior());
}
MEM_CONTEXT_TEMP_END();

View File

@@ -7,6 +7,7 @@ Check all pages in a PostgreSQL relation to ensure the checksums are valid.
#define COMMAND_BACKUP_PAGE_CHECKSUM_H
#include "common/io/filter/filter.h"
#include "postgres/interface.h"
/***********************************************************************************************************************************
Filter type constant
@@ -17,7 +18,7 @@ Filter type constant
Constructors
***********************************************************************************************************************************/
FN_EXTERN IoFilter *pageChecksumNew(
unsigned int segmentNo, unsigned int segmentPageTotal, bool headerCheck, const String *fileName);
unsigned int segmentNo, unsigned int segmentPageTotal, PgPageSize pageSize, bool headerCheck, const String *fileName);
FN_EXTERN IoFilter *pageChecksumNewPack(const Pack *paramList);
#endif

View File

@@ -36,6 +36,7 @@ backupFileProtocol(PackRead *const param, ProtocolServer *const server)
const int repoFileCompressLevel = pckReadI32P(param);
const CipherType cipherType = (CipherType)pckReadU64P(param);
const String *const cipherPass = pckReadStrP(param);
const PgPageSize pageSize = pckReadU32P(param);
const String *const pgVersionForce = pckReadStrP(param);
// Build the file list
@@ -78,7 +79,7 @@ backupFileProtocol(PackRead *const param, ProtocolServer *const server)
// Backup file
const List *const result = backupFile(
repoFile, bundleId, bundleRaw, blockIncrReference, repoFileCompressType, repoFileCompressLevel, cipherType, cipherPass,
pgVersionForce, fileList);
pgVersionForce, pageSize, fileList);
// Return result
PackWrite *const resultPack = protocolPackNew();

View File

@@ -274,8 +274,7 @@ pgControlFromBuffer(const Buffer *controlFile, const String *const pgVersionForc
pgWalSegmentSizeCheck(result.version, result.walSegmentSize);
// Check the page size
if (result.pageSize != PG_PAGE_SIZE_DEFAULT)
THROW_FMT(FormatError, "page size is %u but must be %u", result.pageSize, PG_PAGE_SIZE_DEFAULT);
pgPageSizeCheck(result.pageSize);
FUNCTION_LOG_RETURN(PG_CONTROL, result);
}

View File

@@ -61,11 +61,17 @@ STRING_DECLARE(PG_NAME_WAL_STR);
STRING_DECLARE(PG_NAME_XLOG_STR);
/***********************************************************************************************************************************
Define default page size
Page size can only be changed at compile time and is not known to be well-tested, so only the default page size is supported.
Define allowed page sizes
***********************************************************************************************************************************/
#define PG_PAGE_SIZE_DEFAULT ((unsigned int)(8 * 1024))
// Page sizes (in bytes) that PostgreSQL can be built with (BLCKSZ) and that pgBackRest supports
typedef enum
{
    pgPageSize1 = 1 * 1024,                                         // 1KiB page
    pgPageSize2 = 2 * 1024,                                         // 2KiB page
    pgPageSize4 = 4 * 1024,                                         // 4KiB page
    pgPageSize8 = 8 * 1024,                                         // 8KiB page (PostgreSQL default)
    pgPageSize16 = 16 * 1024,                                       // 16KiB page
    pgPageSize32 = 32 * 1024,                                       // 32KiB page
} PgPageSize;
/***********************************************************************************************************************************
Define default segment size and pages per segment
@@ -73,7 +79,6 @@ Define default segment size and pages per segment
Segment size can only be changed at compile time and is not known to be well-tested, so only the default segment size is supported.
***********************************************************************************************************************************/
#define PG_SEGMENT_SIZE_DEFAULT ((unsigned int)(1 * 1024 * 1024 * 1024))
#define PG_SEGMENT_PAGE_DEFAULT (PG_SEGMENT_SIZE_DEFAULT / PG_PAGE_SIZE_DEFAULT)
/***********************************************************************************************************************************
WAL header size. It doesn't seem worth tracking the exact size of the WAL header across versions of PostgreSQL so just set it to
@@ -101,7 +106,7 @@ typedef struct PgControl
uint64_t checkpoint; // Last checkpoint LSN
uint32_t timeline; // Current timeline
unsigned int pageSize;
PgPageSize pageSize;
unsigned int walSegmentSize;
bool pageChecksum;
@@ -165,7 +170,13 @@ FN_EXTERN StringList *pgLsnRangeToWalSegmentList(
FN_EXTERN const String *pgLsnName(unsigned int pgVersion);
// Calculate the checksum for a page. Page cannot be const because the page header is temporarily modified during processing.
FN_EXTERN uint16_t pgPageChecksum(unsigned char *page, uint32_t blockNo);
FN_EXTERN uint16_t pgPageChecksum(unsigned char *page, uint32_t blockNo, PgPageSize pageSize);
// Returns true if page size is valid, false otherwise
FN_EXTERN bool pgPageSizeValid(PgPageSize pageSize);
// Throws an error if page size is not valid
FN_EXTERN void pgPageSizeCheck(PgPageSize pageSize);
FN_EXTERN const String *pgWalName(unsigned int pgVersion);

View File

@@ -1,5 +1,7 @@
/***********************************************************************************************************************************
PostgreSQL Page Interface
PostgreSQL Page Checksum
Adapted from PostgreSQL src/include/storage/checksum_impl.h.
***********************************************************************************************************************************/
#include "build.auto.h"
@@ -8,13 +10,144 @@ PostgreSQL Page Interface
#include "postgres/interface/static.vendor.h"
/***********************************************************************************************************************************
Include the page checksum code
Page checksum calculation
***********************************************************************************************************************************/
#include "postgres/interface/pageChecksum.vendor.c.inc"
// Number of checksums to calculate in parallel
#define PARALLEL_SUM 32
// Prime multiplier of FNV-1a hash
#define FNV_PRIME 16777619
// Calculate one round of the checksum
#define CHECKSUM_ROUND(checksum, value) \
do \
{ \
const uint32_t tmp = (checksum) ^ (value); \
checksum = tmp * FNV_PRIME ^ (tmp >> 17); \
} while (0)
// Main calculation loop
#define CHECKSUM_CASE(pageSize) \
case pageSize: \
for (uint32_t i = 0; i < (uint32) (pageSize / (sizeof(uint32) * PARALLEL_SUM)); i++) \
for (uint32_t j = 0; j < PARALLEL_SUM; j++) \
CHECKSUM_ROUND(sums[j], ((PgPageChecksum##pageSize *)page)->data[i][j]); \
\
break;
/***********************************************************************************************************************************
Define unions that will make the code valid under strict aliasing for each page size
***********************************************************************************************************************************/
#define CHECKSUM_UNION(pageSize) \
typedef union \
{ \
PageHeaderData phdr; \
uint32_t data[pageSize / (sizeof(uint32_t) * PARALLEL_SUM)][PARALLEL_SUM]; \
} PgPageChecksum##pageSize;
CHECKSUM_UNION(pgPageSize1);
CHECKSUM_UNION(pgPageSize2);
CHECKSUM_UNION(pgPageSize4);
CHECKSUM_UNION(pgPageSize8);
CHECKSUM_UNION(pgPageSize16);
CHECKSUM_UNION(pgPageSize32);
/**********************************************************************************************************************************/
// Calculate the checksum for a PostgreSQL page. The page is temporarily modified (pd_checksum zeroed) during calculation, then
// restored -- updating the stored checksum is NOT part of this function's API. Throws via pgPageSizeCheck() when pageSize is not
// one of the supported compile-time sizes.
// NOTE(review): this span as scraped interleaved the pre-commit signature and body line with the new implementation (a diff
// rendering artifact); only the post-commit implementation is kept here.
FN_EXTERN uint16_t
pgPageChecksum(unsigned char *const page, const uint32_t blockNo, const PgPageSize pageSize)
{
    FUNCTION_TEST_BEGIN();
        FUNCTION_TEST_PARAM_P(UCHARDATA, page);
        FUNCTION_TEST_PARAM(UINT, blockNo);
        FUNCTION_TEST_PARAM(ENUM, pageSize);
    FUNCTION_TEST_END();

    // Save pd_checksum and temporarily set it to zero, so that the checksum calculation isn't affected by the old checksum stored
    // on the page. Restore it after, because actually updating the checksum is NOT part of the API of this function.
    const uint16_t checksumPrior = ((PageHeaderData *)page)->pd_checksum;
    ((PageHeaderData *)page)->pd_checksum = 0;

    // Initialize partial checksums to their corresponding offsets
    uint32_t sums[PARALLEL_SUM] =
    {
        0x5b1f36e9, 0xb8525960, 0x02ab50aa, 0x1de66d2a, 0x79ff467a, 0x9bb9f8a3, 0x217e7cd2, 0x83e13d2c,
        0xf8d4474f, 0xe39eb970, 0x42c6ae16, 0x993216fa, 0x7b093b5d, 0x98daff3c, 0xf718902a, 0x0b1c9cdb,
        0xe58f764b, 0x187636bc, 0x5d7b3bb1, 0xe73de7de, 0x92bec979, 0xcca6c0b2, 0x304a0979, 0x85aa43d4,
        0x783125bb, 0x6ca8eaa2, 0xe407eac6, 0x4b5cfc3e, 0x9fbf8c76, 0x15ca20be, 0xf2ca9fd3, 0x959bd756,
    };

    // Main checksum calculation
    switch (pageSize)
    {
        CHECKSUM_CASE(pgPageSize8);                                 // Default page size should be checked first
        CHECKSUM_CASE(pgPageSize1);
        CHECKSUM_CASE(pgPageSize2);
        CHECKSUM_CASE(pgPageSize4);
        CHECKSUM_CASE(pgPageSize16);
        CHECKSUM_CASE(pgPageSize32);

        default:
            pgPageSizeCheck(pageSize);
    }

    // Add in two rounds of zeroes for additional mixing
    for (uint32_t i = 0; i < 2; i++)
        for (uint32_t j = 0; j < PARALLEL_SUM; j++)
            CHECKSUM_ROUND(sums[j], 0);

    // Xor fold partial checksums together
    uint32 result = 0;

    for (uint32_t i = 0; i < PARALLEL_SUM; i++)
        result ^= sums[i];

    // Restore prior checksum
    ((PageHeaderData *)page)->pd_checksum = checksumPrior;

    // Mix in the block number to detect transposed pages
    result ^= blockNo;

    // Reduce to a uint16 (to fit in the pd_checksum field) with an offset of one. That avoids checksums of zero, which seems like a
    // good idea.
    FUNCTION_TEST_RETURN(UINT16, (uint16_t)((result % 65535) + 1));
}
/**********************************************************************************************************************************/
// Returns true when pageSize is one of the page sizes PostgreSQL can be compiled with, false otherwise
FN_EXTERN bool
pgPageSizeValid(const PgPageSize pageSize)
{
    FUNCTION_TEST_BEGIN();
        FUNCTION_TEST_PARAM(ENUM, pageSize);
    FUNCTION_TEST_END();

    // Page size is valid only when it exactly matches one of the supported enum values
    const bool result =
        pageSize == pgPageSize1 || pageSize == pgPageSize2 || pageSize == pgPageSize4 || pageSize == pgPageSize8 ||
        pageSize == pgPageSize16 || pageSize == pgPageSize32;

    FUNCTION_TEST_RETURN(BOOL, result);
}
/**********************************************************************************************************************************/
// Throw a FormatError when pageSize is not one of the supported compile-time page sizes; no-op otherwise
FN_EXTERN void
pgPageSizeCheck(const PgPageSize pageSize)
{
    FUNCTION_TEST_BEGIN();
        FUNCTION_TEST_PARAM(ENUM, pageSize);
    FUNCTION_TEST_END();

    // Validate the page size and report all supported sizes on failure
    const bool valid = pgPageSizeValid(pageSize);

    if (!valid)
    {
        THROW_FMT(
            FormatError, "page size is %u but only %i, %i, %i, %i, %i, and %i are supported", pageSize, pgPageSize1, pgPageSize2,
            pgPageSize4, pgPageSize8, pgPageSize16, pgPageSize32);
    }

    FUNCTION_TEST_RETURN_VOID();
}

View File

@@ -1,227 +0,0 @@
/***********************************************************************************************************************************
PostgreSQL Page Checksum Algorithm
For each supported release of PostgreSQL check the code in this file to see if it has changed. The easiest way to do this is to
copy and paste in place and check git to see if there are any diffs. Tabs should be copied as is to make this process easy even
though the pgBackRest project does not use tabs elsewhere.
Since the checksum implementation and page format do not (yet) change between versions this code should be copied verbatim from
src/include/storage/checksum_impl.h for each new release. Only the newest released version of the code should be used.
Modifications need to be made after copying:
1) Remove `#include "storage/bufpage.h"`.
2) Make pg_checksum_page() static.
3) Remove Assert(!PageIsNew(&cpage->phdr)).
***********************************************************************************************************************************/
/*-------------------------------------------------------------------------
*
* checksum_impl.h
* Checksum implementation for data pages.
*
* This file exists for the benefit of external programs that may wish to
* check Postgres page checksums. They can #include this to get the code
* referenced by storage/checksum.h. (Note: you may need to redefine
* Assert() as empty to compile this successfully externally.)
*
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/storage/checksum_impl.h
*
*-------------------------------------------------------------------------
*/
/*
* The algorithm used to checksum pages is chosen for very fast calculation.
* Workloads where the database working set fits into OS file cache but not
* into shared buffers can read in pages at a very fast pace and the checksum
* algorithm itself can become the largest bottleneck.
*
* The checksum algorithm itself is based on the FNV-1a hash (FNV is shorthand
* for Fowler/Noll/Vo). The primitive of a plain FNV-1a hash folds in data 1
* byte at a time according to the formula:
*
* hash = (hash ^ value) * FNV_PRIME
*
* FNV-1a algorithm is described at http://www.isthe.com/chongo/tech/comp/fnv/
*
* PostgreSQL doesn't use FNV-1a hash directly because it has bad mixing of
* high bits - high order bits in input data only affect high order bits in
* output data. To resolve this we xor in the value prior to multiplication
* shifted right by 17 bits. The number 17 was chosen because it doesn't
* have common denominator with set bit positions in FNV_PRIME and empirically
* provides the fastest mixing for high order bits of final iterations quickly
* avalanche into lower positions. For performance reasons we choose to combine
* 4 bytes at a time. The actual hash formula used as the basis is:
*
* hash = (hash ^ value) * FNV_PRIME ^ ((hash ^ value) >> 17)
*
* The main bottleneck in this calculation is the multiplication latency. To
* hide the latency and to make use of SIMD parallelism multiple hash values
* are calculated in parallel. The page is treated as a 32 column two
* dimensional array of 32 bit values. Each column is aggregated separately
* into a partial checksum. Each partial checksum uses a different initial
* value (offset basis in FNV terminology). The initial values actually used
* were chosen randomly, as the values themselves don't matter as much as that
* they are different and don't match anything in real data. After initializing
* partial checksums each value in the column is aggregated according to the
* above formula. Finally two more iterations of the formula are performed with
* value 0 to mix the bits of the last value added.
*
* The partial checksums are then folded together using xor to form a single
* 32-bit checksum. The caller can safely reduce the value to 16 bits
* using modulo 2^16-1. That will cause a very slight bias towards lower
* values but this is not significant for the performance of the
* checksum.
*
* The algorithm choice was based on what instructions are available in SIMD
* instruction sets. This meant that a fast and good algorithm needed to use
* multiplication as the main mixing operator. The simplest multiplication
* based checksum primitive is the one used by FNV. The prime used is chosen
* for good dispersion of values. It has no known simple patterns that result
* in collisions. Test of 5-bit differentials of the primitive over 64bit keys
* reveals no differentials with 3 or more values out of 100000 random keys
* colliding. Avalanche test shows that only high order bits of the last word
* have a bias. Tests of 1-4 uncorrelated bit errors, stray 0 and 0xFF bytes,
* overwriting page from random position to end with 0 bytes, and overwriting
* random segments of page with 0x00, 0xFF and random data all show optimal
* 2e-16 false positive rate within margin of error.
*
* Vectorization of the algorithm requires 32bit x 32bit -> 32bit integer
* multiplication instruction. As of 2013 the corresponding instruction is
* available on x86 SSE4.1 extensions (pmulld) and ARM NEON (vmul.i32).
* Vectorization requires a compiler to do the vectorization for us. For recent
* GCC versions the flags -msse4.1 -funroll-loops -ftree-vectorize are enough
* to achieve vectorization.
*
* The optimal amount of parallelism to use depends on CPU specific instruction
* latency, SIMD instruction width, throughput and the amount of registers
* available to hold intermediate state. Generally, more parallelism is better
* up to the point that state doesn't fit in registers and extra load-store
* instructions are needed to swap values in/out. The number chosen is a fixed
* part of the algorithm because changing the parallelism changes the checksum
* result.
*
* The parallelism number 32 was chosen based on the fact that it is the
* largest state that fits into architecturally visible x86 SSE registers while
* leaving some free registers for intermediate values. For future processors
* with 256bit vector registers this will leave some performance on the table.
* When vectorization is not available it might be beneficial to restructure
* the computation to calculate a subset of the columns at a time and perform
* multiple passes to avoid register spilling. This optimization opportunity
* is not used. Current coding also assumes that the compiler has the ability
* to unroll the inner loop to avoid loop overhead and minimize register
* spilling. For less sophisticated compilers it might be beneficial to
* manually unroll the inner loop.
*/
/* number of checksums to calculate in parallel */
#define N_SUMS 32
/* prime multiplier of FNV-1a hash */
#define FNV_PRIME 16777619
/* Use a union so that this code is valid under strict aliasing */
typedef union
{
PageHeaderData phdr;
uint32 data[BLCKSZ / (sizeof(uint32) * N_SUMS)][N_SUMS];
} PGChecksummablePage;
/*
* Base offsets to initialize each of the parallel FNV hashes into a
* different initial state.
*/
static const uint32 checksumBaseOffsets[N_SUMS] = {
0x5B1F36E9, 0xB8525960, 0x02AB50AA, 0x1DE66D2A,
0x79FF467A, 0x9BB9F8A3, 0x217E7CD2, 0x83E13D2C,
0xF8D4474F, 0xE39EB970, 0x42C6AE16, 0x993216FA,
0x7B093B5D, 0x98DAFF3C, 0xF718902A, 0x0B1C9CDB,
0xE58F764B, 0x187636BC, 0x5D7B3BB1, 0xE73DE7DE,
0x92BEC979, 0xCCA6C0B2, 0x304A0979, 0x85AA43D4,
0x783125BB, 0x6CA8EAA2, 0xE407EAC6, 0x4B5CFC3E,
0x9FBF8C76, 0x15CA20BE, 0xF2CA9FD3, 0x959BD756
};
/*
* Calculate one round of the checksum.
*/
#define CHECKSUM_COMP(checksum, value) \
do { \
uint32 __tmp = (checksum) ^ (value); \
(checksum) = __tmp * FNV_PRIME ^ (__tmp >> 17); \
} while (0)
/*
 * Block checksum algorithm.  The page must be adequately aligned
 * (at least on 4-byte boundary).
 *
 * Runs N_SUMS independent partial checksums, one per column of the page's
 * uint32 view, then xor-folds them into a single uint32.  The loop shape and
 * operation order are part of the algorithm definition: any change to the
 * parallelism or sequencing changes the resulting checksum, so this code is
 * kept byte-identical to upstream PostgreSQL.
 */
static uint32
pg_checksum_block(const PGChecksummablePage *page)
{
    uint32 sums[N_SUMS];
    uint32 result = 0;
    uint32 i,
        j;

    /* ensure that the size is compatible with the algorithm */
    Assert(sizeof(PGChecksummablePage) == BLCKSZ);

    /* initialize partial checksums to their corresponding offsets */
    memcpy(sums, checksumBaseOffsets, sizeof(checksumBaseOffsets));

    /* main checksum calculation: one CHECKSUM_COMP round per uint32 word */
    for (i = 0; i < (uint32) (BLCKSZ / (sizeof(uint32) * N_SUMS)); i++)
        for (j = 0; j < N_SUMS; j++)
            CHECKSUM_COMP(sums[j], page->data[i][j]);

    /* finally add in two rounds of zeroes for additional mixing */
    for (i = 0; i < 2; i++)
        for (j = 0; j < N_SUMS; j++)
            CHECKSUM_COMP(sums[j], 0);

    /* xor fold partial checksums together */
    for (i = 0; i < N_SUMS; i++)
        result ^= sums[i];

    return result;
}
/*
 * Compute the checksum for a Postgres page.
 *
 * The page must be adequately aligned (at least on a 4-byte boundary).
 * Beware also that the checksum field of the page is transiently zeroed.
 *
 * The checksum includes the block number (to detect the case where a page is
 * somehow moved to a different location), the page header (excluding the
 * checksum itself), and the page data.
 *
 * Returns a value in 1..65535 (never 0 -- see the reduction comment below).
 */
static uint16
pg_checksum_page(char *page, BlockNumber blkno)
{
    PGChecksummablePage *cpage = (PGChecksummablePage *) page;
    uint16 save_checksum;
    uint32 checksum;

    /*
     * Save pd_checksum and temporarily set it to zero, so that the checksum
     * calculation isn't affected by the old checksum stored on the page.
     * Restore it after, because actually updating the checksum is NOT part of
     * the API of this function.  Because the page is written to here, even
     * though the change is reverted, this function is not safe to run
     * concurrently against the same page buffer.
     */
    save_checksum = cpage->phdr.pd_checksum;
    cpage->phdr.pd_checksum = 0;
    checksum = pg_checksum_block(cpage);
    cpage->phdr.pd_checksum = save_checksum;

    /* Mix in the block number to detect transposed pages */
    checksum ^= blkno;

    /*
     * Reduce to a uint16 (to fit in the pd_checksum field) with an offset of
     * one.  That avoids checksums of zero, which seems like a good idea.
     * (checksum % 65535) yields 0..65534, so the final result is 1..65535.
     */
    return (uint16) ((checksum % 65535) + 1);
}

View File

@@ -23,16 +23,6 @@ all versions of PostgreSQL supported by pgBackRest.
#include "common/assert.h"
#include "postgres/interface.h"
/***********************************************************************************************************************************
Define Assert() as ASSERT()
***********************************************************************************************************************************/
#define Assert(condition) ASSERT(condition)
/***********************************************************************************************************************************
Define BLCKSZ as PG_PAGE_SIZE_DEFAULT
***********************************************************************************************************************************/
#define BLCKSZ PG_PAGE_SIZE_DEFAULT
/***********************************************************************************************************************************
Types from src/include/c.h
***********************************************************************************************************************************/

View File

@@ -2071,10 +2071,6 @@ src/postgres/interface/page.c:
class: core
type: c
src/postgres/interface/pageChecksum.vendor.c.inc:
class: core/vendor
type: c
src/postgres/interface/static.vendor.h:
class: core/vendor
type: c/h

View File

@@ -192,7 +192,7 @@ hrnPgControlToBuffer(const unsigned int controlVersion, const unsigned int crc,
ASSERT(pgControl.version != 0);
// Set defaults if values are not passed
pgControl.pageSize = pgControl.pageSize == 0 ? PG_PAGE_SIZE_DEFAULT : pgControl.pageSize;
pgControl.pageSize = pgControl.pageSize == 0 ? pgPageSize8 : pgControl.pageSize;
pgControl.walSegmentSize = pgControl.walSegmentSize == 0 ? PG_WAL_SEGMENT_SIZE_DEFAULT : pgControl.walSegmentSize;
pgControl.catalogVersion =
pgControl.catalogVersion == 0 ? hrnPgInterfaceVersion(pgControl.version)->catalogVersion() : pgControl.catalogVersion;

View File

@@ -674,6 +674,8 @@ testRun(void)
// *****************************************************************************************************************************
if (testBegin("PageChecksum"))
{
#define PG_SEGMENT_PAGE_DEFAULT (PG_SEGMENT_SIZE_DEFAULT / pgPageSize8)
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("segment page default");
@@ -686,13 +688,14 @@ testRun(void)
bufUsedSet(buffer, bufSize(buffer));
memset(bufPtr(buffer), 0, bufSize(buffer));
*(PageHeaderData *)(bufPtr(buffer) + (PG_PAGE_SIZE_DEFAULT * 0x00)) = (PageHeaderData){.pd_upper = 0};
*(PageHeaderData *)(bufPtr(buffer) + (pgPageSize8 * 0x00)) = (PageHeaderData){.pd_upper = 0};
Buffer *bufferOut = bufNew(513);
IoWrite *write = ioBufferWriteNew(bufferOut);
ioFilterGroupAdd(
ioWriteFilterGroup(write),
pageChecksumNewPack(ioFilterParamList(pageChecksumNew(0, PG_SEGMENT_PAGE_DEFAULT, true, STRDEF(BOGUS_STR)))));
pageChecksumNewPack(
ioFilterParamList(pageChecksumNew(0, PG_SEGMENT_PAGE_DEFAULT, pgPageSize8, true, STRDEF(BOGUS_STR)))));
ioWriteOpen(write);
ioWrite(write, buffer);
TEST_ERROR(ioWrite(write, buffer), AssertError, "should not be possible to see two misaligned pages in a row");
@@ -701,29 +704,29 @@ testRun(void)
TEST_TITLE("retry a page with an invalid checksum");
// Write to file with valid checksums
buffer = bufNew(PG_PAGE_SIZE_DEFAULT * 4);
buffer = bufNew(pgPageSize8 * 4);
memset(bufPtr(buffer), 0, bufSize(buffer));
bufUsedSet(buffer, bufSize(buffer));
*(PageHeaderData *)(bufPtr(buffer) + (PG_PAGE_SIZE_DEFAULT * 0x00)) = (PageHeaderData){.pd_upper = 0x00};
*(PageHeaderData *)(bufPtr(buffer) + (PG_PAGE_SIZE_DEFAULT * 0x01)) = (PageHeaderData){.pd_upper = 0xFF};
((PageHeaderData *)(bufPtr(buffer) + (PG_PAGE_SIZE_DEFAULT * 0x01)))->pd_checksum = pgPageChecksum(
bufPtr(buffer) + (PG_PAGE_SIZE_DEFAULT * 0x01), 1);
*(PageHeaderData *)(bufPtr(buffer) + (PG_PAGE_SIZE_DEFAULT * 0x02)) = (PageHeaderData){.pd_upper = 0x00};
*(PageHeaderData *)(bufPtr(buffer) + (PG_PAGE_SIZE_DEFAULT * 0x03)) = (PageHeaderData){.pd_upper = 0xFE};
((PageHeaderData *)(bufPtr(buffer) + (PG_PAGE_SIZE_DEFAULT * 0x03)))->pd_checksum = pgPageChecksum(
bufPtr(buffer) + (PG_PAGE_SIZE_DEFAULT * 0x03), 3);
*(PageHeaderData *)(bufPtr(buffer) + (pgPageSize8 * 0x00)) = (PageHeaderData){.pd_upper = 0x00};
*(PageHeaderData *)(bufPtr(buffer) + (pgPageSize8 * 0x01)) = (PageHeaderData){.pd_upper = 0xFF};
((PageHeaderData *)(bufPtr(buffer) + (pgPageSize8 * 0x01)))->pd_checksum = pgPageChecksum(
bufPtr(buffer) + (pgPageSize8 * 0x01), 1, pgPageSize8);
*(PageHeaderData *)(bufPtr(buffer) + (pgPageSize8 * 0x02)) = (PageHeaderData){.pd_upper = 0x00};
*(PageHeaderData *)(bufPtr(buffer) + (pgPageSize8 * 0x03)) = (PageHeaderData){.pd_upper = 0xFE};
((PageHeaderData *)(bufPtr(buffer) + (pgPageSize8 * 0x03)))->pd_checksum = pgPageChecksum(
bufPtr(buffer) + (pgPageSize8 * 0x03), 3, pgPageSize8);
HRN_STORAGE_PUT(storageTest, "relation", buffer);
// Now break the checksum to force a retry
((PageHeaderData *)(bufPtr(buffer) + (PG_PAGE_SIZE_DEFAULT * 0x01)))->pd_checksum = 0;
((PageHeaderData *)(bufPtr(buffer) + (PG_PAGE_SIZE_DEFAULT * 0x03)))->pd_checksum = 0;
((PageHeaderData *)(bufPtr(buffer) + (pgPageSize8 * 0x01)))->pd_checksum = 0;
((PageHeaderData *)(bufPtr(buffer) + (pgPageSize8 * 0x03)))->pd_checksum = 0;
write = ioBufferWriteNew(bufferOut);
ioFilterGroupAdd(
ioWriteFilterGroup(write),
pageChecksumNew(0, PG_SEGMENT_PAGE_DEFAULT, true, storagePathP(storageTest, STRDEF("relation"))));
pageChecksumNew(0, PG_SEGMENT_PAGE_DEFAULT, pgPageSize8, true, storagePathP(storageTest, STRDEF("relation"))));
ioWriteOpen(write);
ioWrite(write, buffer);
ioWriteClose(write);
@@ -1808,7 +1811,8 @@ testRun(void)
unsigned int currentPercentComplete = 0;
TEST_ERROR(
backupJobResult((Manifest *)1, NULL, storageTest, strLstNew(), job, false, 0, NULL, &currentPercentComplete),
backupJobResult(
(Manifest *)1, NULL, storageTest, strLstNew(), job, false, pgPageSize8, 0, NULL, &currentPercentComplete),
AssertError, "error message");
// -------------------------------------------------------------------------------------------------------------------------
@@ -1843,7 +1847,8 @@ testRun(void)
TEST_RESULT_VOID(
backupJobResult(
manifest, STRDEF("host"), storageTest, strLstNew(), job, false, 0, &sizeProgress, &currentPercentComplete),
manifest, STRDEF("host"), storageTest, strLstNew(), job, false, pgPageSize8, 0, &sizeProgress,
&currentPercentComplete),
"log noop result");
TEST_RESULT_VOID(lockRelease(true), "release backup lock");
@@ -2791,49 +2796,49 @@ testRun(void)
HRN_SYSTEM_FMT("ln -s %s-data %s ", strZ(pg1Path), strZ(pg1Path));
// Zeroed file which passes page checksums
Buffer *relation = bufNew(PG_PAGE_SIZE_DEFAULT * 2);
Buffer *relation = bufNew(pgPageSize8 * 2);
memset(bufPtr(relation), 0, bufSize(relation));
bufUsedSet(relation, bufSize(relation));
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x00)) = (PageHeaderData){.pd_upper = 0};
*(PageHeaderData *)(bufPtr(relation) + (pgPageSize8 * 0x00)) = (PageHeaderData){.pd_upper = 0};
HRN_STORAGE_PUT(storagePgWrite(), PG_PATH_BASE "/1/1", relation, .timeModified = backupTimeStart);
// File which will fail on alignment
relation = bufNew(PG_PAGE_SIZE_DEFAULT + 512);
relation = bufNew(pgPageSize8 + 512);
memset(bufPtr(relation), 0, bufSize(relation));
bufUsedSet(relation, bufSize(relation));
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x00)) = (PageHeaderData){.pd_upper = 0xFE};
((PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x00)))->pd_checksum = pgPageChecksum(
bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x00), 0);
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x01)) = (PageHeaderData){.pd_upper = 0xFF};
*(PageHeaderData *)(bufPtr(relation) + (pgPageSize8 * 0x00)) = (PageHeaderData){.pd_upper = 0xFE};
((PageHeaderData *)(bufPtr(relation) + (pgPageSize8 * 0x00)))->pd_checksum = pgPageChecksum(
bufPtr(relation) + (pgPageSize8 * 0x00), 0, pgPageSize8);
*(PageHeaderData *)(bufPtr(relation) + (pgPageSize8 * 0x01)) = (PageHeaderData){.pd_upper = 0xFF};
HRN_STORAGE_PUT(storagePgWrite(), PG_PATH_BASE "/1/2", relation, .timeModified = backupTimeStart);
const char *rel1_2Sha1 = strZ(strNewEncode(encodingHex, cryptoHashOne(hashTypeSha1, relation)));
// File with bad page checksums
relation = bufNew(PG_PAGE_SIZE_DEFAULT * 5);
relation = bufNew(pgPageSize8 * 5);
memset(bufPtr(relation), 0, bufSize(relation));
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x00)) = (PageHeaderData){.pd_upper = 0xFF};
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x01)) = (PageHeaderData){.pd_upper = 0x00};
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x02)) = (PageHeaderData){.pd_upper = 0xFE};
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x03)) = (PageHeaderData){.pd_upper = 0xEF};
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x04)) = (PageHeaderData){.pd_upper = 0x00};
(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x04))[PG_PAGE_SIZE_DEFAULT - 1] = 0xFF;
*(PageHeaderData *)(bufPtr(relation) + (pgPageSize8 * 0x00)) = (PageHeaderData){.pd_upper = 0xFF};
*(PageHeaderData *)(bufPtr(relation) + (pgPageSize8 * 0x01)) = (PageHeaderData){.pd_upper = 0x00};
*(PageHeaderData *)(bufPtr(relation) + (pgPageSize8 * 0x02)) = (PageHeaderData){.pd_upper = 0xFE};
*(PageHeaderData *)(bufPtr(relation) + (pgPageSize8 * 0x03)) = (PageHeaderData){.pd_upper = 0xEF};
*(PageHeaderData *)(bufPtr(relation) + (pgPageSize8 * 0x04)) = (PageHeaderData){.pd_upper = 0x00};
(bufPtr(relation) + (pgPageSize8 * 0x04))[pgPageSize8 - 1] = 0xFF;
bufUsedSet(relation, bufSize(relation));
HRN_STORAGE_PUT(storagePgWrite(), PG_PATH_BASE "/1/3", relation, .timeModified = backupTimeStart);
const char *rel1_3Sha1 = strZ(strNewEncode(encodingHex, cryptoHashOne(hashTypeSha1, relation)));
// File with bad page checksum
relation = bufNew(PG_PAGE_SIZE_DEFAULT * 3);
relation = bufNew(pgPageSize8 * 3);
memset(bufPtr(relation), 0, bufSize(relation));
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x00)) = (PageHeaderData){.pd_upper = 0x00};
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x01)) = (PageHeaderData){.pd_upper = 0x08};
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x02)) = (PageHeaderData){.pd_upper = 0xFF};
((PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x02)))->pd_checksum = pgPageChecksum(
bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x02), 2);
*(PageHeaderData *)(bufPtr(relation) + (pgPageSize8 * 0x00)) = (PageHeaderData){.pd_upper = 0x00};
*(PageHeaderData *)(bufPtr(relation) + (pgPageSize8 * 0x01)) = (PageHeaderData){.pd_upper = 0x08};
*(PageHeaderData *)(bufPtr(relation) + (pgPageSize8 * 0x02)) = (PageHeaderData){.pd_upper = 0xFF};
((PageHeaderData *)(bufPtr(relation) + (pgPageSize8 * 0x02)))->pd_checksum = pgPageChecksum(
bufPtr(relation) + (pgPageSize8 * 0x02), 2, pgPageSize8);
bufUsedSet(relation, bufSize(relation));
HRN_STORAGE_PUT(storagePgWrite(), PG_PATH_BASE "/1/4", relation, .timeModified = backupTimeStart);
@@ -3028,17 +3033,17 @@ testRun(void)
HRN_CFG_LOAD(cfgCmdBackup, argList);
// File with bad page checksum and header errors that will be ignored
Buffer *relation = bufNew(PG_PAGE_SIZE_DEFAULT * 4);
Buffer *relation = bufNew(pgPageSize8 * 4);
memset(bufPtr(relation), 0, bufSize(relation));
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x00)) = (PageHeaderData){.pd_upper = 0xFF};
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x01)) = (PageHeaderData){.pd_upper = 0x00};
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x02)) = (PageHeaderData){.pd_upper = 0x00};
(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x02))[PG_PAGE_SIZE_DEFAULT - 1] = 0xFF;
((PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x02)))->pd_checksum = pgPageChecksum(
bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x02), 2);
*(PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x03)) = (PageHeaderData){.pd_upper = 0x00};
(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x03))[PG_PAGE_SIZE_DEFAULT - 1] = 0xEE;
((PageHeaderData *)(bufPtr(relation) + (PG_PAGE_SIZE_DEFAULT * 0x03)))->pd_checksum = 1;
*(PageHeaderData *)(bufPtr(relation) + (pgPageSize8 * 0x00)) = (PageHeaderData){.pd_upper = 0xFF};
*(PageHeaderData *)(bufPtr(relation) + (pgPageSize8 * 0x01)) = (PageHeaderData){.pd_upper = 0x00};
*(PageHeaderData *)(bufPtr(relation) + (pgPageSize8 * 0x02)) = (PageHeaderData){.pd_upper = 0x00};
(bufPtr(relation) + (pgPageSize8 * 0x02))[pgPageSize8 - 1] = 0xFF;
((PageHeaderData *)(bufPtr(relation) + (pgPageSize8 * 0x02)))->pd_checksum = pgPageChecksum(
bufPtr(relation) + (pgPageSize8 * 0x02), 2, pgPageSize8);
*(PageHeaderData *)(bufPtr(relation) + (pgPageSize8 * 0x03)) = (PageHeaderData){.pd_upper = 0x00};
(bufPtr(relation) + (pgPageSize8 * 0x03))[pgPageSize8 - 1] = 0xEE;
((PageHeaderData *)(bufPtr(relation) + (pgPageSize8 * 0x03)))->pd_checksum = 1;
bufUsedSet(relation, bufSize(relation));
HRN_STORAGE_PUT(storagePgWrite(), PG_PATH_BASE "/1/3", relation, .timeModified = backupTimeStart);
@@ -3046,7 +3051,7 @@ testRun(void)
// File will be truncated during backup to show that actual file size is copied no matter what original size is. This
// will also cause an alignment error.
Buffer *relationAfter = bufNew(PG_PAGE_SIZE_DEFAULT + 15);
Buffer *relationAfter = bufNew(pgPageSize8 + 15);
memset(bufPtr(relationAfter), 0, bufSize(relationAfter));
bufUsedSet(relationAfter, bufSize(relationAfter));
@@ -3180,14 +3185,14 @@ testRun(void)
.walSegmentSize = 1024 * 1024);
// Set to a smaller values than the defaults allow
cfgOptionSet(cfgOptRepoBundleSize, cfgSourceParam, VARINT64(PG_PAGE_SIZE_DEFAULT));
cfgOptionSet(cfgOptRepoBundleLimit, cfgSourceParam, VARINT64(PG_PAGE_SIZE_DEFAULT));
cfgOptionSet(cfgOptRepoBundleSize, cfgSourceParam, VARINT64(pgPageSize8));
cfgOptionSet(cfgOptRepoBundleLimit, cfgSourceParam, VARINT64(pgPageSize8));
// Zero-length file to be stored
HRN_STORAGE_PUT_EMPTY(storagePgWrite(), "zero", .timeModified = backupTimeStart);
// Zeroed file which passes page checksums
Buffer *relation = bufNew(PG_PAGE_SIZE_DEFAULT * 3);
Buffer *relation = bufNew(pgPageSize8 * 3);
memset(bufPtr(relation), 0, bufSize(relation));
bufUsedSet(relation, bufSize(relation));
@@ -3198,7 +3203,7 @@ testRun(void)
HRN_STORAGE_PUT_Z(storagePgWrite(), "stuff.conf", "CONFIGSTUFF3", .timeModified = 1500000000);
// File that will get skipped while bundling smaller files and end up a bundle by itself
Buffer *bigish = bufNew(PG_PAGE_SIZE_DEFAULT - 1);
Buffer *bigish = bufNew(pgPageSize8 - 1);
memset(bufPtr(bigish), 0, bufSize(bigish));
bufUsedSet(bigish, bufSize(bigish));
@@ -3905,7 +3910,8 @@ testRun(void)
HRN_STORAGE_PATH_REMOVE(storageTest, "pg1", .recurse = true);
// Update pg_control
HRN_PG_CONTROL_PUT(storagePgWrite(), PG_VERSION_11, .pageChecksum = true, .walSegmentSize = 2 * 1024 * 1024);
HRN_PG_CONTROL_PUT(
storagePgWrite(), PG_VERSION_11, .pageChecksum = true, .walSegmentSize = 2 * 1024 * 1024, .pageSize = pgPageSize4);
// Update version
HRN_STORAGE_PUT_Z(storagePgWrite(), PG_FILE_PGVERSION, PG_VERSION_11_Z, .timeModified = backupTimeStart);
@@ -3925,13 +3931,13 @@ testRun(void)
HRN_CFG_LOAD(cfgCmdBackup, argList);
// File that will grow during the backup
Buffer *const fileGrow = bufNew(PG_PAGE_SIZE_DEFAULT * 4);
memset(bufPtr(fileGrow), 0, PG_PAGE_SIZE_DEFAULT * 3);
bufUsedSet(fileGrow, PG_PAGE_SIZE_DEFAULT * 3);
Buffer *const fileGrow = bufNew(pgPageSize4 * 4);
memset(bufPtr(fileGrow), 0, pgPageSize4 * 3);
bufUsedSet(fileGrow, pgPageSize4 * 3);
HRN_STORAGE_PUT(storagePgWrite(), "global/1", fileGrow, .timeModified = backupTimeStart);
memset(bufPtr(fileGrow) + PG_PAGE_SIZE_DEFAULT * 3, 0xFF, PG_PAGE_SIZE_DEFAULT);
memset(bufPtr(fileGrow) + pgPageSize4 * 3, 0xFF, pgPageSize4);
bufUsedSet(fileGrow, bufSize(fileGrow));
// Also write a copy of it that will get a checksum error, just to be sure the read limit on global/1 is working
@@ -3947,15 +3953,15 @@ testRun(void)
TEST_RESULT_VOID(hrnCmdBackup(), "backup");
// Make sure that global/1 grew as expected but the extra bytes were not copied
TEST_RESULT_UINT(storageInfoP(storagePgWrite(), STRDEF("global/1")).size, 32768, "check global/1 grew");
TEST_RESULT_UINT(storageInfoP(storagePgWrite(), STRDEF("global/1")).size, 16384, "check global/1 grew");
TEST_RESULT_LOG(
"P00 INFO: execute non-exclusive backup start: backup begins after the next regular checkpoint completes\n"
"P00 INFO: backup start archive = 0000000105DC9B4000000000, lsn = 5dc9b40/0\n"
"P00 INFO: check archive for segment 0000000105DC9B4000000000\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/global/2 (32KB, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/global/2 (16KB, [PCT]) checksum [SHA1]\n"
"P00 WARN: invalid page checksum found in file " TEST_PATH "/pg1/global/2 at page 3\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/global/1 (24KB, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/global/1 (12KB, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/global/pg_control (bundle 1/0, 8KB, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/PG_VERSION (bundle 1/8224, 2B, [PCT]) checksum [SHA1]\n"
"P00 INFO: execute non-exclusive backup stop and wait for all WAL segments to archive\n"
@@ -3977,8 +3983,8 @@ testRun(void)
"pg_data {path}\n"
"pg_data/backup_label {file, s=17}\n"
"pg_data/global {path}\n"
"pg_data/global/1 {file, s=24576}\n"
"pg_data/global/2 {file, s=32768}\n"
"pg_data/global/1 {file, s=12288}\n"
"pg_data/global/2 {file, s=16384}\n"
"pg_data/tablespace_map {file, s=19}\n"
"--------\n"
"[backup:target]\n"
@@ -3989,10 +3995,10 @@ testRun(void)
",\"timestamp\":1573500000}\n"
"pg_data/backup_label={\"checksum\":\"8e6f41ac87a7514be96260d65bacbffb11be77dc\",\"repo-size\":48,\"size\":17"
",\"timestamp\":1573500002}\n"
"pg_data/global/1={\"checksum\":\"ebdd38b69cd5b9f2d00d273c981e16960fbbb4f7\",\"checksum-page\":true"
",\"repo-size\":24608,\"size\":24576,\"timestamp\":1573500000}\n"
"pg_data/global/2={\"checksum\":\"bc807e211d8fe3b5c2f20a88d2a96257bc10ac44\",\"checksum-page\":false"
",\"checksum-page-error\":[3],\"repo-size\":32800,\"size\":32768,\"timestamp\":1573500000}\n"
"pg_data/global/1={\"checksum\":\"7cb41fea50720b48be0c145e1473982b23e9ab77\",\"checksum-page\":true"
",\"repo-size\":12320,\"size\":12288,\"timestamp\":1573500000}\n"
"pg_data/global/2={\"checksum\":\"02af87d042262a0313120317db0c285b3210209f\",\"checksum-page\":false"
",\"checksum-page-error\":[3],\"repo-size\":16416,\"size\":16384,\"timestamp\":1573500000}\n"
"pg_data/global/pg_control={\"repo-size\":8224,\"size\":8192,\"timestamp\":1573500000}\n"
"pg_data/tablespace_map={\"checksum\":\"87fe624d7976c2144e10afcb7a9a49b071f35e9c\",\"repo-size\":48"
",\"size\":19,\"timestamp\":1573500002}\n"

View File

@@ -3,6 +3,7 @@ Test PostgreSQL Interface
***********************************************************************************************************************************/
#include "storage/posix/storage.h"
#include "common/harnessConfig.h"
#include "common/harnessPostgres.h"
/***********************************************************************************************************************************
@@ -14,6 +15,11 @@ testRun(void)
FUNCTION_HARNESS_VOID();
Storage *storageTest = storagePosixNewP(TEST_PATH_STR, .write = true);
// Test configurations loading to initialize cfgOptFork value (PostgreSQL)
StringList *argList = strLstNew();
hrnCfgArgRawZ(argList, cfgOptStanza, "test");
hrnCfgArgKeyRawZ(argList, cfgOptPgPath, 1, "/pg1");
hrnCfgLoadP(cfgCmdBackup, argList);
// *****************************************************************************************************************************
if (testBegin("pgVersionFromStr() and pgVersionToStr()"))
@@ -122,14 +128,16 @@ testRun(void)
"wal segment size is 1048576 but must be 16777216 for PostgreSQL <= 10");
// -------------------------------------------------------------------------------------------------------------------------
HRN_PG_CONTROL_PUT(storageTest, PG_VERSION_95, .pageSize = 32 * 1024);
HRN_PG_CONTROL_PUT(storageTest, PG_VERSION_95, .pageSize = 64 * 1024);
TEST_ERROR(pgControlFromFile(storageTest, NULL), FormatError, "page size is 32768 but must be 8192");
TEST_ERROR(
pgControlFromFile(storageTest, NULL), FormatError,
"page size is 65536 but only 1024, 2048, 4096, 8192, 16384, and 32768 are supported");
// -------------------------------------------------------------------------------------------------------------------------
HRN_PG_CONTROL_PUT(
storageTest, PG_VERSION_94, .systemId = 0xEFEFEFEFEF, .catalogVersion = hrnPgCatalogVersion(PG_VERSION_94),
.checkpoint = 0xAABBAABBEEFFEEFF, .timeline = 88);
.checkpoint = 0xAABBAABBEEFFEEFF, .timeline = 88, .pageSize = pgPageSize8);
TEST_ASSIGN(info, pgControlFromFile(storageTest, NULL), "get control info v90");
TEST_RESULT_UINT(info.systemId, 0xEFEFEFEFEF, " check system id");
@@ -137,6 +145,72 @@ testRun(void)
TEST_RESULT_UINT(info.catalogVersion, 201409291, " check catalog version");
TEST_RESULT_UINT(info.checkpoint, 0xAABBAABBEEFFEEFF, "check checkpoint");
TEST_RESULT_UINT(info.timeline, 88, "check timeline");
TEST_RESULT_UINT(info.pageSize, pgPageSize8, "check page size");
// -------------------------------------------------------------------------------------------------------------------------
HRN_PG_CONTROL_PUT(
storageTest, PG_VERSION_16, .systemId = 0xEFEFEFEFEF, .catalogVersion = hrnPgCatalogVersion(PG_VERSION_16),
.checkpoint = 0xAABBAABBEEFFEEFF, .timeline = 88, .pageSize = pgPageSize1);
TEST_ASSIGN(info, pgControlFromFile(storageTest, NULL), "get control info v90");
TEST_RESULT_UINT(info.systemId, 0xEFEFEFEFEF, " check system id");
TEST_RESULT_UINT(info.version, PG_VERSION_16, "check version");
TEST_RESULT_UINT(info.catalogVersion, 202307071, "check catalog version");
TEST_RESULT_UINT(info.checkpoint, 0xAABBAABBEEFFEEFF, "check checkpoint");
TEST_RESULT_UINT(info.timeline, 88, "check timeline");
TEST_RESULT_UINT(info.pageSize, pgPageSize1, "check page size");
// -------------------------------------------------------------------------------------------------------------------------
HRN_PG_CONTROL_PUT(
storageTest, PG_VERSION_16, .systemId = 0xEFEFEFEFEF, .catalogVersion = hrnPgCatalogVersion(PG_VERSION_16),
.checkpoint = 0xAABBAABBEEFFEEFF, .timeline = 88, .pageSize = pgPageSize2);
TEST_ASSIGN(info, pgControlFromFile(storageTest, NULL), "get control info v90");
TEST_RESULT_UINT(info.systemId, 0xEFEFEFEFEF, " check system id");
TEST_RESULT_UINT(info.version, PG_VERSION_16, "check version");
TEST_RESULT_UINT(info.catalogVersion, 202307071, "check catalog version");
TEST_RESULT_UINT(info.checkpoint, 0xAABBAABBEEFFEEFF, "check checkpoint");
TEST_RESULT_UINT(info.timeline, 88, "check timeline");
TEST_RESULT_UINT(info.pageSize, pgPageSize2, "check page size");
// -------------------------------------------------------------------------------------------------------------------------
HRN_PG_CONTROL_PUT(
storageTest, PG_VERSION_16, .systemId = 0xEFEFEFEFEF, .catalogVersion = hrnPgCatalogVersion(PG_VERSION_16),
.checkpoint = 0xAABBAABBEEFFEEFF, .timeline = 88, .pageSize = pgPageSize4);
TEST_ASSIGN(info, pgControlFromFile(storageTest, NULL), "get control info v90");
TEST_RESULT_UINT(info.systemId, 0xEFEFEFEFEF, " check system id");
TEST_RESULT_UINT(info.version, PG_VERSION_16, "check version");
TEST_RESULT_UINT(info.catalogVersion, 202307071, "check catalog version");
TEST_RESULT_UINT(info.checkpoint, 0xAABBAABBEEFFEEFF, "check checkpoint");
TEST_RESULT_UINT(info.timeline, 88, "check timeline");
TEST_RESULT_UINT(info.pageSize, pgPageSize4, "check page size");
// -------------------------------------------------------------------------------------------------------------------------
HRN_PG_CONTROL_PUT(
storageTest, PG_VERSION_16, .systemId = 0xEFEFEFEFEF, .catalogVersion = hrnPgCatalogVersion(PG_VERSION_16),
.checkpoint = 0xAABBAABBEEFFEEFF, .timeline = 88, .pageSize = pgPageSize16);
TEST_ASSIGN(info, pgControlFromFile(storageTest, NULL), "get control info v90");
TEST_RESULT_UINT(info.systemId, 0xEFEFEFEFEF, " check system id");
TEST_RESULT_UINT(info.version, PG_VERSION_16, "check version");
TEST_RESULT_UINT(info.catalogVersion, 202307071, "check catalog version");
TEST_RESULT_UINT(info.checkpoint, 0xAABBAABBEEFFEEFF, "check checkpoint");
TEST_RESULT_UINT(info.timeline, 88, "check timeline");
TEST_RESULT_UINT(info.pageSize, pgPageSize16, "check page size");
// -------------------------------------------------------------------------------------------------------------------------
HRN_PG_CONTROL_PUT(
storageTest, PG_VERSION_16, .systemId = 0xEFEFEFEFEF, .catalogVersion = hrnPgCatalogVersion(PG_VERSION_16),
.checkpoint = 0xAABBAABBEEFFEEFF, .timeline = 88, .pageSize = pgPageSize32);
TEST_ASSIGN(info, pgControlFromFile(storageTest, NULL), "get control info v90");
TEST_RESULT_UINT(info.systemId, 0xEFEFEFEFEF, " check system id");
TEST_RESULT_UINT(info.version, PG_VERSION_16, "check version");
TEST_RESULT_UINT(info.catalogVersion, 202307071, "check catalog version");
TEST_RESULT_UINT(info.checkpoint, 0xAABBAABBEEFFEEFF, "check checkpoint");
TEST_RESULT_UINT(info.timeline, 88, "check timeline");
TEST_RESULT_UINT(info.pageSize, pgPageSize32, "check page size");
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("force control version");
@@ -219,7 +293,7 @@ testRun(void)
TEST_RESULT_STR_Z(pgLsnName(PG_VERSION_96), "location", "check location name");
TEST_RESULT_STR_Z(pgLsnName(PG_VERSION_10), "lsn", "check lsn name");
TEST_RESULT_STR_Z(pgTablespaceId(PG_VERSION_94, 201306121), "PG_9.4_201306121", "check 9.4 tablespace id");
TEST_RESULT_STR_Z(pgTablespaceId(PG_VERSION_94, 201409291), "PG_9.4_201409291", "check 9.4 tablespace id");
TEST_RESULT_STR_Z(pgTablespaceId(PG_VERSION_16, 999999999), "PG_16_999999999", "check 16 tablespace id");
TEST_RESULT_STR_Z(pgWalName(PG_VERSION_96), "xlog", "check xlog name");
@@ -235,11 +309,87 @@ testRun(void)
// *****************************************************************************************************************************
if (testBegin("pgPageChecksum()"))
{
unsigned char page[PG_PAGE_SIZE_DEFAULT];
memset(page, 0xFF, PG_PAGE_SIZE_DEFAULT);
TEST_TITLE("1KiB page checksum");
{
unsigned char page[pgPageSize1];
memset(page, 0xFF, sizeof(page));
TEST_RESULT_UINT(pgPageChecksum(page, 0), TEST_BIG_ENDIAN() ? 0xF55E : 0x0E1C, "check 0xFF filled page, block 0");
TEST_RESULT_UINT(pgPageChecksum(page, 999), TEST_BIG_ENDIAN() ? 0xF1B9 : 0x0EC3, "check 0xFF filled page, block 999");
TEST_RESULT_UINT(
pgPageChecksum(page, 0, sizeof(page)), TEST_BIG_ENDIAN() ? 0x980F : 0x016E, "check 0xFF filled page, block 0");
TEST_RESULT_UINT(
pgPageChecksum(page, 999, sizeof(page)), TEST_BIG_ENDIAN() ? 0x982A : 0x0391, "check 0xFF filled page, block 999");
}
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("2KiB page checksum");
{
unsigned char page[pgPageSize2];
memset(page, 0xFF, sizeof(page));
TEST_RESULT_UINT(
pgPageChecksum(page, 0, sizeof(page)), TEST_BIG_ENDIAN() ? 0x4937 : 0xB57B, "check 0xFF filled page, block 0");
TEST_RESULT_UINT(
pgPageChecksum(page, 999, sizeof(page)), TEST_BIG_ENDIAN() ? 0x48D8 : 0xB7A2, "check 0xFF filled page, block 999");
}
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("4KiB page checksum");
{
unsigned char page[pgPageSize4];
memset(page, 0xFF, sizeof(page));
TEST_RESULT_UINT(
pgPageChecksum(page, 0, sizeof(page)), TEST_BIG_ENDIAN() ? 0x81DA : 0x5B9B, "check 0xFF filled page, block 0");
TEST_RESULT_UINT(
pgPageChecksum(page, 999, sizeof(page)), TEST_BIG_ENDIAN() ? 0x7EB7 : 0x5BB8, "check 0xFF filled page, block 999");
}
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("8KiB page checksum");
{
unsigned char page[pgPageSize8];
memset(page, 0xFF, sizeof(page));
TEST_RESULT_UINT(
pgPageChecksum(page, 0, sizeof(page)), TEST_BIG_ENDIAN() ? 0xF55E : 0x0E1C, "check 0xFF filled page, block 0");
TEST_RESULT_UINT(
pgPageChecksum(page, 999, sizeof(page)), TEST_BIG_ENDIAN() ? 0xF1B9 : 0x0EC3, "check 0xFF filled page, block 999");
}
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("16KiB page checksum");
{
unsigned char page[pgPageSize16];
memset(page, 0xFF, sizeof(page));
TEST_RESULT_UINT(
pgPageChecksum(page, 0, sizeof(page)), TEST_BIG_ENDIAN() ? 0xA2AD : 0x158E, "check 0xFF filled page, block 0");
TEST_RESULT_UINT(
pgPageChecksum(page, 999, sizeof(page)), TEST_BIG_ENDIAN() ? 0xA548 : 0x18AD, "check 0xFF filled page, block 999");
}
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("32KiB page checksum");
{
unsigned char page[pgPageSize32];
memset(page, 0xFF, sizeof(page));
TEST_RESULT_UINT(
pgPageChecksum(page, 0, sizeof(page)), TEST_BIG_ENDIAN() ? 0x7F66 : 0x5366, "check 0xFF filled page, block 0");
TEST_RESULT_UINT(
pgPageChecksum(page, 999, sizeof(page)), TEST_BIG_ENDIAN() ? 0x82C5 : 0x5745, "check 0xFF filled page, block 999");
}
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("invalid page size error");
{
unsigned char page[64 * 1024];
memset(page, 0xFF, sizeof(page));
TEST_ERROR(
pgPageChecksum(page, 0, sizeof(page)), FormatError,
"page size is 65536 but only 1024, 2048, 4096, 8192, 16384, and 32768 are supported");
}
}
// *****************************************************************************************************************************