Mirror of https://github.com/pgbackrest/pgbackrest.git, synced 2024-12-12 10:04:14 +02:00

Update command/backup module to recent coding standards.

Add const as appropriate and avoid setting results to NULL if the result will definitely be set later on.
David Steele 2023-04-11 12:46:00 +04:00
parent f33e1cfb16
commit a9c5cd9749
5 changed files with 100 additions and 101 deletions
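For context on what "recent coding standards" means here, the sketch below is a generic, self-contained C illustration (labelFormat and its parameters are hypothetical, not pgBackRest code) of the two conventions applied throughout the diff: const-qualify parameters and local variables that are never reassigned, and drop the "= NULL" initializer when the variable is definitely assigned on every path before it is used.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

// Before: static char *labelFormat(bool full, const char *prior, time_t timestamp)
static char *
labelFormat(const bool full, const char *const prior, const time_t timestamp)
{
    // Before: char *result = NULL; -- the initializer is dropped because every
    // branch below assigns result before it is returned
    char *result;

    if (full)
    {
        result = malloc(32);

        if (result != NULL)
            snprintf(result, 32, "%lldF", (long long)timestamp);
    }
    else
    {
        const size_t size = strlen(prior) + 32;

        result = malloc(size);

        if (result != NULL)
            snprintf(result, size, "%s_%lldI", prior, (long long)timestamp);
    }

    return result;
}

int
main(void)
{
    char *const label = labelFormat(false, "20230411-120000F", time(NULL));

    if (label != NULL)
    {
        printf("%s\n", label);
        free(label);
    }

    return 0;
}

The const qualifiers document intent and let the compiler catch accidental reassignment, while removing the dead NULL initializer keeps uninitialized-use warnings meaningful rather than masking a missed assignment.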

View File

@ -44,7 +44,7 @@ Backup Command
Generate a unique backup label that does not contain a timestamp from a previous backup
***********************************************************************************************************************************/
static String *
backupLabelCreate(BackupType type, const String *backupLabelPrior, time_t timestamp)
backupLabelCreate(const BackupType type, const String *const backupLabelPrior, const time_t timestamp)
{
FUNCTION_LOG_BEGIN(logLevelTrace);
FUNCTION_LOG_PARAM(STRING_ID, type);
@ -55,7 +55,7 @@ backupLabelCreate(BackupType type, const String *backupLabelPrior, time_t timest
ASSERT((type == backupTypeFull && backupLabelPrior == NULL) || (type != backupTypeFull && backupLabelPrior != NULL));
ASSERT(timestamp > 0);
String *result = NULL;
String *result;
MEM_CONTEXT_TEMP_BEGIN()
{
@ -165,7 +165,7 @@ typedef struct BackupData
} BackupData;
static BackupData *
backupInit(const InfoBackup *infoBackup)
backupInit(const InfoBackup *const infoBackup)
{
FUNCTION_LOG_BEGIN(logLevelDebug);
FUNCTION_LOG_PARAM(INFO_BACKUP, infoBackup);
@ -176,11 +176,11 @@ backupInit(const InfoBackup *infoBackup)
ASSERT(infoBackup != NULL);
// Initialize for offline backup
BackupData *result = memNew(sizeof(BackupData));
BackupData *const result = memNew(sizeof(BackupData));
*result = (BackupData){0};
// Don't allow backup from standby when offline
InfoPgData infoPg = infoPgDataCurrent(infoBackupPg(infoBackup));
const InfoPgData infoPg = infoPgDataCurrent(infoBackupPg(infoBackup));
if (!cfgOptionBool(cfgOptOnline) && cfgOptionBool(cfgOptBackupStandby))
{
@ -194,8 +194,8 @@ backupInit(const InfoBackup *infoBackup)
if (cfgOptionBool(cfgOptOnline))
{
bool backupStandby = cfgOptionBool(cfgOptBackupStandby);
DbGetResult dbInfo = dbGet(!backupStandby, true, backupStandby);
const bool backupStandby = cfgOptionBool(cfgOptBackupStandby);
const DbGetResult dbInfo = dbGet(!backupStandby, true, backupStandby);
result->pgIdxPrimary = dbInfo.primaryIdx;
result->dbPrimary = dbInfo.primary;
@ -319,7 +319,7 @@ static const ManifestBlockIncrMap manifestBlockIncrMap =
// Convert map size
static unsigned int
backupBlockIncrMapSize(ConfigOption optionId, unsigned int optionKeyIdx, const String *const value)
backupBlockIncrMapSize(const ConfigOption optionId, const unsigned int optionKeyIdx, const String *const value)
{
FUNCTION_TEST_BEGIN();
FUNCTION_TEST_PARAM(ENUM, optionId);
@ -353,7 +353,7 @@ backupBlockIncrMapSize(ConfigOption optionId, unsigned int optionKeyIdx, const S
// Convert map checksum size
static unsigned int
backupBlockIncrMapChecksumSize(ConfigOption optionId, unsigned int optionKeyIdx, const Variant *const value)
backupBlockIncrMapChecksumSize(const ConfigOption optionId, const unsigned int optionKeyIdx, const Variant *const value)
{
FUNCTION_TEST_BEGIN();
FUNCTION_TEST_PARAM(ENUM, optionId);
@ -488,7 +488,7 @@ backupBlockIncrMap(void)
Get time from the database or locally depending on online
***********************************************************************************************************************************/
static time_t
backupTime(BackupData *backupData, bool waitRemainder)
backupTime(BackupData *const backupData, const bool waitRemainder)
{
FUNCTION_LOG_BEGIN(logLevelDebug);
FUNCTION_LOG_PARAM(BACKUP_DATA, backupData);
@ -539,7 +539,7 @@ Create an incremental backup if type is not full and a compatible prior backup e
***********************************************************************************************************************************/
// Helper to find a compatible prior backup
static Manifest *
backupBuildIncrPrior(const InfoBackup *infoBackup)
backupBuildIncrPrior(const InfoBackup *const infoBackup)
{
FUNCTION_LOG_BEGIN(logLevelDebug);
FUNCTION_LOG_PARAM(INFO_BACKUP, infoBackup);
@ -550,19 +550,19 @@ backupBuildIncrPrior(const InfoBackup *infoBackup)
Manifest *result = NULL;
// No incremental if backup type is full
BackupType type = (BackupType)cfgOptionStrId(cfgOptType);
const BackupType type = (BackupType)cfgOptionStrId(cfgOptType);
if (type != backupTypeFull)
{
MEM_CONTEXT_TEMP_BEGIN()
{
InfoPgData infoPg = infoPgDataCurrent(infoBackupPg(infoBackup));
const InfoPgData infoPg = infoPgDataCurrent(infoBackupPg(infoBackup));
const String *backupLabelPrior = NULL;
unsigned int backupTotal = infoBackupDataTotal(infoBackup);
const unsigned int backupTotal = infoBackupDataTotal(infoBackup);
for (unsigned int backupIdx = backupTotal - 1; backupIdx < backupTotal; backupIdx--)
{
InfoBackupData backupPrior = infoBackupData(infoBackup, backupIdx);
const InfoBackupData backupPrior = infoBackupData(infoBackup, backupIdx);
// The prior backup for a diff must be full
if (type == backupTypeDiff && backupPrior.backupType != backupTypeFull)
@ -583,7 +583,7 @@ backupBuildIncrPrior(const InfoBackup *infoBackup)
result = manifestLoadFile(
storageRepo(), strNewFmt(STORAGE_REPO_BACKUP "/%s/" BACKUP_MANIFEST_FILE, strZ(backupLabelPrior)),
cfgOptionStrId(cfgOptRepoCipherType), infoPgCipherPass(infoBackupPg(infoBackup)));
const ManifestData *manifestPriorData = manifestData(result);
const ManifestData *const manifestPriorData = manifestData(result);
LOG_INFO_FMT(
"last backup label = %s, version = %s", strZ(manifestData(result)->backupLabel),
@ -624,7 +624,7 @@ backupBuildIncrPrior(const InfoBackup *infoBackup)
// certain files would be checksummed and the list could be incomplete during reporting.
else
{
bool checksumPagePrior = varBool(manifestData(result)->backupOptionChecksumPage);
const bool checksumPagePrior = varBool(manifestData(result)->backupOptionChecksumPage);
// Warn if an incompatible setting was explicitly requested
if (checksumPagePrior != cfgOptionBool(cfgOptChecksumPage))
@ -653,7 +653,8 @@ backupBuildIncrPrior(const InfoBackup *infoBackup)
}
static bool
backupBuildIncr(const InfoBackup *infoBackup, Manifest *manifest, Manifest *manifestPrior, const String *archiveStart)
backupBuildIncr(
const InfoBackup *const infoBackup, Manifest *const manifest, Manifest *const manifestPrior, const String *const archiveStart)
{
FUNCTION_LOG_BEGIN(logLevelDebug);
FUNCTION_LOG_PARAM(INFO_BACKUP, infoBackup);
@ -854,7 +855,7 @@ backupResumeClean(
// Helper to find a resumable backup
static const Manifest *
backupResumeFind(const Manifest *manifest, const String *cipherPassBackup)
backupResumeFind(const Manifest *const manifest, const String *const cipherPassBackup)
{
FUNCTION_LOG_BEGIN(logLevelDebug);
FUNCTION_LOG_PARAM(MANIFEST, manifest);
@ -868,7 +869,7 @@ backupResumeFind(const Manifest *manifest, const String *cipherPassBackup)
MEM_CONTEXT_TEMP_BEGIN()
{
// Only the last backup can be resumed
const StringList *backupList = strLstSort(
const StringList *const backupList = strLstSort(
storageListP(
storageRepo(), STRDEF(STORAGE_REPO_BACKUP),
.expression = backupRegExpP(.full = true, .differential = true, .incremental = true)),
@ -876,8 +877,8 @@ backupResumeFind(const Manifest *manifest, const String *cipherPassBackup)
if (!strLstEmpty(backupList))
{
const String *backupLabel = strLstGet(backupList, 0);
const String *manifestFile = strNewFmt(STORAGE_REPO_BACKUP "/%s/" BACKUP_MANIFEST_FILE, strZ(backupLabel));
const String *const backupLabel = strLstGet(backupList, 0);
const String *const manifestFile = strNewFmt(STORAGE_REPO_BACKUP "/%s/" BACKUP_MANIFEST_FILE, strZ(backupLabel));
// Resumable backups do not have backup.manifest
if (!storageExistsP(storageRepo(), manifestFile))
@ -976,7 +977,7 @@ backupResumeFind(const Manifest *manifest, const String *cipherPassBackup)
}
static bool
backupResume(Manifest *manifest, const String *cipherPassBackup)
backupResume(Manifest *const manifest, const String *const cipherPassBackup)
{
FUNCTION_LOG_BEGIN(logLevelDebug);
FUNCTION_LOG_PARAM(MANIFEST, manifest);
@ -989,7 +990,7 @@ backupResume(Manifest *manifest, const String *cipherPassBackup)
MEM_CONTEXT_TEMP_BEGIN()
{
const Manifest *manifestResume = backupResumeFind(manifest, cipherPassBackup);
const Manifest *const manifestResume = backupResumeFind(manifest, cipherPassBackup);
// If a resumable backup was found set the label and cipher subpass
if (manifestResume)
@ -1033,7 +1034,7 @@ typedef struct BackupStartResult
} BackupStartResult;
static BackupStartResult
backupStart(BackupData *backupData)
backupStart(BackupData *const backupData)
{
FUNCTION_LOG_BEGIN(logLevelDebug);
FUNCTION_LOG_PARAM(BACKUP_DATA, backupData);
@ -1079,7 +1080,7 @@ backupStart(BackupData *backupData)
backupData->version >= PG_VERSION_96 ? "non-" : "",
cfgOptionBool(cfgOptStartFast) ? "requested immediate" : "next regular");
DbBackupStartResult dbBackupStartResult = dbBackupStart(
const DbBackupStartResult dbBackupStartResult = dbBackupStart(
backupData->dbPrimary, cfgOptionBool(cfgOptStartFast), cfgOptionBool(cfgOptStopAuto),
cfgOptionBool(cfgOptArchiveCheck));
@ -1128,7 +1129,9 @@ Stop the backup
***********************************************************************************************************************************/
// Helper to write a file from a string to the repository and update the manifest
static void
backupFilePut(BackupData *backupData, Manifest *manifest, const String *name, time_t timestamp, const String *content)
backupFilePut(
BackupData *const backupData, Manifest *const manifest, const String *const name, const time_t timestamp,
const String *const content)
{
FUNCTION_LOG_BEGIN(logLevelDebug);
FUNCTION_LOG_PARAM(BACKUP_DATA, backupData);
@ -1145,17 +1148,17 @@ backupFilePut(BackupData *backupData, Manifest *manifest, const String *name, ti
{
// Create file
bool repoChecksum = false;
const String *manifestName = strNewFmt(MANIFEST_TARGET_PGDATA "/%s", strZ(name));
CompressType compressType = compressTypeEnum(cfgOptionStrId(cfgOptCompressType));
const String *const manifestName = strNewFmt(MANIFEST_TARGET_PGDATA "/%s", strZ(name));
const CompressType compressType = compressTypeEnum(cfgOptionStrId(cfgOptCompressType));
StorageWrite *write = storageNewWriteP(
StorageWrite *const write = storageNewWriteP(
storageRepoWrite(),
backupFileRepoPathP(
manifestData(manifest)->backupLabel, .manifestName = manifestName,
.compressType = compressTypeEnum(cfgOptionStrId(cfgOptCompressType))),
.compressible = true);
IoFilterGroup *filterGroup = ioWriteFilterGroup(storageWriteIo(write));
IoFilterGroup *const filterGroup = ioWriteFilterGroup(storageWriteIo(write));
// Add SHA1 filter
ioFilterGroupAdd(filterGroup, cryptoHashNew(hashTypeSha1));
@ -1191,7 +1194,7 @@ backupFilePut(BackupData *backupData, Manifest *manifest, const String *name, ti
storagePutP(write, BUFSTR(content));
// Use base path to set ownership and mode
const ManifestPath *basePath = manifestPathFind(manifest, MANIFEST_TARGET_PGDATA_STR);
const ManifestPath *const basePath = manifestPathFind(manifest, MANIFEST_TARGET_PGDATA_STR);
// Add to manifest
ManifestFile file =
@ -1228,7 +1231,7 @@ typedef struct BackupStopResult
} BackupStopResult;
static BackupStopResult
backupStop(BackupData *backupData, Manifest *manifest)
backupStop(BackupData *const backupData, Manifest *const manifest)
{
FUNCTION_LOG_BEGIN(logLevelDebug);
FUNCTION_LOG_PARAM(BACKUP_DATA, backupData);
@ -1248,7 +1251,7 @@ backupStop(BackupData *backupData, Manifest *manifest)
"execute %sexclusive backup stop and wait for all WAL segments to archive",
backupData->version >= PG_VERSION_96 ? "non-" : "");
DbBackupStopResult dbBackupStopResult = dbBackupStop(backupData->dbPrimary);
const DbBackupStopResult dbBackupStopResult = dbBackupStop(backupData->dbPrimary);
MEM_CONTEXT_PRIOR_BEGIN()
{
@ -1328,7 +1331,7 @@ backupJobResultPageChecksum(PackRead *const checksumPageResult)
// Combine results into a more compact form
while (pckReadNext(checksumPageResult))
{
unsigned int pageId = pckReadId(checksumPageResult) - 1;
const unsigned int pageId = pckReadId(checksumPageResult) - 1;
pckReadObjBeginP(checksumPageResult, .id = pageId + 1);
// If first error then just store page
@ -1607,7 +1610,7 @@ backupManifestSaveCopy(Manifest *const manifest, const String *const cipherPassB
MEM_CONTEXT_TEMP_BEGIN()
{
// Open file for write
IoWrite *write = storageWriteIo(
IoWrite *const write = storageWriteIo(
storageNewWriteP(
storageRepoWrite(),
strNewFmt(
@ -1696,7 +1699,7 @@ static bool backupProcessQueueComparatorBundle;
static uint64_t backupProcessQueueComparatorBundleLimit;
static int
backupProcessQueueComparator(const void *item1, const void *item2)
backupProcessQueueComparator(const void *const item1, const void *const item2)
{
FUNCTION_TEST_BEGIN();
FUNCTION_TEST_PARAM_P(VOID, item1);
@ -1707,8 +1710,8 @@ backupProcessQueueComparator(const void *item1, const void *item2)
ASSERT(item2 != NULL);
// Unpack files
ManifestFile file1 = manifestFileUnpack(backupProcessQueueComparatorManifest, *(const ManifestFilePack **)item1);
ManifestFile file2 = manifestFileUnpack(backupProcessQueueComparatorManifest, *(const ManifestFilePack **)item2);
const ManifestFile file1 = manifestFileUnpack(backupProcessQueueComparatorManifest, *(const ManifestFilePack **)item1);
const ManifestFile file2 = manifestFileUnpack(backupProcessQueueComparatorManifest, *(const ManifestFilePack **)item2);
// If the size differs then that's enough to determine order
if (!backupProcessQueueComparatorBundle || file1.size > backupProcessQueueComparatorBundleLimit ||
@ -1755,25 +1758,25 @@ backupProcessQueue(const BackupData *const backupData, Manifest *const manifest,
jobData->queueList = lstNewP(sizeof(void *));
// Generate the list of targets
StringList *targetList = strLstNew();
StringList *const targetList = strLstNew();
strLstAddZ(targetList, MANIFEST_TARGET_PGDATA "/");
for (unsigned int targetIdx = 0; targetIdx < manifestTargetTotal(manifest); targetIdx++)
{
const ManifestTarget *target = manifestTarget(manifest, targetIdx);
const ManifestTarget *const target = manifestTarget(manifest, targetIdx);
if (target->tablespaceId != 0)
strLstAddFmt(targetList, "%s/", strZ(target->name));
}
// Generate the processing queues (there is always at least one)
unsigned int queueOffset = jobData->backupStandby ? 1 : 0;
const unsigned int queueOffset = jobData->backupStandby ? 1 : 0;
MEM_CONTEXT_BEGIN(lstMemContext(jobData->queueList))
{
for (unsigned int queueIdx = 0; queueIdx < strLstSize(targetList) + queueOffset; queueIdx++)
{
List *queue = lstNewP(sizeof(ManifestFile *), .comparator = backupProcessQueueComparator);
List *const queue = lstNewP(sizeof(ManifestFile *), .comparator = backupProcessQueueComparator);
lstAdd(jobData->queueList, &queue);
}
}
@ -1870,7 +1873,7 @@ backupProcessQueue(const BackupData *const backupData, Manifest *const manifest,
// Helper to calculate the next queue to scan based on the client index
static int
backupJobQueueNext(unsigned int clientIdx, int queueIdx, unsigned int queueTotal)
backupJobQueueNext(const unsigned int clientIdx, int queueIdx, const unsigned int queueTotal)
{
FUNCTION_TEST_BEGIN();
FUNCTION_TEST_PARAM(UINT, clientIdx);
@ -1892,7 +1895,7 @@ backupJobQueueNext(unsigned int clientIdx, int queueIdx, unsigned int queueTotal
// Callback to fetch backup jobs for the parallel executor
static ProtocolParallelJob *
backupJobCallback(void *data, unsigned int clientIdx)
backupJobCallback(void *const data, const unsigned int clientIdx)
{
FUNCTION_TEST_BEGIN();
FUNCTION_TEST_PARAM_P(VOID, data);
@ -1906,24 +1909,24 @@ backupJobCallback(void *data, unsigned int clientIdx)
MEM_CONTEXT_TEMP_BEGIN()
{
// Get a new job if there are any left
BackupJobData *jobData = data;
BackupJobData *const jobData = data;
// Determine where to begin scanning the queue (we'll stop when we get back here). When copying from the primary during
// backup from standby only queue 0 will be used.
unsigned int queueOffset = jobData->backupStandby && clientIdx > 0 ? 1 : 0;
const unsigned int queueOffset = jobData->backupStandby && clientIdx > 0 ? 1 : 0;
int queueIdx =
jobData->backupStandby && clientIdx == 0 ? 0 : (int)(clientIdx % (lstSize(jobData->queueList) - queueOffset));
int queueEnd = queueIdx;
const int queueEnd = queueIdx;
// Create backup job
ProtocolCommand *command = protocolCommandNew(PROTOCOL_COMMAND_BACKUP_FILE);
ProtocolCommand *const command = protocolCommandNew(PROTOCOL_COMMAND_BACKUP_FILE);
PackWrite *param = NULL;
uint64_t fileTotal = 0;
uint64_t fileSize = 0;
do
{
List *queue = *(List **)lstGet(jobData->queueList, (unsigned int)queueIdx + queueOffset);
List *const queue = *(List **)lstGet(jobData->queueList, (unsigned int)queueIdx + queueOffset);
unsigned int fileIdx = 0;
bool bundle = jobData->bundle;
const String *fileName = NULL;
@ -2072,8 +2075,8 @@ backupProcess(
const BackupType backupType = manifestData(manifest)->backupType;
const String *const backupLabel = manifestData(manifest)->backupLabel;
const String *const backupPathExp = strNewFmt(STORAGE_REPO_BACKUP "/%s", strZ(backupLabel));
bool hardLink = cfgOptionBool(cfgOptRepoHardlink) && storageFeature(storageRepoWrite(), storageFeatureHardLink);
bool backupStandby = cfgOptionBool(cfgOptBackupStandby);
const bool hardLink = cfgOptionBool(cfgOptRepoHardlink) && storageFeature(storageRepoWrite(), storageFeatureHardLink);
const bool backupStandby = cfgOptionBool(cfgOptBackupStandby);
BackupJobData jobData =
{
@ -2152,7 +2155,7 @@ backupProcess(
sizeTotal = backupProcessQueue(backupData, manifest, &jobData);
// Create the parallel executor
ProtocolParallel *parallelExec = protocolParallelNew(
ProtocolParallel *const parallelExec = protocolParallelNew(
cfgOptionUInt64(cfgOptProtocolTimeout) / 2, backupJobCallback, &jobData);
// First client is always on the primary
@ -2160,14 +2163,14 @@ backupProcess(
// Create the rest of the clients on the primary or standby depending on the value of backup-standby. Note that standby
// backups don't count the primary client in process-max.
unsigned int processMax = cfgOptionUInt(cfgOptProcessMax) + (backupStandby ? 1 : 0);
unsigned int pgIdx = backupStandby ? backupData->pgIdxStandby : backupData->pgIdxPrimary;
const unsigned int processMax = cfgOptionUInt(cfgOptProcessMax) + (backupStandby ? 1 : 0);
const unsigned int pgIdx = backupStandby ? backupData->pgIdxStandby : backupData->pgIdxPrimary;
for (unsigned int processIdx = 2; processIdx <= processMax; processIdx++)
protocolParallelClientAdd(parallelExec, protocolLocalGet(protocolStorageTypePg, pgIdx, processIdx));
// Maintain a list of files that need to be removed from the manifest when the backup is complete
StringList *fileRemove = strLstNew();
StringList *const fileRemove = strLstNew();
// Determine how often the manifest will be saved (every one percent or threshold size, whichever is greater)
uint64_t manifestSaveLast = 0;
@ -2187,11 +2190,11 @@ backupProcess(
{
do
{
unsigned int completed = protocolParallelProcess(parallelExec);
const unsigned int completed = protocolParallelProcess(parallelExec);
for (unsigned int jobIdx = 0; jobIdx < completed; jobIdx++)
{
ProtocolParallelJob *job = protocolParallelResult(parallelExec);
ProtocolParallelJob *const job = protocolParallelResult(parallelExec);
backupJobResult(
manifest,
@ -2302,8 +2305,8 @@ backupArchiveCheckCopy(const BackupData *const backupData, Manifest *const manif
{
MEM_CONTEXT_TEMP_BEGIN()
{
uint64_t lsnStart = pgLsnFromStr(manifestData(manifest)->lsnStart);
uint64_t lsnStop = pgLsnFromStr(manifestData(manifest)->lsnStop);
const uint64_t lsnStart = pgLsnFromStr(manifestData(manifest)->lsnStart);
const uint64_t lsnStop = pgLsnFromStr(manifestData(manifest)->lsnStop);
LOG_INFO_FMT(
"check archive for segment(s) %s:%s",
@ -2314,20 +2317,20 @@ backupArchiveCheckCopy(const BackupData *const backupData, Manifest *const manif
backupManifestSaveCopy(manifest, cipherPassBackup, false);
// Use base path to set ownership and mode
const ManifestPath *basePath = manifestPathFind(manifest, MANIFEST_TARGET_PGDATA_STR);
const ManifestPath *const basePath = manifestPathFind(manifest, MANIFEST_TARGET_PGDATA_STR);
// Loop through all the segments in the lsn range
StringList *walSegmentList = pgLsnRangeToWalSegmentList(
const StringList *const walSegmentList = pgLsnRangeToWalSegmentList(
backupData->timeline, lsnStart, lsnStop, backupData->walSegmentSize);
for (unsigned int walSegmentIdx = 0; walSegmentIdx < strLstSize(walSegmentList); walSegmentIdx++)
{
MEM_CONTEXT_TEMP_BEGIN()
{
const String *walSegment = strLstGet(walSegmentList, walSegmentIdx);
const String *const walSegment = strLstGet(walSegmentList, walSegmentIdx);
// Find the actual wal segment file in the archive
const String *archiveFile = walSegmentFind(
const String *const archiveFile = walSegmentFind(
storageRepo(), backupData->archiveId, walSegment, cfgOptionUInt64(cfgOptArchiveTimeout));
if (cfgOptionBool(cfgOptArchiveCopy))
@ -2336,14 +2339,14 @@ backupArchiveCheckCopy(const BackupData *const backupData, Manifest *const manif
LOG_DETAIL_FMT("copy segment %s to backup", strZ(walSegment));
// Get compression type of the WAL segment and backup
CompressType archiveCompressType = compressTypeFromName(archiveFile);
CompressType backupCompressType = compressTypeEnum(cfgOptionStrId(cfgOptCompressType));
const CompressType archiveCompressType = compressTypeFromName(archiveFile);
const CompressType backupCompressType = compressTypeEnum(cfgOptionStrId(cfgOptCompressType));
// Open the archive file
StorageRead *read = storageNewReadP(
StorageRead *const read = storageNewReadP(
storageRepo(),
strNewFmt(STORAGE_REPO_ARCHIVE "/%s/%s", strZ(backupData->archiveId), strZ(archiveFile)));
IoFilterGroup *filterGroup = ioReadFilterGroup(storageReadIo(read));
IoFilterGroup *const filterGroup = ioReadFilterGroup(storageReadIo(read));
// Decrypt with archive key if encrypted
cipherBlockFilterGroupAdd(
@ -2371,7 +2374,7 @@ backupArchiveCheckCopy(const BackupData *const backupData, Manifest *const manif
ioFilterGroupAdd(filterGroup, ioSizeNew());
// Copy the file
const String *manifestName = strNewFmt(
const String *const manifestName = strNewFmt(
MANIFEST_TARGET_PGDATA "/%s/%s", strZ(pgWalPath(manifestData(manifest)->pgVersion)), strZ(walSegment));
storageCopyP(
@ -2443,14 +2446,14 @@ backupComplete(InfoBackup *const infoBackup, Manifest *const manifest)
// is required. We can't just do a straight copy since the destination needs to be compressed and that must happen before
// encryption in order to be efficient. Compression will always be gz for compatibility and since it is always available.
// -------------------------------------------------------------------------------------------------------------------------
StorageRead *manifestRead = storageNewReadP(
StorageRead *const manifestRead = storageNewReadP(
storageRepo(), strNewFmt(STORAGE_REPO_BACKUP "/%s/" BACKUP_MANIFEST_FILE, strZ(backupLabel)));
cipherBlockFilterGroupAdd(
ioReadFilterGroup(storageReadIo(manifestRead)), cfgOptionStrId(cfgOptRepoCipherType), cipherModeDecrypt,
infoPgCipherPass(infoBackupPg(infoBackup)));
StorageWrite *manifestWrite = storageNewWriteP(
StorageWrite *const manifestWrite = storageNewWriteP(
storageRepoWrite(),
strNewFmt(
STORAGE_REPO_BACKUP "/" BACKUP_PATH_HISTORY "/%s/%s.manifest%s", strZ(strSubN(backupLabel, 0, 4)),
@ -2518,27 +2521,27 @@ cmdBackup(void)
}
// Load backup.info
InfoBackup *infoBackup = infoBackupLoadFileReconstruct(
InfoBackup *const infoBackup = infoBackupLoadFileReconstruct(
storageRepo(), INFO_BACKUP_PATH_FILE_STR, cfgOptionStrId(cfgOptRepoCipherType), cfgOptionStrNull(cfgOptRepoCipherPass));
InfoPgData infoPg = infoPgDataCurrent(infoBackupPg(infoBackup));
const String *cipherPassBackup = infoPgCipherPass(infoBackupPg(infoBackup));
const InfoPgData infoPg = infoPgDataCurrent(infoBackupPg(infoBackup));
const String *const cipherPassBackup = infoPgCipherPass(infoBackupPg(infoBackup));
// Get pg storage and database objects
BackupData *backupData = backupInit(infoBackup);
BackupData *const backupData = backupInit(infoBackup);
// Get the start timestamp which will later be written into the manifest to track total backup time
time_t timestampStart = backupTime(backupData, false);
const time_t timestampStart = backupTime(backupData, false);
// Check if there is a prior manifest when backup type is diff/incr
Manifest *manifestPrior = backupBuildIncrPrior(infoBackup);
Manifest *const manifestPrior = backupBuildIncrPrior(infoBackup);
// Start the backup
BackupStartResult backupStartResult = backupStart(backupData);
const BackupStartResult backupStartResult = backupStart(backupData);
// Build the manifest
const ManifestBlockIncrMap blockIncrMap = backupBlockIncrMap();
Manifest *manifest = manifestNewBuild(
Manifest *const manifest = manifestNewBuild(
backupData->storagePrimary, infoPg.version, infoPg.catalogVersion, timestampStart, cfgOptionBool(cfgOptOnline),
cfgOptionBool(cfgOptChecksumPage), cfgOptionBool(cfgOptRepoBundle), cfgOptionBool(cfgOptRepoBlock), &blockIncrMap,
strLstNewVarLst(cfgOptionLst(cfgOptExclude)), backupStartResult.tablespaceList);
@ -2582,7 +2585,7 @@ cmdBackup(void)
}
// Stop the backup
BackupStopResult backupStopResult = backupStop(backupData, manifest);
const BackupStopResult backupStopResult = backupStop(backupData, manifest);
// Complete manifest
manifestBuildComplete(

View File

@ -47,7 +47,7 @@ backupFileRepoPath(const String *const backupLabel, const BackupFileRepoPathPara
/**********************************************************************************************************************************/
FN_EXTERN String *
backupLabelFormat(BackupType type, const String *backupLabelPrior, time_t timestamp)
backupLabelFormat(const BackupType type, const String *const backupLabelPrior, const time_t timestamp)
{
FUNCTION_LOG_BEGIN(logLevelTrace);
FUNCTION_LOG_PARAM(STRING_ID, type);
@ -67,7 +67,7 @@ backupLabelFormat(BackupType type, const String *backupLabelPrior, time_t timest
"unable to format time");
// If full label
String *result = NULL;
String *result;
if (type == backupTypeFull)
{
@ -148,7 +148,7 @@ backupRegExp(const BackupRegExpParam param)
/**********************************************************************************************************************************/
FN_EXTERN void
backupLinkLatest(const String *backupLabel, unsigned int repoIdx)
backupLinkLatest(const String *const backupLabel, const unsigned int repoIdx)
{
FUNCTION_TEST_BEGIN();
FUNCTION_TEST_PARAM(STRING, backupLabel);

View File

@ -26,7 +26,7 @@ Backup File
Helper functions
***********************************************************************************************************************************/
static unsigned int
segmentNumber(const String *pgFile)
segmentNumber(const String *const pgFile)
{
FUNCTION_TEST_BEGIN();
FUNCTION_TEST_PARAM(STRING, pgFile);
@ -60,12 +60,10 @@ backupFile(
ASSERT(fileList != NULL && !lstEmpty(fileList));
// Backup file results
List *result = NULL;
List *const result = lstNewP(sizeof(BackupFileResult));
MEM_CONTEXT_TEMP_BEGIN()
{
result = lstNewP(sizeof(BackupFileResult));
// Check files to determine which ones need to be copied
for (unsigned int fileIdx = 0; fileIdx < lstSize(fileList); fileIdx++)
{
@ -89,7 +87,7 @@ backupFile(
// Generate checksum/size for the pg file. Only read as many bytes as passed in pgFileSize. If the file has
// grown since the manifest was built we don't need to consider the extra bytes since they will be replayed from
// WAL during recovery.
IoRead *read = storageReadIo(
IoRead *const read = storageReadIo(
storageNewReadP(
storagePg(), file->pgFile, .ignoreMissing = file->pgFileIgnoreMissing,
.limit = file->pgFileCopyExactSize ? VARUINT64(file->pgFileSize) : NULL));
@ -101,7 +99,7 @@ backupFile(
{
const Buffer *const pgTestChecksum = pckReadBinP(
ioFilterGroupResultP(ioReadFilterGroup(read), CRYPTO_HASH_FILTER_TYPE));
uint64_t pgTestSize = pckReadU64P(ioFilterGroupResultP(ioReadFilterGroup(read), SIZE_FILTER_TYPE));
const uint64_t pgTestSize = pckReadU64P(ioFilterGroupResultP(ioReadFilterGroup(read), SIZE_FILTER_TYPE));
// Does the pg file match?
if (file->pgFileSize == pgTestSize && bufEq(file->pgFileChecksum, pgTestChecksum))
@ -142,7 +140,7 @@ backupFile(
else if (!file->pgFileDelta || pgFileMatch)
{
// Generate checksum/size for the repo file
IoRead *read = storageReadIo(storageNewReadP(storageRepo(), repoFile));
IoRead *const read = storageReadIo(storageNewReadP(storageRepo(), repoFile));
ioFilterGroupAdd(ioReadFilterGroup(read), cryptoHashNew(hashTypeSha1));
ioFilterGroupAdd(ioReadFilterGroup(read), ioSizeNew());
ioReadDrain(read);
@ -150,7 +148,7 @@ backupFile(
// Test checksum/size
const Buffer *const pgTestChecksum = pckReadBinP(
ioFilterGroupResultP(ioReadFilterGroup(read), CRYPTO_HASH_FILTER_TYPE));
uint64_t pgTestSize = pckReadU64P(ioFilterGroupResultP(ioReadFilterGroup(read), SIZE_FILTER_TYPE));
const uint64_t pgTestSize = pckReadU64P(ioFilterGroupResultP(ioReadFilterGroup(read), SIZE_FILTER_TYPE));
// No need to recopy if checksum/size match. When the repo checksum is missing still compare to repo size
// since the repo checksum should only be missing when the repo file was not compressed/encrypted, i.e. the
@ -198,7 +196,7 @@ backupFile(
// replayed from WAL during recovery.
bool repoChecksum = false;
StorageRead *read = storageNewReadP(
StorageRead *const read = storageNewReadP(
storagePg(), file->pgFile, .ignoreMissing = file->pgFileIgnoreMissing, .compressible = compressible,
.limit = file->pgFileCopyExactSize ? VARUINT64(file->pgFileSize) : NULL);
ioFilterGroupAdd(ioReadFilterGroup(storageReadIo(read)), cryptoHashNew(hashTypeSha1));
@ -358,8 +356,6 @@ backupFile(
// Close the repository file if it was opened
if (write != NULL)
ioWriteClose(storageWriteIo(write));
lstMove(result, memContextPrior());
}
MEM_CONTEXT_TEMP_END();

View File

@ -48,7 +48,7 @@ pageChecksumToLog(const PageChecksum *const this, StringStatic *const debugLog)
Verify page checksums
***********************************************************************************************************************************/
static void
pageChecksumProcess(THIS_VOID, const Buffer *input)
pageChecksumProcess(THIS_VOID, const Buffer *const input)
{
THIS(PageChecksum);
@ -64,7 +64,7 @@ pageChecksumProcess(THIS_VOID, const Buffer *input)
unsigned int pageTotal = (unsigned int)(bufUsed(input) / PG_PAGE_SIZE_DEFAULT);
// If there is a partial page make sure there is enough of it to validate the checksum
unsigned int pageRemainder = (unsigned int)(bufUsed(input) % PG_PAGE_SIZE_DEFAULT);
const unsigned int pageRemainder = (unsigned int)(bufUsed(input) % PG_PAGE_SIZE_DEFAULT);
if (pageRemainder != 0)
{
@ -190,7 +190,7 @@ pageChecksumResult(THIS_VOID)
ASSERT(this != NULL);
Pack *result = NULL;
Pack *result;
MEM_CONTEXT_OBJ_BEGIN(this)
{
@ -246,7 +246,7 @@ pageChecksumNew(const unsigned int segmentNo, const unsigned int segmentPageTota
OBJ_NEW_END();
// Create param list
Pack *paramList = NULL;
Pack *paramList;
MEM_CONTEXT_TEMP_BEGIN()
{
@ -269,7 +269,7 @@ pageChecksumNew(const unsigned int segmentNo, const unsigned int segmentPageTota
FN_EXTERN IoFilter *
pageChecksumNewPack(const Pack *const paramList)
{
IoFilter *result = NULL;
IoFilter *result;
MEM_CONTEXT_TEMP_BEGIN()
{

View File

@ -29,7 +29,7 @@ backupFileProtocol(PackRead *const param, ProtocolServer *const server)
{
// Backup options that apply to all files
const String *const repoFile = pckReadStrP(param);
uint64_t bundleId = pckReadU64P(param);
const uint64_t bundleId = pckReadU64P(param);
const bool bundleRaw = bundleId != 0 ? pckReadBoolP(param) : false;
const unsigned int blockIncrReference = (unsigned int)pckReadU64P(param);
const CompressType repoFileCompressType = (CompressType)pckReadU32P(param);
@ -38,7 +38,7 @@ backupFileProtocol(PackRead *const param, ProtocolServer *const server)
const String *const cipherPass = pckReadStrP(param);
// Build the file list
List *fileList = lstNewP(sizeof(BackupFile));
List *const fileList = lstNewP(sizeof(BackupFile));
while (!pckReadNullP(param))
{