1
0
mirror of https://github.com/pgbackrest/pgbackrest.git synced 2025-04-15 11:36:40 +02:00

Remove double spaces from comments and documentation.

Double spaces have fallen out of favor in recent years because they no longer contribute to readability.

We have been using single spaces and editing related paragraphs for some time, but now it seems best to update the remaining instances to avoid churn in unrelated commits and to make it clearer what spacing contributors should use.
This commit is contained in:
David Steele 2023-05-02 12:57:12 +03:00
parent 5ea7b91bf9
commit 1bd5530a59
130 changed files with 1111 additions and 1113 deletions

View File

@@ -803,8 +803,8 @@ sub keys
#################################################################################################################################### ####################################################################################################################################
# test - test a value. # test - test a value.
# #
# Test a value to see if it equals the supplied test value. If no test value is given, tests that the section, key, or subkey # Test a value to see if it equals the supplied test value. If no test value is given, tests that the section, key, or subkey is
# is defined. # defined.
#################################################################################################################################### ####################################################################################################################################
sub test sub test
{ {

View File

@@ -117,10 +117,9 @@
<!-- <!--
Building the contributing document has some special requirements because it runs Docker in Docker so the repo path Building the contributing document has some special requirements because it runs Docker in Docker so the repo path
must align on the host and all Docker containers. Run `pgbackrest/doc/doc.pl` from within the home directory of must align on the host and all Docker containers. Run `pgbackrest/doc/doc.pl` from within the home directory of the
the user that will do the doc build, e.g. `home/vagrant`. If the repo is not located directly in the home user that will do the doc build, e.g. `home/vagrant`. If the repo is not located directly in the home directory,
directory, e.g. `/home/vagrant/pgbackrest`, then a symlink may be used, e.g. `/home/vagrant/pgbackrest`, then a symlink may be used, e.g. `ln -s /path/to/repo /home/vagrant/pgbackrest`.
e.g. `ln -s /path/to/repo /home/vagrant/pgbackrest`.
--> -->
<render-source key="contributing" file="../../../CONTRIBUTING.md"/> <render-source key="contributing" file="../../../CONTRIBUTING.md"/>
<render-source key="documentation" file="../../README.md"/> <render-source key="documentation" file="../../README.md"/>

View File

@@ -23263,7 +23263,7 @@
{ {
"commit": "bc46aefe61871e27281b9b8c9ffc185c8e2846af", "commit": "bc46aefe61871e27281b9b8c9ffc185c8e2846af",
"date": "2013-11-20 22:24:30 -0500", "date": "2013-11-20 22:24:30 -0500",
"subject": "Fixed for OSX. Do not every use TextEditor on code!" "subject": "Fixed for OSX. Do not ever use TextEditor on code!"
}, },
{ {
"commit": "e67821a23096f4788f6bef71e7d4d361b7d9858f", "commit": "e67821a23096f4788f6bef71e7d4d361b7d9858f",

View File

@@ -65,8 +65,8 @@ STRING_DECLARE(WAL_TIMELINE_HISTORY_REGEXP_STR);
/*********************************************************************************************************************************** /***********************************************************************************************************************************
Functions Functions
***********************************************************************************************************************************/ ***********************************************************************************************************************************/
// Remove errors for an archive file. This should be done before forking the async process to prevent a race condition where an // Remove errors for an archive file. This should be done before forking the async process to prevent a race condition where an old
// old error may be reported rather than waiting for the async process to succeed or fail. // error may be reported rather than waiting for the async process to succeed or fail.
FN_EXTERN void archiveAsyncErrorClear(ArchiveMode archiveMode, const String *archiveFile); FN_EXTERN void archiveAsyncErrorClear(ArchiveMode archiveMode, const String *archiveFile);
// Check for ok/error status files in the spool in/out directory. throwOnError determines whether an error will be thrown when an // Check for ok/error status files in the spool in/out directory. throwOnError determines whether an error will be thrown when an

View File

@@ -540,9 +540,8 @@ queueNeed(const String *walSegment, bool found, uint64_t queueSize, size_t walSe
MEM_CONTEXT_TEMP_BEGIN() MEM_CONTEXT_TEMP_BEGIN()
{ {
// Determine the first WAL segment for the async process to get. If the WAL segment requested by // Determine the first WAL segment for the async process to get. If the WAL segment requested by PostgreSQL was not found
// PostgreSQL was not found then use that. If the segment was found but the queue is not full then // then use that. If the segment was found but the queue is not full then start with the next segment.
// start with the next segment.
const String *walSegmentFirst = const String *walSegmentFirst =
found ? walSegmentNext(walSegment, walSegmentSize, pgVersion) : walSegment; found ? walSegmentNext(walSegment, walSegmentSize, pgVersion) : walSegment;
@@ -715,9 +714,9 @@ cmdArchiveGet(void)
} }
} }
// If the WAL segment has not already been found then start the async process to get it. There's no point in // If the WAL segment has not already been found then start the async process to get it. There's no point in forking
// forking the async process off more than once so track that as well. Use an archive lock to prevent forking if // the async process off more than once so track that as well. Use an archive lock to prevent forking if the async
// the async process was launched by another process. // process was launched by another process.
if (!forked && (!found || !queueFull) && lockAcquireP(.returnOnNoLock = true)) if (!forked && (!found || !queueFull) && lockAcquireP(.returnOnNoLock = true))
{ {
// Get control info // Get control info
@@ -736,8 +735,8 @@ cmdArchiveGet(void)
StringList *commandExec = cfgExecParam(cfgCmdArchiveGet, cfgCmdRoleAsync, optionReplace, true, false); StringList *commandExec = cfgExecParam(cfgCmdArchiveGet, cfgCmdRoleAsync, optionReplace, true, false);
strLstInsert(commandExec, 0, cfgExe()); strLstInsert(commandExec, 0, cfgExe());
// Clean the current queue using the list of WAL that we ideally want in the queue. queueNeed() // Clean the current queue using the list of WAL that we ideally want in the queue. queueNeed() will return the
// will return the list of WAL needed to fill the queue and this will be passed to the async process. // list of WAL needed to fill the queue and this will be passed to the async process.
const StringList *queue = queueNeed( const StringList *queue = queueNeed(
walSegment, found, cfgOptionUInt64(cfgOptArchiveGetQueueMax), pgControl.walSegmentSize, walSegment, found, cfgOptionUInt64(cfgOptArchiveGetQueueMax), pgControl.walSegmentSize,
pgControl.version); pgControl.version);

View File

@@ -359,8 +359,8 @@ cmdArchivePush(void)
pushed = archiveAsyncStatus(archiveModePush, archiveFile, throwOnError, true); pushed = archiveAsyncStatus(archiveModePush, archiveFile, throwOnError, true);
// If the WAL segment has not already been pushed then start the async process to push it. There's no point in // If the WAL segment has not already been pushed then start the async process to push it. There's no point in
// forking the async process off more than once so track that as well. Use an archive lock to prevent more than // forking the async process off more than once so track that as well. Use an archive lock to prevent more than one
// one async process being launched. // async process being launched.
if (!pushed && !forked && lockAcquireP(.returnOnNoLock = true)) if (!pushed && !forked && lockAcquireP(.returnOnNoLock = true))
{ {
// The async process should not output on the console at all // The async process should not output on the console at all

View File

@@ -596,8 +596,8 @@ backupBuildIncrPrior(const InfoBackup *const infoBackup)
"%s backup cannot alter " CFGOPT_COMPRESS_TYPE " option to '%s', reset to value in %s", "%s backup cannot alter " CFGOPT_COMPRESS_TYPE " option to '%s', reset to value in %s",
strZ(cfgOptionDisplay(cfgOptType)), strZ(cfgOptionDisplay(cfgOptCompressType)), strZ(backupLabelPrior)); strZ(cfgOptionDisplay(cfgOptType)), strZ(cfgOptionDisplay(cfgOptCompressType)), strZ(backupLabelPrior));
// Set the compression type back to whatever was in the prior backup. This is not strictly needed since we // Set the compression type back to whatever was in the prior backup. This is not strictly needed since we could
// could store compression type on a per file basis, but it seems simplest and safest for now. // store compression type on a per file basis, but it seems simplest and safest for now.
cfgOptionSet( cfgOptionSet(
cfgOptCompressType, cfgSourceParam, VARSTR(compressTypeStr(manifestPriorData->backupOptionCompressType))); cfgOptCompressType, cfgSourceParam, VARSTR(compressTypeStr(manifestPriorData->backupOptionCompressType)));
@@ -614,14 +614,14 @@ backupBuildIncrPrior(const InfoBackup *const infoBackup)
cfgOptCompressLevel, cfgSourceParam, VARINT64(varUInt(manifestPriorData->backupOptionCompressLevel))); cfgOptCompressLevel, cfgSourceParam, VARINT64(varUInt(manifestPriorData->backupOptionCompressLevel)));
} }
// If not defined this backup was done in a version prior to page checksums being introduced. Just set // If not defined this backup was done in a version prior to page checksums being introduced. Just set checksum-page
// checksum-page to false and move on without a warning. Page checksums will start on the next full backup. // to false and move on without a warning. Page checksums will start on the next full backup.
if (manifestData(result)->backupOptionChecksumPage == NULL) if (manifestData(result)->backupOptionChecksumPage == NULL)
{ {
cfgOptionSet(cfgOptChecksumPage, cfgSourceParam, BOOL_FALSE_VAR); cfgOptionSet(cfgOptChecksumPage, cfgSourceParam, BOOL_FALSE_VAR);
} }
// Don't allow the checksum-page option to change in a diff or incr backup. This could be confusing as only // Don't allow the checksum-page option to change in a diff or incr backup. This could be confusing as only certain
// certain files would be checksummed and the list could be incomplete during reporting. // files would be checksummed and the list could be incomplete during reporting.
else else
{ {
const bool checksumPagePrior = varBool(manifestData(result)->backupOptionChecksumPage); const bool checksumPagePrior = varBool(manifestData(result)->backupOptionChecksumPage);
@@ -2472,8 +2472,8 @@ backupComplete(InfoBackup *const infoBackup, Manifest *const manifest)
if (storageFeature(storageRepoWrite(), storageFeaturePathSync)) if (storageFeature(storageRepoWrite(), storageFeaturePathSync))
storagePathSyncP(storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/" BACKUP_PATH_HISTORY)); storagePathSyncP(storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/" BACKUP_PATH_HISTORY));
// Create a symlink to the most recent backup if supported. This link is purely informational for the user and is never // Create a symlink to the most recent backup if supported. This link is purely informational for the user and is never used
// used by us since symlinks are not supported on all storage types. // by us since symlinks are not supported on all storage types.
// ------------------------------------------------------------------------------------------------------------------------- // -------------------------------------------------------------------------------------------------------------------------
backupLinkLatest(backupLabel, cfgOptionGroupIdxDefault(cfgOptGrpRepo)); backupLinkLatest(backupLabel, cfgOptionGroupIdxDefault(cfgOptGrpRepo));

View File

@@ -157,8 +157,8 @@ backupLinkLatest(const String *const backupLabel, const unsigned int repoIdx)
MEM_CONTEXT_TEMP_BEGIN() MEM_CONTEXT_TEMP_BEGIN()
{ {
// Create a symlink to the most recent backup if supported. This link is purely informational for the user and is never // Create a symlink to the most recent backup if supported. This link is purely informational for the user and is never used
// used by us since symlinks are not supported on all storage types. // by us since symlinks are not supported on all storage types.
// ------------------------------------------------------------------------------------------------------------------------- // -------------------------------------------------------------------------------------------------------------------------
const String *const latestLink = storagePathP(storageRepoIdx(repoIdx), STRDEF(STORAGE_REPO_BACKUP "/" BACKUP_LINK_LATEST)); const String *const latestLink = storagePathP(storageRepoIdx(repoIdx), STRDEF(STORAGE_REPO_BACKUP "/" BACKUP_LINK_LATEST));

View File

@@ -179,9 +179,9 @@ cmdStorageGet(void)
{ {
result = storageGetProcess(ioFdWriteNew(STRDEF("stdout"), STDOUT_FILENO, cfgOptionUInt64(cfgOptIoTimeout))); result = storageGetProcess(ioFdWriteNew(STRDEF("stdout"), STDOUT_FILENO, cfgOptionUInt64(cfgOptIoTimeout)));
} }
// Ignore write errors because it's possible (even likely) that this output is being piped to something like head which // Ignore write errors because it's possible (even likely) that this output is being piped to something like head which will
// will exit when it gets what it needs and leave us writing to a broken pipe. It would be better to just ignore the broken // exit when it gets what it needs and leave us writing to a broken pipe. It would be better to just ignore the broken pipe
// pipe error but currently we don't store system error codes. // error but currently we don't store system error codes.
CATCH(FileWriteError) CATCH(FileWriteError)
{ {
} }

View File

@@ -188,9 +188,9 @@ cmdStorageList(void)
{ {
storageListRender(ioFdWriteNew(STRDEF("stdout"), STDOUT_FILENO, cfgOptionUInt64(cfgOptIoTimeout))); storageListRender(ioFdWriteNew(STRDEF("stdout"), STDOUT_FILENO, cfgOptionUInt64(cfgOptIoTimeout)));
} }
// Ignore write errors because it's possible (even likely) that this output is being piped to something like head which // Ignore write errors because it's possible (even likely) that this output is being piped to something like head which will
// will exit when it gets what it needs and leave us writing to a broken pipe. It would be better to just ignore the broken // exit when it gets what it needs and leave us writing to a broken pipe. It would be better to just ignore the broken pipe
// pipe error but currently we don't store system error codes. // error but currently we don't store system error codes.
CATCH(FileWriteError) CATCH(FileWriteError)
{ {
} }

View File

@@ -804,8 +804,8 @@ restoreManifestOwner(const Manifest *const manifest, const String **const rootRe
} }
} }
// Else set owners to NULL. This means we won't make any attempt to update ownership and will just leave it as written by // Else set owners to NULL. This means we won't make any attempt to update ownership and will just leave it as written by
// the current user/group. If there are existing files that are not owned by the current user/group then we will attempt // the current user/group. If there are existing files that are not owned by the current user/group then we will attempt to
// to update them, which will generally cause an error, though some systems allow updates to the group ownership. // update them, which will generally cause an error, though some systems allow updates to the group ownership.
// ------------------------------------------------------------------------------------------------------------------------- // -------------------------------------------------------------------------------------------------------------------------
else else
{ {
@@ -1127,9 +1127,9 @@ restoreCleanBuild(const Manifest *const manifest, const String *const rootReplac
strLstSort(cleanData->fileIgnore, sortOrderAsc); strLstSort(cleanData->fileIgnore, sortOrderAsc);
// Check that the path exists. If not, there's no need to do any cleaning and we'll attempt to create it later. // Check that the path exists. If not, there's no need to do any cleaning and we'll attempt to create it later. Don't
// Don't log check for the same path twice. There can be multiple links to files in the same path, but logging it more // log check for the same path twice. There can be multiple links to files in the same path, but logging it more than
// than once makes the logs noisy and looks like a bug. // once makes the logs noisy and looks like a bug.
if (!strLstExists(pathChecked, cleanData->targetPath)) if (!strLstExists(pathChecked, cleanData->targetPath))
LOG_DETAIL_FMT("check '%s' exists", strZ(cleanData->targetPath)); LOG_DETAIL_FMT("check '%s' exists", strZ(cleanData->targetPath));
@@ -1201,8 +1201,8 @@ restoreCleanBuild(const Manifest *const manifest, const String *const rootReplac
// Step 2: Clean target directories // Step 2: Clean target directories
// ------------------------------------------------------------------------------------------------------------------------- // -------------------------------------------------------------------------------------------------------------------------
// Delete the pg_control file (if it exists) so the cluster cannot be started if restore does not complete. Sync the path // Delete the pg_control file (if it exists) so the cluster cannot be started if restore does not complete. Sync the path so
// so the file does not return, zombie-like, in the case of a host crash. // the file does not return, zombie-like, in the case of a host crash.
if (storageExistsP(storagePg(), STRDEF(PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL))) if (storageExistsP(storagePg(), STRDEF(PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL)))
{ {
LOG_DETAIL_FMT( LOG_DETAIL_FMT(
@@ -1596,8 +1596,8 @@ restoreRecoveryOption(unsigned int pgVersion)
if (!strLstExists(recoveryOptionKey, RESTORE_COMMAND_STR)) if (!strLstExists(recoveryOptionKey, RESTORE_COMMAND_STR))
{ {
// Null out options that it does not make sense to pass from the restore command to archive-get. All of these have // Null out options that it does not make sense to pass from the restore command to archive-get. All of these have
// reasonable defaults so there is no danger of an error -- they just might not be optimal. In any case, it seems // reasonable defaults so there is no danger of an error -- they just might not be optimal. In any case, it seems better
// better than, for example, passing --process-max=32 to archive-get because it was specified for restore. // than, for example, passing --process-max=32 to archive-get because it was specified for restore.
KeyValue *optionReplace = kvNew(); KeyValue *optionReplace = kvNew();
kvPut(optionReplace, VARSTRDEF(CFGOPT_EXEC_ID), NULL); kvPut(optionReplace, VARSTRDEF(CFGOPT_EXEC_ID), NULL);

View File

@@ -80,8 +80,8 @@ lz4CompressFreeResource(THIS_VOID)
Compress data Compress data
***********************************************************************************************************************************/ ***********************************************************************************************************************************/
// Helper to return a buffer where output will be written. If there is enough space in the provided output buffer then use it, // Helper to return a buffer where output will be written. If there is enough space in the provided output buffer then use it,
// otherwise allocate an internal buffer to hold the compressed data. Once we start using the internal buffer we'll need to // otherwise allocate an internal buffer to hold the compressed data. Once we start using the internal buffer we'll need to continue
// continue using it until it is completely flushed. // using it until it is completely flushed.
static Buffer * static Buffer *
lz4CompressBuffer(Lz4Compress *this, size_t required, Buffer *output) lz4CompressBuffer(Lz4Compress *this, size_t required, Buffer *output)
{ {

View File

@@ -161,8 +161,8 @@ cipherBlockProcessBlock(CipherBlock *this, const unsigned char *source, size_t s
source += headerExpected - this->headerSize; source += headerExpected - this->headerSize;
sourceSize -= headerExpected - this->headerSize; sourceSize -= headerExpected - this->headerSize;
// The first bytes of the file to decrypt should be equal to the magic. If not then this is not an // The first bytes of the file to decrypt should be equal to the magic. If not then this is not an encrypted file,
// encrypted file, or at least not in a format we recognize. // or at least not in a format we recognize.
if (!this->raw && memcmp(this->header, CIPHER_BLOCK_MAGIC, CIPHER_BLOCK_MAGIC_SIZE) != 0) if (!this->raw && memcmp(this->header, CIPHER_BLOCK_MAGIC, CIPHER_BLOCK_MAGIC_SIZE) != 0)
THROW(CryptoError, "cipher header invalid"); THROW(CryptoError, "cipher header invalid");
} }
@@ -321,8 +321,8 @@ cipherBlockProcess(THIS_VOID, const Buffer *source, Buffer *destination)
if (source == NULL) if (source == NULL)
{ {
// If salt was not generated it means that process() was never called with any data. It's OK to encrypt a zero byte // If salt was not generated it means that process() was never called with any data. It's OK to encrypt a zero byte file
// file but we need to call process to generate the header. // but we need to call process to generate the header.
if (!this->saltDone) if (!this->saltDone)
{ {
destinationSizeActual = cipherBlockProcessBlock(this, NULL, 0, bufRemainsPtr(outputActual)); destinationSizeActual = cipherBlockProcessBlock(this, NULL, 0, bufRemainsPtr(outputActual));
@@ -400,8 +400,8 @@ cipherBlockNew(const CipherMode mode, const CipherType cipherType, const Buffer
// Init crypto subsystem // Init crypto subsystem
cryptoInit(); cryptoInit();
// Lookup cipher by name. This means the ciphers passed in must exactly match a name expected by OpenSSL. This is a good // Lookup cipher by name. This means the ciphers passed in must exactly match a name expected by OpenSSL. This is a good thing
// thing since the name required by the openssl command-line tool will match what is used by pgBackRest. // since the name required by the openssl command-line tool will match what is used by pgBackRest.
String *const cipherTypeStr = strIdToStr(cipherType); String *const cipherTypeStr = strIdToStr(cipherType);
const EVP_CIPHER *cipher = EVP_get_cipherbyname(strZ(cipherTypeStr)); const EVP_CIPHER *cipher = EVP_get_cipherbyname(strZ(cipherTypeStr));

View File

@@ -108,8 +108,8 @@ ioFilterProcessInOut(IoFilter *this, const Buffer *input, Buffer *output)
} }
/*********************************************************************************************************************************** /***********************************************************************************************************************************
If done is not defined by the filter then check inputSame. If inputSame is true then the filter is not done. Even if the filter If done is not defined by the filter then check inputSame. If inputSame is true then the filter is not done. Even if the filter is
is done the interface will not report done until the interface is flushing. done the interface will not report done until the interface is flushing.
***********************************************************************************************************************************/ ***********************************************************************************************************************************/
FN_EXTERN bool FN_EXTERN bool
ioFilterDone(const IoFilter *this) ioFilterDone(const IoFilter *this)

View File

@@ -3,8 +3,8 @@ IO Filter Interface
Filters can modify an i/o stream (e.g. GzCompress, GzDecompress), generate a result (e.g. IoSize, CryptoHash), or even do both. Filters can modify an i/o stream (e.g. GzCompress, GzDecompress), generate a result (e.g. IoSize, CryptoHash), or even do both.
A filter is created using a constructor implemented by each filter (e.g. ioBufferNew). Filter processing is managed by A filter is created using a constructor implemented by each filter (e.g. ioBufferNew). Filter processing is managed by IoFilterGroup
IoFilterGroup so the only user facing functions are ioFilterResult() and ioFilterType(). so the only user facing functions are ioFilterResult() and ioFilterType().
Information on implementing a filter is in filter.internal.h. Information on implementing a filter is in filter.internal.h.
***********************************************************************************************************************************/ ***********************************************************************************************************************************/

View File

@@ -28,8 +28,8 @@ Constructors
typedef struct IoFilterInterface typedef struct IoFilterInterface
{ {
// Indicates that filter processing is done. This is used for filters that have additional data to be flushed even after all // Indicates that filter processing is done. This is used for filters that have additional data to be flushed even after all
// input has been processed. Compression and encryption filters will usually need to implement done. If done is not // input has been processed. Compression and encryption filters will usually need to implement done. If done is not implemented
// implemented then it will always return true if all input has been consumed, i.e. if inputSame returns false. // then it will always return true if all input has been consumed, i.e. if inputSame returns false.
bool (*done)(const void *driver); bool (*done)(const void *driver);
// Processing function for filters that do not produce output. Note that result must be implemented in this case (or else what // Processing function for filters that do not produce output. Note that result must be implemented in this case (or else what

View File

@@ -198,8 +198,8 @@ ioFilterGroupOpen(IoFilterGroup *this)
else else
{ {
// This cast is required because the compiler can't guarantee the const-ness of this object, i.e. it could be // This cast is required because the compiler can't guarantee the const-ness of this object, i.e. it could be
// modified in other parts of the code. This is actually expected and the only reason we need this const is to // modified in other parts of the code. This is actually expected and the only reason we need this const is to match
// match the const-ness of the input buffer provided by the caller. // the const-ness of the input buffer provided by the caller.
filterData->input = (const Buffer **)lastOutputBuffer; filterData->input = (const Buffer **)lastOutputBuffer;
filterData->inputLocal = *lastOutputBuffer; filterData->inputLocal = *lastOutputBuffer;
} }

View File

@@ -1,11 +1,11 @@
/*********************************************************************************************************************************** /***********************************************************************************************************************************
IO Filter Group IO Filter Group
Process data through an arbitrary group of filters in the order added by the user using ioFilterGroupAdd(). After processing Process data through an arbitrary group of filters in the order added by the user using ioFilterGroupAdd(). After processing results
results can be gathered using ioFilterGroupResult() for any filters that produce results. can be gathered using ioFilterGroupResult() for any filters that produce results.
Processing is complex and asymmetric for read/write so should be done via the IoRead and IoWrite objects. General users need Processing is complex and asymmetric for read/write so should be done via the IoRead and IoWrite objects. General users need only
only call ioFilterGroupNew(), ioFilterGroupAdd(), and ioFilterGroupResult(). call ioFilterGroupNew(), ioFilterGroupAdd(), and ioFilterGroupResult().
***********************************************************************************************************************************/ ***********************************************************************************************************************************/
#ifndef COMMON_IO_FILTER_GROUP_H #ifndef COMMON_IO_FILTER_GROUP_H
#define COMMON_IO_FILTER_GROUP_H #define COMMON_IO_FILTER_GROUP_H

View File

@@ -139,9 +139,9 @@ httpRequestProcess(HttpRequest *this, bool waitForResponse, bool contentCache)
{ {
result = httpResponseNew(session, httpRequestVerb(this), contentCache); result = httpResponseNew(session, httpRequestVerb(this), contentCache);
// Retry when response code is 5xx. These errors generally represent a server error for a request that // Retry when response code is 5xx. These errors generally represent a server error for a request that looks
// looks valid. There are a few errors that might be permanently fatal but they are rare and it seems best // valid. There are a few errors that might be permanently fatal but they are rare and it seems best not to
// not to try and pick and choose errors in this class to retry. // try and pick and choose errors in this class to retry.
if (httpResponseCode(result) / 100 == HTTP_RESPONSE_CODE_RETRY_CLASS) if (httpResponseCode(result) / 100 == HTTP_RESPONSE_CODE_RETRY_CLASS)
THROW_FMT(ServiceError, "[%u] %s", httpResponseCode(result), strZ(httpResponseReason(result))); THROW_FMT(ServiceError, "[%u] %s", httpResponseCode(result), strZ(httpResponseReason(result)));

View File

@@ -3,8 +3,8 @@ IO Read Interface
Objects that read from some IO source (file, socket, etc.) are implemented using this interface. All objects are required to Objects that read from some IO source (file, socket, etc.) are implemented using this interface. All objects are required to
implement IoReadProcess and can optionally implement IoReadOpen, IoReadClose, or IoReadEof. IoReadOpen and IoReadClose can be used implement IoReadProcess and can optionally implement IoReadOpen, IoReadClose, or IoReadEof. IoReadOpen and IoReadClose can be used
to allocate/open or deallocate/free resources. If IoReadEof is not implemented then ioReadEof() will always return false. An to allocate/open or deallocate/free resources. If IoReadEof is not implemented then ioReadEof() will always return false. An example
example of an IoRead object is IoBufferRead. of an IoRead object is IoBufferRead.
***********************************************************************************************************************************/ ***********************************************************************************************************************************/
#ifndef COMMON_IO_READ_H #ifndef COMMON_IO_READ_H
#define COMMON_IO_READ_H #define COMMON_IO_READ_H

View File

@@ -49,8 +49,8 @@ call site by supplying commonly-used values.
Note that it's possible that not all the macros below will appear in the code. In particular the ERROR and ASSERT macros should not Note that it's possible that not all the macros below will appear in the code. In particular the ERROR and ASSERT macros should not
be used directly. They are included for completeness and future usage. be used directly. They are included for completeness and future usage.
***********************************************************************************************************************************/ ***********************************************************************************************************************************/
// Define a macro to test logAny() that can be removed when performing coverage testing. Checking logAny() saves a function call // Define a macro to test logAny() that can be removed when performing coverage testing. Checking logAny() saves a function call for
// for logging calls that won't be output anywhere, but since the macro contains a branch it causes coverage problems. // logging calls that won't be output anywhere, but since the macro contains a branch it causes coverage problems.
#ifdef DEBUG_COVERAGE #ifdef DEBUG_COVERAGE
#define IF_LOG_ANY(logLevel) #define IF_LOG_ANY(logLevel)
#else #else

View File

@ -306,8 +306,8 @@ FN_EXTERN MemContext *memContextCurrent(void);
// Prior context, i.e. the context that was current before the last memContextSwitch() // Prior context, i.e. the context that was current before the last memContextSwitch()
FN_EXTERN MemContext *memContextPrior(void); FN_EXTERN MemContext *memContextPrior(void);
// "top" context. This context is created at initialization and is always present, i.e. it is never freed. The top context is a // "top" context. This context is created at initialization and is always present, i.e. it is never freed. The top context is a good
// good place to put long-lived mem contexts since they won't be automatically freed until the program exits. // place to put long-lived mem contexts since they won't be automatically freed until the program exits.
FN_EXTERN MemContext *memContextTop(void); FN_EXTERN MemContext *memContextTop(void);
// Get total size of mem context and all children // Get total size of mem context and all children

View File

@ -157,8 +157,8 @@ Macros for constant buffers
Frequently used constant buffers can be declared with these macros at compile time rather than dynamically at run time. Frequently used constant buffers can be declared with these macros at compile time rather than dynamically at run time.
Note that buffers created in this way are declared as const so can't be modified or freed by the buf*() methods. Casting to Note that buffers created in this way are declared as const so can't be modified or freed by the buf*() methods. Casting to Buffer *
Buffer * will result in a segfault. will result in a segfault.
By convention all buffer constant identifiers are appended with _BUF. By convention all buffer constant identifiers are appended with _BUF.
***********************************************************************************************************************************/ ***********************************************************************************************************************************/

View File

@ -770,8 +770,8 @@ strPathAbsolute(const String *this, const String *base)
{ {
result = strDup(this); result = strDup(this);
} }
// Else we'll need to construct the absolute path. You would hope we could use realpath() here but it is so broken in the // Else we'll need to construct the absolute path. You would hope we could use realpath() here but it is so broken in the Posix
// Posix spec that it seems best avoided. // spec that it seems best avoided.
else else
{ {
ASSERT(base != NULL); ASSERT(base != NULL);

View File

@ -215,8 +215,8 @@ Macros for constant strings
Frequently used constant strings can be declared with these macros at compile time rather than dynamically at run time. Frequently used constant strings can be declared with these macros at compile time rather than dynamically at run time.
Note that strings created in this way are declared as const so can't be modified or freed by the str*() methods. Casting to Note that strings created in this way are declared as const so can't be modified or freed by the str*() methods. Casting to String *
String * will result in a segfault due to modifying read-only memory. will result in a segfault due to modifying read-only memory.
By convention all string constant identifiers are appended with _STR. By convention all string constant identifiers are appended with _STR.
***********************************************************************************************************************************/ ***********************************************************************************************************************************/

View File

@ -1,8 +1,8 @@
/*********************************************************************************************************************************** /***********************************************************************************************************************************
Command and Option Configuration Command and Option Configuration
This module serves as a database for the configuration options. The configuration rules reside in config/define.c and This module serves as a database for the configuration options. The configuration rules reside in config/define.c and config/parse.c
config/parse.c sets the command and options and determines which options are valid for a command. sets the command and options and determines which options are valid for a command.
***********************************************************************************************************************************/ ***********************************************************************************************************************************/
#ifndef CONFIG_CONFIG_H #ifndef CONFIG_CONFIG_H
#define CONFIG_CONFIG_H #define CONFIG_CONFIG_H
@ -24,8 +24,8 @@ typedef enum
// Called directly by the user. This is the main process of the command that may or may not spawn other command roles. // Called directly by the user. This is the main process of the command that may or may not spawn other command roles.
cfgCmdRoleMain = 0, cfgCmdRoleMain = 0,
// Async worker that is spawned so the main process can return a result while work continues. An async worker may spawn local // Async worker that is spawned so the main process can return a result while work continues. An async worker may spawn local or
// or remote workers. // remote workers.
cfgCmdRoleAsync, cfgCmdRoleAsync,
// Local worker for parallelizing jobs. A local worker may spawn a remote worker. // Local worker for parallelizing jobs. A local worker may spawn a remote worker.
@ -245,8 +245,8 @@ FN_EXTERN void cfgCommandSet(ConfigCommand commandId, ConfigCommandRole commandR
// pgBackRest exe // pgBackRest exe
FN_EXTERN const String *cfgExe(void); FN_EXTERN const String *cfgExe(void);
// Set option default. Option defaults are generally not set in advance because the vast majority of them are never used. It is // Set option default. Option defaults are generally not set in advance because the vast majority of them are never used. It is more
// more efficient to generate them when they are requested. Some defaults (e.g. the exe path) are set at runtime. // efficient to generate them when they are requested. Some defaults (e.g. the exe path) are set at runtime.
FN_EXTERN void cfgOptionDefaultSet(ConfigOption optionId, const Variant *defaultValue); FN_EXTERN void cfgOptionDefaultSet(ConfigOption optionId, const Variant *defaultValue);
// Was the option negated? // Was the option negated?

2
src/configure vendored
View File

@ -5599,4 +5599,4 @@ if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
printf "%s\n" "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} printf "%s\n" "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
fi fi
# Generated from src/build/configure.ac sha1 380d8dd159305ac9e6da7816064a41012205ce6f # Generated from src/build/configure.ac sha1 5bd14429291b37c7b69a19b6086863c15530e138

View File

@ -25,8 +25,8 @@ Constants
Function types for loading and saving Function types for loading and saving
***********************************************************************************************************************************/ ***********************************************************************************************************************************/
// The purpose of this callback is to attempt a load (from file or otherwise). Return true when the load is successful or throw an // The purpose of this callback is to attempt a load (from file or otherwise). Return true when the load is successful or throw an
// error. Return false when there are no more loads to try, but always make at least one load attempt. The try parameter will // error. Return false when there are no more loads to try, but always make at least one load attempt. The try parameter will start
// start at 0 and be incremented on each call. // at 0 and be incremented on each call.
// {uncrustify_off - uncrustify unable to parse this statement} // {uncrustify_off - uncrustify unable to parse this statement}
typedef bool InfoLoadCallback(void *data, unsigned int try); typedef bool InfoLoadCallback(void *data, unsigned int try);
// {uncrustify_on} // {uncrustify_on}

View File

@ -1514,8 +1514,8 @@ manifestBuildValidate(Manifest *this, bool delta, time_t copyStart, CompressType
this->pub.data.backupOptionDelta = varNewBool(delta); this->pub.data.backupOptionDelta = varNewBool(delta);
// If online then add one second to the copy start time to allow for database updates during the last second that the // If online then add one second to the copy start time to allow for database updates during the last second that the
// manifest was being built. It's up to the caller to actually wait the remainder of the second, but for comparison // manifest was being built. It's up to the caller to actually wait the remainder of the second, but for comparison purposes
// purposes we want the time when the waiting started. // we want the time when the waiting started.
this->pub.data.backupTimestampCopyStart = copyStart + (this->pub.data.backupOptionOnline ? 1 : 0); this->pub.data.backupTimestampCopyStart = copyStart + (this->pub.data.backupOptionOnline ? 1 : 0);
// This value is not needed in this function, but it is needed for resumed manifests and this is last place to set it before // This value is not needed in this function, but it is needed for resumed manifests and this is last place to set it before
@ -1903,8 +1903,8 @@ typedef struct ManifestLoadData
const Variant *pathUserDefault; // Path default user const Variant *pathUserDefault; // Path default user
} ManifestLoadData; } ManifestLoadData;
// Helper to transform a variant that could be boolean or string into a string. If the boolean is false return NULL else return // Helper to transform a variant that could be boolean or string into a string. If the boolean is false return NULL else return the
// the string. The boolean cannot be true. // string. The boolean cannot be true.
static const String * static const String *
manifestOwnerGet(const Variant *owner) manifestOwnerGet(const Variant *owner)
{ {
@ -2298,8 +2298,8 @@ manifestLoadCallback(void *callbackData, const String *const section, const Stri
// Historically this option meant to add gz compression // Historically this option meant to add gz compression
else if (strEqZ(key, MANIFEST_KEY_OPTION_COMPRESS)) else if (strEqZ(key, MANIFEST_KEY_OPTION_COMPRESS))
manifest->pub.data.backupOptionCompressType = varBool(jsonToVar(value)) ? compressTypeGz : compressTypeNone; manifest->pub.data.backupOptionCompressType = varBool(jsonToVar(value)) ? compressTypeGz : compressTypeNone;
// This new option allows any type of compression to be specified. It must be parsed after the option above so the // This new option allows any type of compression to be specified. It must be parsed after the option above so the value
// value does not get overwritten. Since options are stored in alpha order this should always be true. // does not get overwritten. Since options are stored in alpha order this should always be true.
else if (strEqZ(key, MANIFEST_KEY_OPTION_COMPRESS_TYPE)) else if (strEqZ(key, MANIFEST_KEY_OPTION_COMPRESS_TYPE))
manifest->pub.data.backupOptionCompressType = compressTypeEnum(strIdFromStr(varStr(jsonToVar(value)))); manifest->pub.data.backupOptionCompressType = compressTypeEnum(strIdFromStr(varStr(jsonToVar(value))));
else if (strEqZ(key, MANIFEST_KEY_OPTION_HARDLINK)) else if (strEqZ(key, MANIFEST_KEY_OPTION_HARDLINK))
@ -2734,8 +2734,8 @@ manifestSaveCallback(void *const callbackData, const String *const sectionNext,
jsonWriteUInt64(jsonWriteKeyStrId(json, MANIFEST_KEY_BUNDLE_OFFSET), file.bundleOffset); jsonWriteUInt64(jsonWriteKeyStrId(json, MANIFEST_KEY_BUNDLE_OFFSET), file.bundleOffset);
} }
// Save if the file size is not zero and the checksum exists. The checksum might not exist if this is a partial // Save if the file size is not zero and the checksum exists. The checksum might not exist if this is a partial save
// save performed during a backup. // performed during a backup.
if (file.size != 0 && file.checksumSha1 != NULL) if (file.size != 0 && file.checksumSha1 != NULL)
{ {
jsonWriteStr( jsonWriteStr(

View File

@ -25,8 +25,8 @@ Config include path name. The parent path will vary based on configuration.
#define PROJECT_CONFIG_INCLUDE_PATH "conf.d" #define PROJECT_CONFIG_INCLUDE_PATH "conf.d"
/*********************************************************************************************************************************** /***********************************************************************************************************************************
Format Number -- defines format for info and manifest files as well as on-disk structure. If this number changes then the Format Number -- defines format for info and manifest files as well as on-disk structure. If this number changes then the repository
repository will be invalid unless migration functions are written. will be invalid unless migration functions are written.
***********************************************************************************************************************************/ ***********************************************************************************************************************************/
#define REPOSITORY_FORMAT 5 #define REPOSITORY_FORMAT 5

View File

@ -457,8 +457,8 @@ sub coverageGenerate
$strFile = substr($strLine, 3); $strFile = substr($strLine, 3);
$rhCoverage->{$strFile} = undef; $rhCoverage->{$strFile} = undef;
# Generate a random anchor so new reports will not show links as already followed. This is also an easy way # Generate a random anchor so new reports will not show links as already followed. This is also an easy way to
# to create valid, unambiguous links. # create valid, unambiguous links.
$rhCoverage->{$strFile}{anchor} = sha1_hex(rand(16)); $rhCoverage->{$strFile}{anchor} = sha1_hex(rand(16));
} }
# Mark functions as initially covered # Mark functions as initially covered

View File

@ -1371,8 +1371,8 @@ sub configUpdate
#################################################################################################################################### ####################################################################################################################################
# manifestMunge # manifestMunge
# #
# Allows for munging of the manifest while making it appear to be valid. This is used to create various error conditions that # Allows for munging of the manifest while making it appear to be valid. This is used to create various error conditions that should
# should be caught by the unit tests. # be caught by the unit tests.
#################################################################################################################################### ####################################################################################################################################
sub manifestMunge sub manifestMunge
{ {

View File

@ -154,8 +154,8 @@ sub backupLabel
# Make sure that the timestamp has not already been used by a prior backup. This is unlikely for online backups since there is # Make sure that the timestamp has not already been used by a prior backup. This is unlikely for online backups since there is
# already a wait after the manifest is built but it's still possible if the remote and local systems don't have synchronized # already a wait after the manifest is built but it's still possible if the remote and local systems don't have synchronized
# clocks. In practice this is most useful for making offline testing faster since it allows the wait after manifest build to # clocks. In practice this is most useful for making offline testing faster since it allows the wait after manifest build to be
# be skipped by dealing with any backup label collisions here. # skipped by dealing with any backup label collisions here.
if ($oStorageRepo->list( if ($oStorageRepo->list(
$strRepoBackupPath, $strRepoBackupPath,
{strExpression => {strExpression =>

View File

@ -233,8 +233,8 @@ sub run
# Enabled async archiving # Enabled async archiving
$oHostBackup->configUpdate({&CFGDEF_SECTION_GLOBAL => {'archive-async' => 'y'}}); $oHostBackup->configUpdate({&CFGDEF_SECTION_GLOBAL => {'archive-async' => 'y'}});
# Kick out a bunch of archive logs to exercise async archiving. Only do this when compressed and remote to slow it # Kick out a bunch of archive logs to exercise async archiving. Only do this when compressed and remote to slow it down
# down enough to make it evident that the async process is working. # enough to make it evident that the async process is working.
if ($strCompressType ne NONE && $strBackupDestination eq HOST_BACKUP) if ($strCompressType ne NONE && $strBackupDestination eq HOST_BACKUP)
{ {
&log(INFO, ' multiple wal switches to exercise async archiving'); &log(INFO, ' multiple wal switches to exercise async archiving');
@ -417,8 +417,8 @@ sub run
'insert into test1_zeroed values (1);', 'insert into test1_zeroed values (1);',
{strDb => 'test1', bAutoCommit => true}); {strDb => 'test1', bAutoCommit => true});
# Start a backup so the next backup has to restart it. This test is not required for PostgreSQL >= 9.6 since backups # Start a backup so the next backup has to restart it. This test is not required for PostgreSQL >= 9.6 since backups are run
# are run in non-exclusive mode. # in non-exclusive mode.
if ($oHostDbPrimary->pgVersion() >= PG_VERSION_93 && $oHostDbPrimary->pgVersion() < PG_VERSION_96) if ($oHostDbPrimary->pgVersion() >= PG_VERSION_93 && $oHostDbPrimary->pgVersion() < PG_VERSION_96)
{ {
$oHostDbPrimary->sqlSelectOne("select pg_start_backup('test backup that will cause an error', true)"); $oHostDbPrimary->sqlSelectOne("select pg_start_backup('test backup that will cause an error', true)");
@ -521,8 +521,8 @@ sub run
# Test that the first database has not been restored since --db-include did not include test1 # Test that the first database has not been restored since --db-include did not include test1
my ($strSHA1, $lSize) = storageTest()->hashSize($strDb1TablePath); my ($strSHA1, $lSize) = storageTest()->hashSize($strDb1TablePath);
# Create a zeroed sparse file in the test directory that is the same size as the filenode.map. We need to use the # Create a zeroed sparse file in the test directory that is the same size as the filenode.map. We need to use the posix
# posix driver directly to do this because handles cannot be passed back from the C code. # driver directly to do this because handles cannot be passed back from the C code.
my $oStorageTrunc = new pgBackRestTest::Common::Storage($self->testPath(), new pgBackRestTest::Common::StoragePosix()); my $oStorageTrunc = new pgBackRestTest::Common::Storage($self->testPath(), new pgBackRestTest::Common::StoragePosix());
my $strTestTable = $self->testPath() . "/testtable"; my $strTestTable = $self->testPath() . "/testtable";
@ -713,9 +713,9 @@ sub run
$oHostDbPrimary->clusterStop(); $oHostDbPrimary->clusterStop();
# The timeline to use for this test is subject to change based on tests being added or removed above. The best thing # The timeline to use for this test is subject to change based on tests being added or removed above. The best thing would
# would be to automatically grab the timeline after the restore, but since this test has been stable for a long time # be to automatically grab the timeline after the restore, but since this test has been stable for a long time it does not
# it does not seem worth the effort to automate. # seem worth the effort to automate.
$oHostDbPrimary->restore( $oHostDbPrimary->restore(
undef, $strIncrBackup, undef, $strIncrBackup,
{bDelta => true, strType => CFGOPTVAL_RESTORE_TYPE_STANDBY, strTargetTimeline => 4, iRepo => $iRepoTotal}); {bDelta => true, strType => CFGOPTVAL_RESTORE_TYPE_STANDBY, strTargetTimeline => 4, iRepo => $iRepoTotal});

View File

@ -1,8 +1,8 @@
/*********************************************************************************************************************************** /***********************************************************************************************************************************
Pq Test Harness Pq Test Harness
Scripted testing for PostgreSQL libpq so exact results can be returned for unit testing. See PostgreSQL client unit tests for Scripted testing for PostgreSQL libpq so exact results can be returned for unit testing. See PostgreSQL client unit tests for usage
usage examples. examples.
***********************************************************************************************************************************/ ***********************************************************************************************************************************/
#ifndef TEST_COMMON_HARNESS_PQ_H #ifndef TEST_COMMON_HARNESS_PQ_H
#define TEST_COMMON_HARNESS_PQ_H #define TEST_COMMON_HARNESS_PQ_H
@ -617,7 +617,7 @@ Functions
***********************************************************************************************************************************/ ***********************************************************************************************************************************/
void harnessPqScriptSet(HarnessPq *harnessPqScriptParam); void harnessPqScriptSet(HarnessPq *harnessPqScriptParam);
// Are we strict about requiring PQfinish()? Strict is a good idea for low-level testing of Pq code but is a nuissance for // Are we strict about requiring PQfinish()? Strict is a good idea for low-level testing of Pq code but is a nuisance for
// higher-level testing since it can mask other errors. When not strict, PGfinish() is allowed at any time and does not need to be // higher-level testing since it can mask other errors. When not strict, PGfinish() is allowed at any time and does not need to be
// scripted. // scripted.
void harnessPqScriptStrictSet(bool strict); void harnessPqScriptStrictSet(bool strict);