Remove double spaces from comments and documentation.
Double spaces have fallen out of favor in recent years because they no longer contribute to readability. We have been using single spaces and editing related paragraphs for some time, but now it seems best to update the remaining instances to avoid churn in unrelated commits and to make it clearer what spacing contributors should use.
parent 5ea7b91bf9
commit 1bd5530a59
@@ -74,7 +74,7 @@ The example below is not structured like an actual implementation and is intende
#### Example: hypothetical basic object construction
```c
/*
* HEADER FILE - see db.h for a complete implementation example
*/

// Typedef the object declared in the C file
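To make the construction pattern concrete, here is a minimal sketch of how such a header typically continues — the `Db` type name and the prototypes are illustrative assumptions, not the actual contents of `db.h`:

```c
typedef struct Db Db;                   // opaque object; the struct itself is defined in the C file

// Hypothetical constructor and getter prototypes
Db *dbNew(void);
const char *dbName(const Db *this);
```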
@@ -646,7 +646,7 @@ To add an option, add the following to the `<option-list>` section; if it does n
<option id="force" name="Force">
<summary>Force a restore.</summary>

<text>By itself this option forces the <postgres/> data and tablespace paths to be completely overwritten. In combination with <br-option>--delta</br-option> a timestamp/size delta will be performed instead of using checksums.</text>

<example>y</example>
</option>
@@ -58,7 +58,7 @@ Ubuntu 16.04:
```
RHEL 7:
```bash
./doc.pl --out=html --include=user-guide --no-cache --var=os-type=rhel --var=os-image=centos:7 --var=package=test/package/pgbackrest-2.08-1.el7.x86_64.rpm
```
RHEL 8:
```bash
@@ -31,7 +31,7 @@ to:
pgbackrest/test/test.pl --code-count
```

## Build release documentation. Be sure to install latex using the instructions from the Vagrantfile before running this step.
```
pgbackrest/doc/release.pl --build
```
@@ -133,7 +133,7 @@ v2.14: Bug Fix and Improvements
- Add user guide for Debian.
```

The first line will be the release title and the rest will be the body. The tag field should be updated with the current version so a tag is created from main. **Be sure to select the release commit explicitly rather than auto-tagging the last commit in main!**

## Push web documentation to main and deploy
```
@@ -354,7 +354,7 @@ sub process
}
}

# If the option did not come from the command also store in global option list. This prevents duplication of commonly
# used options.
if ($strOptionSource ne CONFIG_HELP_SOURCE_COMMAND)
{
@@ -475,7 +475,7 @@ sub build
$oNode->paramSet('depend-default', $strDependPrev);
}

# Set log to true if this section has an execute list. This helps reduce the info logging by only showing sections that are
# likely to take a long time.
$oNode->paramSet('log', $self->{bExe} && $oNode->nodeList('execute-list', false) > 0 ? true : false);
@@ -520,7 +520,7 @@ sub iniRender
$bFirst = false;
}

# If there is a checksum write it at the end of the file. Having the checksum at the end of the file allows some major
# performance optimizations which we won't implement in Perl, but will make the C code much more efficient.
if (!$bRelaxed && defined($oContent->{&INI_SECTION_BACKREST}) && defined($oContent->{&INI_SECTION_BACKREST}{&INI_KEY_CHECKSUM}))
{
@@ -803,8 +803,8 @@ sub keys
####################################################################################################################################
# test - test a value.
#
# Test a value to see if it equals the supplied test value. If no test value is given, tests that the section, key, or subkey is
# defined.
####################################################################################################################################
sub test
{
@@ -35,7 +35,7 @@ use constant CFGCMD_VERSION => 'version'

####################################################################################################################################
# Command role constants - roles allowed for each command. Commands may have multiple processes that work together to implement
# their functionality. These roles allow each process to know what it is supposed to do.
####################################################################################################################################
# Called directly by the user. This is the main process of the command that may or may not spawn other command roles.
use constant CFGCMD_ROLE_MAIN => 'main';
|
||||
$rhConfigDefine->{$strKey}{&CFGDEF_INTERNAL} = false;
|
||||
}
|
||||
|
||||
# All boolean config options can be negated. Boolean command-line options must be marked for negation individually.
|
||||
# All boolean config options can be negated. Boolean command-line options must be marked for negation individually.
|
||||
if ($rhConfigDefine->{$strKey}{&CFGDEF_TYPE} eq CFGDEF_TYPE_BOOLEAN && defined($rhConfigDefine->{$strKey}{&CFGDEF_SECTION}))
|
||||
{
|
||||
$rhConfigDefine->{$strKey}{&CFGDEF_NEGATE} = true;
|
||||
|
@@ -268,7 +268,7 @@ sub contributorTextGet
}

####################################################################################################################################
# Find a commit by subject prefix. Error if the prefix appears more than once.
####################################################################################################################################
sub commitFindSubject
{
@@ -23,14 +23,14 @@ push @EXPORT, qw(PROJECT_CONF);

# Project Version Number
#
# Defines the current version of the BackRest executable. The version number is used to track features but does not affect what
# repositories or manifests can be read - that's the job of the format number.
#-----------------------------------------------------------------------------------------------------------------------------------
push @EXPORT, qw(PROJECT_VERSION);

# Repository Format Number
#
# Defines format for info and manifest files as well as on-disk structure. If this number changes then the repository will be
# invalid unless migration functions are written.
#-----------------------------------------------------------------------------------------------------------------------------------
push @EXPORT, qw(REPOSITORY_FORMAT);
@@ -1,7 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE doc SYSTEM "manifest.dtd">
<doc>
<!-- System-wide variables -->
<variable-list>
<variable key="project">pgBackRest</variable>
<variable key="project-tagline">Reliable PostgreSQL Backup & Restore</variable>
@@ -57,15 +57,15 @@
$stryMonth[$month] . ' ' . $mday . ', ' . $year;
</variable>

<!-- HTML variables -->
<variable key="html-footer" eval='y'>
'Copyright &copy; 2015' . '-' . substr('{[release-date]}', length('{[release-date]}') - 4) .
', The PostgreSQL Global Development Group, <a href="{[github-url-license]}">MIT License</a>. Updated ' .
'{[release-date]}';
</variable>
<!-- <variable key="html-logo"><img src="{[project-logo]}"></variable> -->

<!-- PDF variables -->
<variable key="pdf-logo">{[doc-path]}/output/latex/logo</variable>

<variable key="pdf-title1">{[project]} User Guide</variable>
@@ -117,10 +117,9 @@

<!--
Building the contributing document has some special requirements because it runs Docker in Docker so the repo path
must align on the host and all Docker containers. Run `pgbackrest/doc/doc.pl` from within the home directory of the
user that will do the doc build, e.g. `/home/vagrant`. If the repo is not located directly in the home directory,
e.g. `/home/vagrant/pgbackrest`, then a symlink may be used, e.g. `ln -s /path/to/repo /home/vagrant/pgbackrest`.
-->
<render-source key="contributing" file="../../../CONTRIBUTING.md"/>
<render-source key="documentation" file="../../README.md"/>
@@ -4,7 +4,7 @@ The certificates in this directory are used for documentation generation only an

## pgBackRest CA

Generate a CA that will be used to sign documentation certificates. It can be installed in the documentation containers to make certificates signed by it valid.

```
cd [pgbackrest-root]/doc/resource/fake-cert
@@ -25,7 +25,7 @@
<section id="indentation">
<title>Indentation</title>

<p>Indentation is four spaces -- no tabs. Only file types that absolutely require tabs (e.g. `Makefile`) may use them.</p>
</section>

<!-- =================================================================================================================== -->
@@ -96,7 +96,7 @@ typedef struct InlineCommentExample
<list-item><id>nameIdx</id> - loop variable for iterating through a list of names</list-item>
</list>

<p>Variable names should be descriptive. Avoid <id>i</id>, <id>j</id>, etc.</p>
</section>

<!-- =============================================================================================================== -->
@@ -124,7 +124,7 @@ typedef struct InlineCommentExample

<p>The value should be aligned at column 69 whenever possible.</p>

<p>This type of constant should mostly be used for strings. Use enums whenever possible for integer constants.</p>

<p><b>String Constants</b></p>

@@ -143,7 +143,7 @@ typedef struct InlineCommentExample
STRING_EXTERN(SAMPLE_VALUE_STR, SAMPLE_VALUE);
</code-block>

<p>Static strings declared in the C file are not required to have a <code>#define</code> if the <code>#define</code> version is not used. Externed strings must always have the <code>#define</code> in the header file.</p>

<p><b>Enum Constants</b></p>

@@ -157,7 +157,7 @@ typedef enum
} CipherMode;
</code-block>

<p>Note the comma after the last element. This reduces diff churn when new elements are added.</p>
</section>

<!-- =============================================================================================================== -->
@@ -212,7 +212,7 @@ typedef enum
<section id="braces">
<title>Braces</title>

<p>C allows braces to be excluded for a single statement. However, braces should be used when the control statement (if, while, etc.) spans more than one line or the statement to be executed spans more than one line.</p>

<p>No braces needed:</p>

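A short illustration of the brace rule stated above (the condition and function names are hypothetical):

```c
// No braces needed: control statement and body each fit on one line
if (connected)
    sessionClose();

// Braces needed: the control statement spans more than one line
if (connected &&
    sessionIdle())
{
    sessionClose();
}
```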
@@ -291,14 +291,14 @@ switch (int)
<section id="macros">
<title>Macros</title>

<p>Don't use a macro when a function could be used instead. Macros make it hard to measure code coverage.</p>
</section>

<!-- =================================================================================================================== -->
<section id="objects">
<title>Objects</title>

<p>Object-oriented programming is used extensively. The object pointer is always referred to as <id>this</id>.</p>

<p>An object can expose internal struct members by defining a public struct that contains the members to be exposed and using inline functions to get/set the members.</p>

@@ -340,9 +340,9 @@ struct List
<section id="variadic-functions">
<title>Variadic Functions</title>

<p>Variadic functions can take a variable number of parameters. While the <code>printf()</code> pattern is variadic, it is not very flexible in terms of optional parameters given in any order.</p>

<p>This project implements variadic functions using macros (which are exempt from the normal macro rule of being all caps). A typical variadic function definition:</p>

<code-block type="c">
typedef struct StoragePathCreateParam
|
||||
storagePathCreateP(storageLocal(), "/tmp/pgbackrest", .errorOnExists = true, .mode = 0777);
|
||||
</code-block>
|
||||
|
||||
<p>If the majority of functions in a module or object are variadic it is best to provide macros for all functions even if they do not have variable parameters. Do not use the base function when variadic macros exist.</p>
|
||||
<p>If the majority of functions in a module or object are variadic it is best to provide macros for all functions even if they do not have variable parameters. Do not use the base function when variadic macros exist.</p>
|
||||
</section>
|
||||
</section>
|
||||
|
||||
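For reference, the macro behind a call like `storagePathCreateP()` typically packs the optional arguments into the param struct via designated initializers. A sketch of the general technique — not the exact pgbackrest definition:

```c
#include <sys/types.h>                              // for mode_t

typedef struct StoragePathCreateParam
{
    bool errorOnExists;                             // error if the path already exists
    mode_t mode;                                    // creation mode (0 = use the default)
} StoragePathCreateParam;

// Optional arguments become struct initializers; omitted fields default to zero
#define storagePathCreateP(this, path, ...)                                                                        \
    storagePathCreate(this, path, (StoragePathCreateParam){__VA_ARGS__})
```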
@@ -390,7 +390,7 @@ storagePathCreateP(storageLocal(), "/tmp/pgbackrest", .errorOnExists = true, .mo
<section id="uncoverable">
<title>Uncoverable Code</title>

<p>The <id>uncoverable</id> keyword marks code that can never be covered. For instance, a function that never returns because it always throws an error. Uncoverable code should be rare to non-existent outside the common libraries and test code.</p>

<code-block type="c">
} // {uncoverable - function throws error so never returns}
@@ -403,7 +403,7 @@ storagePathCreateP(storageLocal(), "/tmp/pgbackrest", .errorOnExists = true, .mo
<section id="uncovered">
<title>Uncovered Code</title>

<p>Marks code that is not tested for one reason or another. This should be kept to a minimum and an excuse given for each instance.</p>

<code-block type="c">
exit(EXIT_FAILURE); // {uncovered - test harness does not support non-zero exit}

@@ -107,7 +107,7 @@
</execute>
</execute-list>

<p>Some unit tests and all the integration tests require <proper>Docker</proper>. Running in containers allows us to simulate multiple hosts, test on different distributions and versions of <postgres/>, and use sudo without affecting the host system.</p>

<execute-list host="{[host-contrib]}">
<title>Install Docker</title>
@@ -131,7 +131,7 @@
</execute>
</execute-list>

<p>This clone of the <backrest/> repository is sufficient for experimentation. For development, create a fork and clone that instead.</p>

<execute-list host="{[host-contrib]}">
<title>Clone <backrest/> repository</title>
@@ -180,7 +180,7 @@

<code-block type="c">
/*
* HEADER FILE - see db.h for a complete implementation example
*/

// Typedef the object declared in the C file
@@ -738,7 +738,7 @@ run 8/1 ------------- L2285 no current backups
<option id="force" name="Force">
<summary>Force a restore.</summary>

<text>By itself this option forces the <postgres/> data and tablespace paths to be completely overwritten. In combination with <br-option>--delta</br-option> a timestamp/size delta will be performed instead of using checksums.</text>

<example>y</example>
</option>
@@ -789,7 +789,7 @@ pgbackrest/doc/doc.pl --out=html --no-exe
pgbackrest/doc/doc.pl --out=html --include=user-guide --require=/quickstart --var=encrypt=n --no-cache --pre
</code-block>

<p>The resulting Docker containers can be listed with <code>docker ps</code> and the container can be entered with <code>docker exec doc-pg-primary bash</code>. Additionally, the <code>-u</code> option can be added for entering the container as a specific user (e.g. <code>postgres</code>).</p>
</section>
</section>

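For example, to enter the primary documentation container as the postgres user (adding `-it` for an interactive terminal):

```bash
docker exec -it -u postgres doc-pg-primary bash
```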
@@ -21,7 +21,7 @@
./doc.pl --out=html --include=user-guide --var=os-type=rhel
</code-block>

<p>Documentation generation will build a cache of all executed statements and use the cache to build the documentation quickly if no executed statements have changed. This makes proofing text-only edits very fast, but sometimes it is useful to do a full build without using the cache:</p>

<code-block type="bash">
./doc.pl --out=html --include=user-guide --var=os-type=rhel --no-cache
@@ -65,9 +65,9 @@ sudo usermod -aG docker testdoc
<section id="package">
<title>Building with Packages</title>

<p>A user-specified package can be used when building the documentation. Since the documentation exercises most <backrest/> functionality this is a great way to smoke-test packages.</p>

<p>The package must be located within the <backrest/> repo and the specified path should be relative to the repository base. <path>test/package</path> is a good default path to use.</p>

<p>Ubuntu 16.04:</p>

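The Ubuntu invocation mirrors the RHEL examples shown earlier; a sketch with assumed flags and a hypothetical package filename:

```bash
./doc.pl --out=html --include=user-guide --no-cache --var=os-type=debian \
    --var=package=test/package/pgbackrest_2.08-1_amd64.deb
```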
@@ -136,7 +136,7 @@ process-max=1

<p>It is often desirable to restore the latest backup from a production server to a development server. In principle, the instructions are the same as in <link url="https://pgbackrest.org/user-guide.html#replication/hot-standby">setting up a hot standby</link> with a few exceptions.</p>

<p>NEED TO ELABORATE HERE: Need an example of the restore command - what settings are different? Would they be {[dash]}-target, {[dash]}-target-action=promote, {[dash]}-type=immediate on the command-line? What about in the POSTGRES (e.g. hot_standby = on / wal_level = hot_standby - these would be different, no?) and PGBACKREST (e.g. would recovery-option=standby_mode=on still be set?) config files</p>
</section> -->

<!-- ======================================================================================================================= -->
@@ -33,7 +33,7 @@

<p><backrest/> aims to be a reliable, easy-to-use backup and restore solution that can seamlessly scale up to the largest databases and workloads by utilizing algorithms that are optimized for database-specific requirements.</p>

<p><backrest/> <link url="{[github-url-base]}/releases/tag/release/{[version-stable]}">v{[version-stable]}</link> is the current stable release. Release notes are on the <link page="{[backrest-page-release]}">Releases</link> page.</p>

<p>Please find us on <link url="{[github-url-base]}">GitHub</link> and give us a star if you like <backrest/>!</p>
</section>
@@ -92,7 +92,7 @@
<section id="page-checksum">
<title>Page Checksums</title>

<p><postgres/> has supported page-level checksums since 9.3. If page checksums are enabled <backrest/> will validate the checksums for every file that is copied during a backup. All page checksums are validated during a full backup and checksums in files that have changed are validated during differential and incremental backups.</p>

<p>Validation failures do not stop the backup process, but warnings with details of exactly which pages have failed validation are output to the console and file log.</p>

@@ -126,22 +126,22 @@
<section id="parallel-archiving">
<title>Parallel, Asynchronous WAL Push & Get</title>

<p>Dedicated commands are included for pushing WAL to the archive and getting WAL from the archive. Both commands support parallelism to accelerate processing and run asynchronously to provide the fastest possible response time to <postgres/>.</p>

<p>WAL push automatically detects WAL segments that are pushed multiple times and de-duplicates when the segment is identical, otherwise an error is raised. Asynchronous WAL push allows transfer to be offloaded to another process which compresses WAL segments in parallel for maximum throughput. This can be a critical feature for databases with extremely high write volume.</p>

<p>Asynchronous WAL get maintains a local queue of WAL segments that are decompressed and ready for replay. This reduces the time needed to provide WAL to <postgres/> which maximizes replay speed. Higher-latency connections and storage (such as <proper>S3</proper>) benefit the most.</p>

<p>The push and get commands both ensure that the database and repository match by comparing <postgres/> versions and system identifiers. This virtually eliminates the possibility of misconfiguring the WAL archive location.</p>
</section>

<!-- =================================================================================================================== -->
<section id="tablespace-link-support">
<title>Tablespace & Link Support</title>

<p>Tablespaces are fully supported and on restore tablespaces can be remapped to any location. It is also possible to remap all tablespaces to one location with a single command which is useful for development restores.</p>

<p>File and directory links are supported for any file or directory in the <postgres/> cluster. When restoring it is possible to restore all links to their original locations, remap some or all links, or restore some or all links as normal files or directories within the cluster directory.</p>
</section>

<!-- =================================================================================================================== -->
@@ -11,7 +11,7 @@

<p>Function/line coverage is complete with no exceptions.</p>

<p>Branch coverage excludes branches inside macros and <code>assert()</code> calls. Macros have their own unit tests so they do not need to be tested everywhere they appear. Asserts are not expected to have complete branch coverage since they test cases that should always be true.</p>

<table>
<table-header>
@@ -685,7 +685,7 @@

<!-- ======================================================================================================================= -->
<block-define id="azure-setup">
<p><backrest/> supports locating repositories in <proper>Azure-compatible</proper> object stores. The container used to store the repository must be created in advance &mdash; <backrest/> will not do it automatically. The repository can be located in the container root (<path>/</path>) but it's usually best to place it in a subpath so object store logs or other data can also be stored in the container without conflicts.</p>

<admonition type="warning">Do not enable <quote>hierarchical namespace</quote> as this will cause errors during expire.</admonition>

@@ -740,7 +740,7 @@

<!-- ======================================================================================================================= -->
<block-define id="s3-setup">
<p><backrest/> supports locating repositories in <proper>S3-compatible</proper> object stores. The bucket used to store the repository must be created in advance &mdash; <backrest/> will not do it automatically. The repository can be located in the bucket root (<path>/</path>) but it's usually best to place it in a subpath so object store logs or other data can also be stored in the bucket without conflicts.</p>

<backrest-config host="{[s3-setup-host]}" file="{[backrest-config-demo]}" owner="{[s3-setup-config-owner]}">
<title>Configure <proper>S3</proper></title>
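A minimal sketch of such an S3 repository configuration in pgbackrest.conf, using placeholder values:

```ini
[global]
repo1-type=s3
repo1-path=/demo-repo
repo1-s3-bucket=demo-bucket
repo1-s3-endpoint=s3.us-east-1.amazonaws.com
repo1-s3-region=us-east-1
repo1-s3-key=<access-key>
repo1-s3-key-secret=<secret-key>
```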
@@ -770,7 +770,7 @@
</execute>
</execute-list>

<admonition type="note">The region and endpoint will need to be configured to where the bucket is located. The values given here are for the <id>{[s3-region]}</id> region.</admonition>
</block-define>

<!-- ======================================================================================================================= -->
@@ -789,9 +789,9 @@

<p>Configuration information and documentation for PostgreSQL can be found in the <postgres/> <link url='http://www.postgresql.org/docs/{[pg-version]}/static/index.html'>Manual</link>.</p>

<p>A somewhat novel approach is taken to documentation in this user guide. Each command is run on a virtual machine when the documentation is built from the XML source. This means you can have high confidence that the commands work correctly in the order presented. Output is captured and displayed below the command when appropriate. If the output is not included it is because it was deemed not relevant or was considered a distraction from the narrative.</p>

<p>All commands are intended to be run as an unprivileged user that has sudo privileges for both the <user>root</user> and <user>postgres</user> users. It's also possible to run the commands directly as their respective users without modification and in that case the <cmd>sudo</cmd> commands can be stripped off.</p>
</section>

<!-- ======================================================================================================================= -->
@@ -817,7 +817,7 @@
<section id="restore">
<title>Restore</title>

<p>A restore is the act of copying a backup to a system where it will be started as a live database cluster. A restore requires the backup files and one or more WAL segments in order to work correctly.</p>
</section>

<!-- =================================================================================================================== -->
@@ -826,7 +826,7 @@

<p>WAL is the mechanism that <postgres/> uses to ensure that no committed changes are lost. Transactions are written sequentially to the WAL and a transaction is considered to be committed when those writes are flushed to disk. Afterwards, a background process writes the changes into the main database cluster files (also known as the heap). In the event of a crash, the WAL is replayed to make the database consistent.</p>

<p>WAL is conceptually infinite but in practice is broken up into individual 16MB files called segments. WAL segments follow the naming convention <id>0000000100000A1E000000FE</id> where the first 8 hexadecimal digits represent the timeline and the next 16 digits are the logical sequence number (LSN).</p>

</section>

@@ -848,21 +848,21 @@
<section id="v1-v2">
<title>Upgrading {[project]} from v1 to v2</title>

<p>Upgrading from <proper>v1</proper> to <proper>v2</proper> is fairly straightforward. The repository format has not changed and all non-deprecated options from <proper>v1</proper> are accepted, so for most installations it is simply a matter of installing the new version.</p>

<p>However, there are a few caveats:</p>

<list>
<list-item>The deprecated <br-option>thread-max</br-option> option is no longer valid. Use <br-option>process-max</br-option> instead.</list-item>

<list-item>The deprecated <br-option>archive-max-mb</br-option> option is no longer valid. This has been replaced with the <br-option>archive-push-queue-max</br-option> option which has different semantics.</list-item>

<list-item>The default for the <br-option>backup-user</br-option> option has changed from <id>backrest</id> to <id>pgbackrest</id>.</list-item>

<list-item>In <proper>v2.02</proper> the default location of the <backrest/> configuration file has changed from <file>/etc/pgbackrest.conf</file> to <file>/etc/pgbackrest/pgbackrest.conf</file>. If <file>/etc/pgbackrest/pgbackrest.conf</file> does not exist, the <file>/etc/pgbackrest.conf</file> file will be loaded instead, if it exists.</list-item>
</list>

<p>Many option names have changed to improve consistency although the old names from <proper>v1</proper> are still accepted. In general, <id>db-*</id> options have been renamed to <id>pg-*</id> and <id>backup-*</id>/<id>retention-*</id> options have been renamed to <id>repo-*</id> when appropriate.</p>

<p><postgres/> and repository options must be indexed when using the new names introduced in <proper>v2</proper>, e.g. <br-option>pg1-host</br-option>, <br-option>pg1-path</br-option>, <br-option>repo1-path</br-option>, <br-option>repo1-type</br-option>, etc.</p>
</section>
@@ -879,13 +879,13 @@
<section if="'{[package]}' eq 'none'" id="build">
<title>Build</title>

<p if="{[os-type-is-debian]}">{[user-guide-os]} packages for <backrest/> are available at <link url="https://www.postgresql.org/download/linux/ubuntu/">apt.postgresql.org</link>. If they are not provided for your distribution/version it is easy to download the source and install manually.</p>

<p if="{[os-type-is-rhel]}">{[user-guide-os]} packages for <backrest/> are available from <link url="{[crunchy-url-base]}">Crunchy Data</link> or <link url="http://yum.postgresql.org">yum.postgresql.org</link>, but it is also easy to download the source and install manually.</p>

<host-add id="{[host-build-id]}" name="{[host-build]}" user="{[host-build-user]}" image="{[host-build-image]}" os="{[os-type]}" mount="{[host-build-mount]}" option="{[host-option]}"/>

<p>When building from source it is best to use a build host rather than building on production. Many of the tools required for the build should generally not be installed in production. <backrest/> consists of a single executable so it is easy to copy to a new host once it is built.</p>

<execute-list host="{[host-build]}">
<title>Download version <id>{[version]}</id> of <backrest/> to <path>{[build-path]}</path> path</title>
@@ -974,7 +974,7 @@
<block-variable-replace key="br-install-group">postgres</block-variable-replace>
</block>

<p><backrest/> should now be properly installed but it is best to check. If any dependencies were missed then you will get an error when running <backrest/> from the command line.</p>

<execute-list host="{[host-pg1]}">
<title>Make sure the installation worked</title>
@@ -1033,7 +1033,7 @@

<p>The name 'demo' describes the purpose of this cluster accurately so that will also make a good stanza name.</p>

<p><backrest/> needs to know where the base data directory for the <postgres/> cluster is located. The path can be requested from <postgres/> directly but in a recovery scenario the <postgres/> process will not be available. During backups the value supplied to <backrest/> will be compared against the path that <postgres/> is running on and they must be equal or the backup will return an error. Make sure that <br-option>pg-path</br-option> is exactly equal to <pg-option>data_directory</pg-option> in <file>postgresql.conf</file>.</p>

<p>By default {[user-guide-os]} stores clusters in <path>{[pg-path-default]}</path> so it is easy to determine the correct path for the data directory.</p>

@@ -1048,7 +1048,7 @@
<backrest-config-option section="global" key="log-timestamp">n</backrest-config-option>
</backrest-config>

<p><backrest/> configuration files follow the Windows INI convention. Sections are denoted by text in brackets and key/value pairs are contained in each section. Lines beginning with <id>#</id> are ignored and can be used as comments.</p>
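A minimal pgbackrest.conf sketch of the convention (the demo stanza matches this guide; the paths are illustrative):

```ini
# Lines beginning with # are comments
[global]
repo1-path=/var/lib/pgbackrest

[demo]
pg1-path=/var/lib/postgresql/data
```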
<p>There are multiple ways the <backrest/> configuration files can be loaded:</p>
<list>
@@ -1090,7 +1090,7 @@

<option-description key="repo-path"/>

<p>For this demonstration the repository will be stored on the same host as the <postgres/> server. This is the simplest configuration and is useful in cases where traditional backup software is employed to backup the database host.</p>

<block id="br-install-repo">
<block-variable-replace key="br-install-host">{[host-pg1]}</block-variable-replace>
@@ -1151,7 +1151,7 @@
<section id="configure-archiving">
<title>Configure Archiving</title>

<p>Backing up a running <postgres/> cluster requires WAL archiving to be enabled. Note that <i>at least</i> one WAL segment will be created during the backup process even if no explicit writes are made to the cluster.</p>

<postgres-config host="{[host-pg1]}" file="{[postgres-config-demo]}">
<title>Configure archive settings</title>
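The archive settings this step applies are along these lines (assuming the demo stanza; exact values vary by <postgres/> version):

```
archive_command = 'pgbackrest --stanza=demo archive-push %p'
archive_mode = on
wal_level = replica
```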
@@ -1196,7 +1196,7 @@

<p>When archiving a WAL segment is expected to take more than 60 seconds (the default) to reach the <backrest/> repository, the <backrest/> <br-option>archive-timeout</br-option> option should be increased. Note that this option is not the same as the <postgres/> <pg-option>archive_timeout</pg-option> option, which is used to force a WAL segment switch and is useful for databases with long periods of inactivity. For more information on the <postgres/> <pg-option>archive_timeout</pg-option> option, see <postgres/> <link url="https://www.postgresql.org/docs/current/static/runtime-config-wal.html">Write Ahead Log</link>.</p>

<p>The <cmd>archive-push</cmd> command can be configured with its own options. For example, a lower compression level may be set to speed archiving without affecting the compression used for backups.</p>

<backrest-config host="{[host-pg1]}" file="{[backrest-config-demo]}">
<title>Config <cmd>archive-push</cmd> to use a lower compression level</title>
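A sketch of that per-command override in pgbackrest.conf (the compression level shown is illustrative):

```ini
[global:archive-push]
compress-level=3
```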
@@ -1226,9 +1226,9 @@
<section if="'{[encrypt]}' eq 'y'" id="configure-encryption">
<title>Configure Repository Encryption</title>

<p>The repository will be configured with a cipher type and key to demonstrate encryption. Encryption is always performed client-side even if the repository type (e.g. <proper>S3</proper> or other object store) supports encryption.</p>

<p>It is important to use a long, random passphrase for the cipher key. A good way to generate one is to run: <code>openssl rand -base64 48</code>.</p>

<backrest-config host="{[host-pg1]}" file="{[backrest-config-demo]}">
<title>Configure <backrest/> repository encryption</title>
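A sketch of the resulting settings (the passphrase is a placeholder; generate your own with the command above):

```ini
[global]
repo1-cipher-type=aes-256-cbc
repo1-cipher-pass=<output of openssl rand -base64 48>
```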
@@ -1309,7 +1309,7 @@
</execute>
</execute-list>

<p>By default <backrest/> will attempt to perform an incremental backup. However, an incremental backup must be based on a full backup and since no full backup existed <backrest/> ran a full backup instead.</p>

<p>The <br-option>type</br-option> option can be used to specify a full or differential backup.</p>

@@ -1323,7 +1323,7 @@
</execute>
</execute-list>

<p>This time there was no warning because a full backup already existed. While incremental backups can be based on a full <i>or</i> differential backup, differential backups must be based on a full backup. A full backup can be performed by running the <cmd>backup</cmd> command with <br-setting>{[dash]}-type=full</br-setting>.</p>

<p>During an online backup <backrest/> waits for WAL segments that are required for backup consistency to be archived. This wait time is governed by the <backrest/> <br-option>archive-timeout</br-option> option which defaults to 60 seconds. If archiving an individual segment is known to take longer then this option should be increased.</p>
</section>
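For reference, a full backup of the guide's demo stanza looks like this (run as the postgres user):

```bash
sudo -u postgres pgbackrest --stanza=demo --type=full backup
```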
@@ -1367,7 +1367,7 @@
<section id="perform-restore" depend="perform-backup">
<title>Restore a Backup</title>

<p>Backups can protect you from a number of disaster scenarios, the most common of which are hardware failure and data corruption. The easiest way to simulate data corruption is to remove an important <postgres/> cluster file.</p>

<execute-list host="{[host-pg1]}">
<title>Stop the {[postgres-cluster-demo]} cluster and delete the <file>pg_control</file> file</title>
@@ -1437,7 +1437,7 @@
<section id="monitor" depend="/quickstart/perform-backup">
<title>Monitoring</title>

<p>Monitoring is an important part of any production system. There are many tools available and <backrest/> can be monitored on any of them with a little work.</p>

<p><backrest/> can output information about the repository in JSON format which includes a list of all backups for each stanza and WAL archive info.</p>

@@ -1445,7 +1445,7 @@
<section id="postgresql">
<title>In <postgres/></title>

<p>The <postgres/> <id>COPY</id> command allows <backrest/> info to be loaded into a table. The following example wraps that logic in a function that can be used to perform real-time queries.</p>

<execute-list host="{[host-pg1]}">
<title>Load <backrest/> info function for <postgres/></title>
@@ -1529,7 +1529,7 @@
</execute-list>

<admonition type="note">This syntax requires <proper>jq v1.5</proper>.</admonition>
<admonition type="note"><proper>jq</proper> may round large numbers such as system identifiers. Test your queries carefully.</admonition>
</section>
</section>

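An example of the kind of query these notes apply to — pulling the label of the most recent backup from the JSON output (field names assumed from the info command's JSON format; the negative array index is the jq 1.5 feature):

```bash
sudo -u postgres pgbackrest --output=json info | jq '.[0].backup[-1].label'
```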
@@ -1643,7 +1643,7 @@
<section id="retention" depend="quickstart/perform-backup">
<title>Retention</title>

<p>Generally it is best to retain as many backups as possible to provide a greater window for <link section="/pitr">Point-in-Time Recovery</link>, but practical concerns such as disk space must also be considered. Retention options remove older backups once they are no longer needed.</p>

<cmd-description key="expire"/>

@@ -1651,7 +1651,7 @@
<section id="full">
<title>Full Backup Retention</title>

<p>The <br-option>repo1-retention-full-type</br-option> option determines how <br-option>repo1-retention-full</br-option> is interpreted: either as the count of full backups to be retained or as the number of days to retain full backups. New backups must be completed before expiration will occur &mdash; that means if <br-setting>repo1-retention-full-type=count</br-setting> and <br-setting>repo1-retention-full=2</br-setting> then there will be three full backups stored before the oldest one is expired, or if <br-setting>repo1-retention-full-type=time</br-setting> and <br-setting>repo1-retention-full=20</br-setting> then there must be one full backup that is at least 20 days old before expiration can occur.</p>

<backrest-config host="{[host-pg1]}" file="{[backrest-config-demo]}">
<title>Configure <br-option>repo1-retention-full</br-option></title>
@ -1675,7 +1675,7 @@
|
||||
</execute>
|
||||
</execute-list>
|
||||
|
||||
<p>Archive <i>is</i> expired because WAL segments were generated before the oldest backup. These are not useful for recovery &mdash; only WAL segments generated after a backup can be used to recover that backup.</p>
|
||||
<p>Archive <i>is</i> expired because WAL segments were generated before the oldest backup. These are not useful for recovery &mdash; only WAL segments generated after a backup can be used to recover that backup.</p>
|
||||
|
||||
<execute-list host="{[host-pg1]}">
|
||||
<title>Perform a full backup</title>
|
||||
@ -1694,7 +1694,7 @@
|
||||
<section id="diff">
|
||||
<title>Differential Backup Retention</title>
|
||||
|
||||
<p>Set <br-option>repo1-retention-diff</br-option> to the number of differential backups required. Differentials only rely on the prior full backup so it is possible to create a <quote>rolling</quote> set of differentials for the last day or more. This allows quick restores to recent points-in-time but reduces overall space consumption.</p>
|
||||
<p>Set <br-option>repo1-retention-diff</br-option> to the number of differential backups required. Differentials only rely on the prior full backup so it is possible to create a <quote>rolling</quote> set of differentials for the last day or more. This allows quick restores to recent points-in-time but reduces overall space consumption.</p>
|
||||
|
||||
<backrest-config host="{[host-pg1]}" file="{[backrest-config-demo]}">
|
||||
<title>Configure <br-option>repo1-retention-diff</br-option></title>
|
||||
@ -1702,7 +1702,7 @@
|
||||
<backrest-config-option section="global" key="repo1-retention-diff">1</backrest-config-option>
|
||||
</backrest-config>
|
||||
|
||||
<p>Backup <br-setting>repo1-retention-diff=1</br-setting> so two differentials will need to be performed before one is expired. An incremental backup is added to demonstrate incremental expiration. Incremental backups cannot be expired independently &mdash; they are always expired with their related full or differential backup.</p>
|
||||
<p>Backup <br-setting>repo1-retention-diff=1</br-setting> so two differentials will need to be performed before one is expired. An incremental backup is added to demonstrate incremental expiration. Incremental backups cannot be expired independently &mdash; they are always expired with their related full or differential backup.</p>
<execute-list host="{[host-pg1]}">
<title>Perform differential and incremental backups</title>
@ -1737,7 +1737,7 @@
<section id="archive">
<title>Archive Retention</title>

<p>Although <backrest/> automatically removes archived WAL segments when expiring backups (the default expires WAL for full backups based on the <br-option>repo1-retention-full</br-option> option), it may be useful to expire archive more aggressively to save disk space. Note that full backups are treated as differential backups for the purpose of differential archive retention.</p>

<p>Expiring archive will never remove WAL segments that are required to make a backup consistent. However, since Point-in-Time-Recovery (PITR) only works on a continuous WAL stream, care should be taken when aggressively expiring archive outside of the normal backup expiration process. To determine what will be expired without actually expiring anything, the <br-option>dry-run</br-option> option can be provided on the command line with the <cmd>expire</cmd> command.</p>
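
A minimal sketch of such a dry run, assuming a stanza named demo; the retention values are illustrative and the report should be reviewed before running the command without --dry-run.

```bash
# Report what would be expired without removing anything
pgbackrest --stanza=demo --dry-run expire

# More aggressive archive retention, still as a dry run
pgbackrest --stanza=demo --repo1-retention-archive-type=diff \
    --repo1-retention-archive=1 --dry-run expire
```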
@ -1800,16 +1800,16 @@
<section id="ownership">
<title>File Ownership</title>

<p>If a <cmd>restore</cmd> is run as a non-root user (the typical scenario) then all files restored will belong to the user/group executing <backrest/>. If existing files are not owned by the executing user/group then an error will result if the ownership cannot be updated to the executing user/group. In that case the file ownership will need to be updated by a privileged user before the restore can be retried.</p>

<p>If a <cmd>restore</cmd> is run as the <id>root</id> user then <backrest/> will attempt to recreate the ownership recorded in the manifest when the backup was made. Only user/group <b>names</b> are stored in the manifest so the same names must exist on the restore host for this to work. If the user/group name cannot be found locally then the user/group of the <postgres/> data directory will be used and finally <id>root</id> if the data directory user/group cannot be mapped to a name.</p>
</section>

<!-- =================================================================================================================== -->
<section id="option-delta">
<title>Delta Option</title>

<p><link section="/quickstart/perform-restore">Restore a Backup</link> in <link section="/quickstart">Quick Start</link> required the database cluster directory to be cleaned before the <cmd>restore</cmd> could be performed. The <br-option>delta</br-option> option allows <backrest/> to automatically determine which files in the database cluster directory can be preserved and which ones need to be restored from the backup &mdash; it also <i>removes</i> files not present in the backup manifest so it will dispose of divergent changes. This is accomplished by calculating a <link url="https://en.wikipedia.org/wiki/SHA-1">SHA-1</link> cryptographic hash for each file in the database cluster directory. If the <id>SHA-1</id> hash does not match the hash stored in the backup then that file will be restored. This operation is very efficient when combined with the <br-option>process-max</br-option> option. Since the <postgres/> server is shut down during the restore, a larger number of processes can be used than might be desirable during a backup when the <postgres/> server is running.</p>
<execute-list host="{[host-pg1]}">
<title>Stop the {[postgres-cluster-demo]} cluster, perform delta restore</title>
@ -1842,7 +1842,7 @@
<section id="option-db-include">
<title>Restore Selected Databases</title>

<p>There may be cases where it is desirable to selectively restore specific databases from a cluster backup. This could be done for performance reasons or to move selected databases to a machine that does not have enough space to restore the entire cluster backup.</p>

<p>To demonstrate this feature two databases are created: test1 and test2.</p>
@ -1892,7 +1892,7 @@
</execute>
</execute-list>

<p>One of the main reasons to use selective restore is to save space. The size of the test1 database is shown here so it can be compared with the disk utilization after a selective restore.</p>

<execute-list host="{[host-pg1]}">
<title>Show space used by test1 database</title>
@ -1926,7 +1926,7 @@
</execute>
</execute-list>

<p>Stop the cluster and restore only the test2 database. Built-in databases (<id>template0</id>, <id>template1</id>, and <id>postgres</id>) are always restored.</p>

<admonition type="warning">Recovery may error unless <br-option>--type=immediate</br-option> is specified. This is because after consistency is reached <postgres/> will flag zeroed pages as errors even for a full-page write. For <postgres/> &ge; <proper>13</proper> the <pg-option>ignore_invalid_pages</pg-option> setting may be used to ignore invalid pages. In this case it is important to check the logs after recovery to ensure that no invalid pages were reported in the selected databases.</admonition>
@ -1963,7 +1963,7 @@
</execute>
</execute-list>

<p>The test1 database, despite successful recovery, is not accessible. This is because the entire database was restored as sparse, zeroed files. <postgres/> can successfully apply WAL on the zeroed files but the database as a whole will not be valid because key files contain no data. This is purposeful to prevent the database from being accidentally used when it might contain partial data that was applied during WAL replay.</p>

<execute-list host="{[host-pg1]}">
<title>Attempting to connect to the test1 database will produce an error</title>
@ -1976,7 +1976,7 @@
</execute>
</execute-list>

<p>Since the test1 database is restored with sparse, zeroed files it will only require as much space as the amount of WAL that is written during recovery. While the amount of WAL generated during a backup and applied during recovery can be significant it will generally be a small fraction of the total database size, especially for large databases where this feature is most likely to be useful.</p>

<p>It is clear that the test1 database uses far less disk space during the selective restore than it would have if the entire database had been restored.</p>
@ -2044,7 +2044,7 @@
</execute>
</execute-list>

<p>It is important to represent the time as reckoned by <postgres/> and to include timezone offsets. This reduces the possibility of unintended timezone conversions and an unexpected recovery result.</p>
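
A rough sketch of capturing and then using such a timestamp, assuming a stanza named demo; the target value shown is illustrative.

```bash
# Capture the current time as PostgreSQL reckons it, with timezone offset
psql -Atc "select current_timestamp"

# Later, restore to that point in time
pgbackrest --stanza=demo --delta --type=time \
    --target="2025-01-01 12:00:00.000000+00" --target-action=promote restore
```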
<execute-list host="{[host-pg1]}">
<title>Get the time from <postgres/></title>
@ -2064,7 +2064,7 @@
</execute>
</execute-list>

<p>Now that the time has been recorded the table is dropped. In practice finding the exact time that the table was dropped is a lot harder than in this example. It may not be possible to find the exact time, but some forensic work should be able to get you close.</p>

<execute-list host="{[host-pg1]}">
<title>Drop the important table</title>
@ -2122,7 +2122,7 @@
</execute>
</execute-list>

<p>The <postgres/> log also contains valuable information. It will indicate the time and transaction where the recovery stopped and also give the time of the last transaction to be applied.</p>

<execute-list host="{[host-pg1]}">
<title>Examine the <postgres/> log output</title>
@ -2133,7 +2133,7 @@
</execute>
</execute-list>

<p>This example was rigged to give the correct result. If a backup after the required time is chosen then <postgres/> will not be able to recover the lost table. <postgres/> can only play forward, not backward. To demonstrate this the important table must be dropped (again).</p>

<execute-list host="{[host-pg1]}">
<title>Drop the important table (again)</title>
@ -2197,7 +2197,7 @@
</execute>
</execute-list>

<p>Looking at the log output it's not obvious that recovery failed to restore the table. The key is to look for the presence of the <quote>recovery stopping before...</quote> and <quote>last completed transaction...</quote> log messages. If they are not present then the recovery to the specified point-in-time was not successful.</p>
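
A quick way to check is to search the log for both messages at once; the log path below is a Debian-style assumption and will differ by platform and version.

```bash
grep -E "recovery stopping before|last completed transaction" \
    /var/log/postgresql/postgresql-15-main.log
```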
<execute-list host="{[host-pg1]}">
<title>Examine the <postgres/> log output to discover the recovery was not successful</title>
@ -2461,7 +2461,7 @@
<section id="repo-host" depend="/quickstart/configure-archiving">
<title>Dedicated Repository Host</title>

<p>The configuration described in <link section="/quickstart">Quickstart</link> is suitable for simple installations but for enterprise configurations it is more typical to have a dedicated <host>repository</host> host where the backups and WAL archive files are stored. This separates the backups and WAL archive from the database server so <host>database</host> host failures have less impact. It is still a good idea to employ traditional backup software to backup the <host>repository</host> host.</p>

<p>On <postgres/> hosts, <br-option>pg1-path</br-option> is required to be the path of the local PostgreSQL cluster and no <br-option>pg1-host</br-option> should be configured. When configuring a repository host, the pgbackrest configuration file must have the <br-option>pg-host</br-option> option configured to connect to the primary and standby (if any) hosts. The repository host has the only pgbackrest configuration that should be aware of more than one <postgres/> host. Order does not matter, e.g. pg1-path/pg1-host, pg2-path/pg2-host can be primary or standby.</p>
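
As an illustrative sketch only (hostnames, paths, and the <postgres/> version are assumptions; the actual settings used by this guide appear in the configuration blocks below):

```bash
# Repository host: knows about the PostgreSQL host(s)
cat > /etc/pgbackrest/pgbackrest.conf <<'EOF'
[demo]
pg1-host=pg-primary
pg1-path=/var/lib/postgresql/15/main

[global]
repo1-path=/var/lib/pgbackrest
EOF

# Database host: knows only about the repository, pg1-path stays local
cat > /etc/pgbackrest/pgbackrest.conf <<'EOF'
[demo]
pg1-path=/var/lib/postgresql/15/main

[global]
repo1-host=repo1
EOF
```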
@ -2475,7 +2475,7 @@

<host-add id="{[host-repo1-id]}" name="{[host-repo1]}" user="{[host-repo1-user]}" image="{[host-repo1-image]}" os="{[os-type]}" mount="{[host-repo1-mount]}" option="{[host-mem]} {[host-option]}"/>

<p>The <user>{[br-user]}</user> user is created to own the <backrest/> repository. Any user can own the repository but it is best not to use <user>postgres</user> (if it exists) to avoid confusion.</p>

<execute-list host="{[host-repo1]}">
<title>Create <user>{[br-user]}</user> user</title>
@ -2531,7 +2531,7 @@
<block-variable-replace key="setup-ssh-user-home-path">{[pg-home-path]}</block-variable-replace>
</block>

<admonition type="note">ssh has been configured to only allow <backrest/> to be run via passwordless ssh. This enhances security in the event that one of the service accounts is hijacked.</admonition>

<!-- <block if="{[pg-version]} >= 11" id="setup-ssh">
<block-variable-replace key="setup-ssh-host">{[host-pg1]}</block-variable-replace>
@ -2554,7 +2554,7 @@
<backrest-config-option section="global" key="repo1-path">{[backrest-repo-path]}</backrest-config-option>
</backrest-config>

<p>The <host>repository</host> host must be configured with the <host>{[host-pg1]}</host> host/user and database path. The primary will be configured as <id>pg1</id> to allow a standby to be added later.</p>

<backrest-config host="{[host-repo1]}" owner="{[br-user]}:{[br-group]}" file="{[backrest-config-demo]}">
<title>Configure <br-option>pg1-host</br-option>/<br-option>pg1-host-user</br-option> and <br-option>pg1-path</br-option></title>
@ -2581,7 +2581,7 @@
<backrest-config-option section="global" key="log-timestamp">n</backrest-config-option>
</backrest-config>

<p>The database host must be configured with the repository host/user. The default for the <br-option>repo1-host-user</br-option> option is <id>pgbackrest</id>. If the <id>postgres</id> user does restores on the repository host it is best not to also allow the <id>postgres</id> user to perform backups. However, the <id>postgres</id> user can read the repository directly if it is in the same group as the <id>pgbackrest</id> user.</p>

<backrest-config host="{[host-pg1]}" file="{[backrest-config-demo]}" reset="y">
<title>Configure <br-option>repo1-host</br-option>/<br-option>repo1-host-user</br-option></title>
@ -2761,9 +2761,9 @@

<p><backrest/> offers parallel processing to improve performance of compression and transfer. The number of processes to be used for this feature is set using the <br-option>--process-max</br-option> option.</p>

<p>It is usually best not to use more than 25% of available CPUs for the <cmd>backup</cmd> command. Backups don't have to run that fast as long as they are performed regularly and the backup process should not impact database performance, if at all possible.</p>

<p>The restore command can and should use all available CPUs because during a restore the <postgres/> cluster is shut down and there is generally no other important work being done on the host. If the host contains multiple clusters then that should be considered when setting restore parallelism.</p>
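
A rough sketch of that guideline using nproc, assuming a stanza named demo; the rounding is illustrative and should be tuned to the workload.

```bash
# About a quarter of the CPUs for backup, all of them for restore
pgbackrest --stanza=demo --type=full --process-max=$(( ($(nproc) + 3) / 4 )) backup
pgbackrest --stanza=demo --delta --process-max=$(nproc) restore
```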
<execute-list host="{[host-repo1]}">
<title>Perform a backup with single process</title>
@ -2796,14 +2796,14 @@
</execute>
</execute-list>

<p>The performance of the last backup should be improved by using multiple processes. For very small backups the difference may not be very apparent, but as the size of the database increases so will time savings.</p>
</section>

<!-- ======================================================================================================================= -->
<section id="start-stop" depend="/repo-host/config">
<title>Starting and Stopping</title>

<p>Sometimes it is useful to prevent <backrest/> from running on a system. For example, when failing over from a primary to a standby it's best to prevent <backrest/> from running on the old primary in case <postgres/> gets restarted or can't be completely killed. This will also prevent <backrest/> from running on <id>cron</id>.</p>

<execute-list host="{[host-pg1]}">
<title>Stop the <backrest/> services</title>
@ -2824,7 +2824,7 @@
</execute>
</execute-list>

<p>Specify the <br-option>--force</br-option> option to terminate any <backrest/> processes that are currently running. If <backrest/> is already stopped then stopping again will generate a warning.</p>
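
A minimal sketch, assuming a stanza named demo; omitting --stanza stops <backrest/> for all stanzas on the host.

```bash
pgbackrest --stanza=demo stop           # prevent new pgBackRest processes
pgbackrest --stanza=demo stop --force   # also terminate running ones
pgbackrest --stanza=demo start          # allow pgBackRest to run again
```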
<execute-list host="{[host-pg1]}">
<title>Stop the <backrest/> services again</title>
@ -2880,7 +2880,7 @@
<section id="replication" depend="/repo-host/perform-backup">
<title>Replication</title>

<p>Replication allows multiple copies of a <postgres/> cluster (called standbys) to be created from a single primary. The standbys are useful for balancing reads and to provide redundancy in case the primary host fails.</p>

<!-- =================================================================================================================== -->
<section id="installation">
@ -3014,7 +3014,7 @@
</execute>
</execute-list>

<p>The <pg-setting>hot_standby</pg-setting> setting must be enabled before starting <postgres/> to allow read-only connections on <host>{[host-pg2]}</host>. Otherwise, connection attempts will be refused. The rest of the configuration is in case the standby is promoted to a primary.</p>

<postgres-config host="{[host-pg2]}" file="{[postgres-config-demo]}">
<title>Configure <postgres/></title>
@ -3043,7 +3043,7 @@
</execute>
</execute-list>

<p>The <postgres/> log gives valuable information about the recovery. Note especially that the cluster has entered standby mode and is ready to accept read-only connections.</p>

<execute-list host="{[host-pg2]}">
<title>Examine the <postgres/> log output for log messages indicating success</title>
@ -3083,7 +3083,7 @@
</execute>
</execute-list>

<p>So, what went wrong? Since <postgres/> is pulling WAL segments from the archive to perform replication, changes won't be seen on the standby until the WAL segment that contains those changes is pushed from <host>{[host-pg1]}</host>.</p>

<p>This can be done manually by calling <code>{[pg-switch-wal]}()</code> which pushes the current WAL segment to the archive (a new WAL segment is created to contain further changes).</p>
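
A minimal sketch; the function name depends on the <postgres/> version, as the template above suggests.

```bash
# pg_switch_wal() on PostgreSQL >= 10, pg_switch_xlog() on older releases
psql -c "select pg_switch_wal();"
```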
@ -3125,7 +3125,7 @@
<section id="streaming">
<title>Streaming Replication</title>

<p>Instead of relying solely on the WAL archive, streaming replication makes a direct connection to the primary and applies changes as soon as they are made on the primary. This results in much less lag between the primary and standby.</p>

<p>Streaming replication requires a user with the replication privilege.</p>

@ -3140,7 +3140,7 @@
</execute>
</execute-list>

<p>The <file>pg_hba.conf</file> file must be updated to allow the standby to connect as the replication user. Be sure to replace the IP address below with the actual IP address of your <host>{[host-pg2]}</host>. A reload will be required after modifying the <file>pg_hba.conf</file> file.</p>

<execute-list host="{[host-pg1]}">
<title>Create <file>pg_hba.conf</file> entry for replication user</title>
@ -3204,7 +3204,7 @@

<admonition type="note">The <pg-setting>primary_conninfo</pg-setting> setting has been written into the <file>{[pg-recovery-file-demo]}</file> file because it was configured as a <br-option>recovery-option</br-option> in <file>{[project-exe]}.conf</file>. The <br-setting>{[dash]}-type=preserve</br-setting> option can be used with the <cmd>restore</cmd> to leave the existing <file>{[pg-recovery-file-demo]}</file> file in place if that behavior is preferred.</admonition>

<p if="{[os-type-is-rhel]}">By default {[user-guide-os]} stores the <file>postgresql.conf</file> file in the <postgres/> data directory. That means the change made to <file>postgresql.conf</file> was overwritten by the last restore and the <pg-option>hot_standby</pg-option> setting must be enabled again. Other solutions to this problem are to store the <file>postgresql.conf</file> file elsewhere or to enable the <pg-option>hot_standby</pg-option> setting on the <host>{[host-pg1]}</host> host where it will be ignored.</p>
<postgres-config host="{[host-pg2]}" if="{[os-type-is-rhel]}" file="{[postgres-config-demo]}">
<title>Enable <pg-option>hot_standby</pg-option></title>
@ -3273,9 +3273,9 @@
<section id="async-archiving" depend="/replication">
<title>Asynchronous Archiving</title>

<p>Asynchronous archiving is enabled with the <br-option>archive-async</br-option> option. This option enables asynchronous operation for both the <cmd>archive-push</cmd> and <cmd>archive-get</cmd> commands.</p>

<p>A spool path is required. The commands will store transient data here but each command works quite a bit differently so spool path usage is described in detail in each section.</p>
<execute-list host="{[host-pg1]}">
<title>Create the spool directory</title>
@ -3299,7 +3299,7 @@
</execute>
</execute-list>

<p>The spool path must be configured and asynchronous archiving enabled. Asynchronous archiving automatically confers some benefit by reducing the number of connections made to remote storage, but setting <br-option>process-max</br-option> can drastically improve performance by parallelizing operations. Be sure not to set <br-option>process-max</br-option> so high that it affects normal database operations.</p>
<backrest-config host="{[host-pg1]}" file="{[backrest-config-demo]}">
<title>Configure the spool path and asynchronous archiving</title>
@ -3319,7 +3319,7 @@
<backrest-config-option section="global:archive-get" key="process-max">2</backrest-config-option>
</backrest-config>

<admonition type="note"><br-option>process-max</br-option> is configured using command sections so that the option is not used by backup and restore. This also allows different values for <cmd>archive-push</cmd> and <cmd>archive-get</cmd>.</admonition>

<p>For demonstration purposes streaming replication will be broken to force <postgres/> to get WAL using the <pg-option>restore_command</pg-option>.</p>

@ -3345,13 +3345,13 @@
<section id="async-archive-push">
<title>Archive Push</title>

<p>The asynchronous <cmd>archive-push</cmd> command offloads WAL archiving to a separate process (or processes) to improve throughput. It works by <quote>looking ahead</quote> to see which WAL segments are ready to be archived beyond the request that <postgres/> is currently making via the <code>archive_command</code>. WAL segments are transferred to the archive directly from the <path>pg_xlog</path>/<path>pg_wal</path> directory and success is only returned by the <code>archive_command</code> when the WAL segment has been safely stored in the archive.</p>

<p>The spool path holds the current status of WAL archiving. Status files written into the spool directory are typically zero length and should consume a minimal amount of space (a few MB at most) and very little IO. All the information in this directory can be recreated so it is not necessary to preserve the spool directory if the cluster is moved to new hardware.</p>
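
A quick way to observe this, assuming spool-path=/var/spool/pgbackrest and a stanza named demo (both illustrative):

```bash
# Status files are zero length; the directory can be recreated at any time
ls -l /var/spool/pgbackrest/archive/demo/out
```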
<admonition type="important">In the original implementation of asynchronous archiving, WAL segments were copied to the spool directory before compression and transfer. The new implementation copies WAL directly from the <path>pg_xlog</path> directory. If asynchronous archiving was utilized in <proper>v1.12</proper> or prior, read the <proper>v1.13</proper> release notes carefully before upgrading.</admonition>

<p>The <file>[stanza]-archive-push-async.log</file> file can be used to monitor the activity of the asynchronous process. A good way to test this is to quickly push a number of WAL segments.</p>

<execute-list host="{[host-pg1]}">
<title>Test parallel asynchronous archiving</title>
@ -3393,9 +3393,9 @@
<section id="async-archive-get">
<title>Archive Get</title>

<p>The asynchronous <cmd>archive-get</cmd> command maintains a local queue of WAL to improve throughput. If a WAL segment is not found in the queue it is fetched from the repository along with enough consecutive WAL to fill the queue. The maximum size of the queue is defined by <br-option>archive-get-queue-max</br-option>. Whenever the queue is less than half full more WAL will be fetched to fill it.</p>

<p>Asynchronous operation is most useful in environments that generate a lot of WAL or have a high latency connection to the repository storage (i.e., <proper>S3</proper> or other object stores). In the case of a high latency connection it may be a good idea to increase <br-option>process-max</br-option>.</p>
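
A hypothetical configuration sketch for a high-latency repository; the path and values are illustrative only.

```bash
cat >> /etc/pgbackrest/pgbackrest.conf <<'EOF'
[global]
archive-async=y
spool-path=/var/spool/pgbackrest
archive-get-queue-max=1GiB

[global:archive-get]
process-max=4
EOF
```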
<p>The <file>[stanza]-archive-get-async.log</file> file can be used to monitor the activity of the asynchronous process.</p>

@ -3427,7 +3427,7 @@
<section id="standby-backup" depend="/replication/streaming">
<title>Backup from a Standby</title>

<p><backrest/> can perform backups on a standby instead of the primary. Standby backups require the <host>{[host-pg2]}</host> host to be configured and the <br-option>backup-standby</br-option> option enabled. If more than one standby is configured then the first running standby found will be used for the backup.</p>

<backrest-config host="{[host-repo1]}" owner="{[br-user]}:{[br-group]}" file="{[backrest-config-demo]}">
<title>Configure <br-option>pg2-host</br-option>/<br-option>pg2-host-user</br-option> and <br-option>pg2-path</br-option></title>
@ -3445,7 +3445,7 @@
<backrest-config-option section="global" key="backup-standby">y</backrest-config-option>
</backrest-config>

<p>Both the primary and standby databases are required to perform the backup, though the vast majority of the files will be copied from the standby to reduce load on the primary. The database hosts can be configured in any order. <backrest/> will automatically determine which is the primary and which is the standby.</p>
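
A one-off sketch from the repository host, assuming a stanza named demo; the same behavior is enabled persistently by the configuration above.

```bash
pgbackrest --stanza=demo --type=incr --backup-standby backup
```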
<execute-list host="{[host-repo1]}">
<title>Backup the {[postgres-cluster-demo]} cluster from <host>pg2</host></title>
@ -3458,7 +3458,7 @@

<p>This incremental backup shows that most of the files are copied from the <host>{[host-pg2]}</host> host and only a few are copied from the <host>{[host-pg1]}</host> host.</p>

<p><backrest/> creates a standby backup that is identical to a backup performed on the primary. It does this by starting/stopping the backup on the <host>{[host-pg1]}</host> host, copying only files that are replicated from the <host>{[host-pg2]}</host> host, then copying the remaining few files from the <host>{[host-pg1]}</host> host. This means that logs and statistics from the primary database will be included in the backup.</p>
</section>

<!-- ===========================================================================================================================

@ -3,7 +3,7 @@
####################################################################################################################################

####################################################################################################################################
# List of required source files. main.c should always be listed last and the rest in alpha order.
####################################################################################################################################
SRCS_COMMON = \
	common/compress/bz2/common.c \
@ -249,14 +249,14 @@ postgres/interface.auto.c.inc: build-code
	./build-code postgres $(VPATH)

####################################################################################################################################
# Installation. DESTDIR can be used to modify the install location.
####################################################################################################################################
install: pgbackrest
	install -d $(DESTDIR)$(bindir)
	install -m 755 pgbackrest $(DESTDIR)$(bindir)

####################################################################################################################################
# Uninstallation. DESTDIR should be set to the same value as when installed.
####################################################################################################################################
uninstall:
	rm -f $(DESTDIR)$(bindir)/pgbackrest
@ -9,10 +9,10 @@ Regular Expression Handler Extensions
/***********************************************************************************************************************************
Getters/Setters
***********************************************************************************************************************************/
// Get pointer to the last match. NULL if there was no match.
const char *regExpMatchPtr(RegExp *this, const String *string);

// Get the last match as a String. NULL if there was no match.
String *regExpMatchStr(RegExp *this, const String *string);

#endif

@ -9,7 +9,7 @@ AC_CONFIG_AUX_DIR(build)
# ----------------------------------------------------------------------------------------------------------------------------------
: ${CFLAGS=""}

# Build C standard based on the host type. C99 is required and other flags are added depending on the host.
# ----------------------------------------------------------------------------------------------------------------------------------
AC_PROG_CC
AC_CANONICAL_HOST

@ -75,13 +75,13 @@ path-close: 97
# Unable to get info for a file
file-info: 98

# Invalid JSON format. Eventually this should be a child of format error and share the same code
json-format: 99

# An error from the kernel that there's nothing we can do about. It should always be fatal.
kernel: 100

# An error from a service that is not our fault, e.g. 5xx errors from an http server. These may be retried.
service: 101

# An error while attempting to execute a binary
@ -263,7 +263,7 @@ archiveAsyncExec(ArchiveMode archiveMode, const StringList *commandExec)
|
||||
for (int fd = 3; fd < 1024; fd++)
|
||||
close(fd);
|
||||
|
||||
// Execute the binary. This statement will not return if it is successful.
|
||||
// Execute the binary. This statement will not return if it is successful.
|
||||
THROW_ON_SYS_ERROR_FMT(
|
||||
execvp(strZ(strLstGet(commandExec, 0)), (char **const)strLstPtr(commandExec)) == -1, ExecuteError,
|
||||
"unable to execute asynchronous '%s'", archiveMode == archiveModeGet ? CFGCMD_ARCHIVE_GET : CFGCMD_ARCHIVE_PUSH);
|
||||
@ -279,12 +279,12 @@ archiveAsyncExec(ArchiveMode archiveMode, const StringList *commandExec)
|
||||
|
||||
THROW_ON_SYS_ERROR(waitpid(pid, &processStatus, 0) == -1, ExecuteError, "unable to wait for forked process");
|
||||
|
||||
// The first fork should exit with success. If not, something went wrong during the second fork.
|
||||
// The first fork should exit with success. If not, something went wrong during the second fork.
|
||||
CHECK(ExecuteError, WIFEXITED(processStatus) && WEXITSTATUS(processStatus) == 0, "error on first fork");
|
||||
|
||||
#ifdef DEBUG_EXEC_TIME
|
||||
// If the process does not exit immediately then something probably went wrong with the double fork. It's possible that this
|
||||
// test will fail on very slow systems so it may need to be tuned. The idea is to make sure that the waitpid() above is not
|
||||
// If the process does not exit immediately then something probably went wrong with the double fork. It's possible that this
|
||||
// test will fail on very slow systems so it may need to be tuned. The idea is to make sure that the waitpid() above is not
|
||||
// waiting on the async process.
|
||||
CHECK(AssertError, timeMSec() - timeBegin < 10, "the process does not exit immediately");
|
||||
#endif
|
||||
|
@ -65,8 +65,8 @@ STRING_DECLARE(WAL_TIMELINE_HISTORY_REGEXP_STR);
|
||||
/***********************************************************************************************************************************
|
||||
Functions
|
||||
***********************************************************************************************************************************/
|
||||
// Remove errors for an archive file. This should be done before forking the async process to prevent a race condition where an
|
||||
// old error may be reported rather than waiting for the async process to succeed or fail.
|
||||
// Remove errors for an archive file. This should be done before forking the async process to prevent a race condition where an old
|
||||
// error may be reported rather than waiting for the async process to succeed or fail.
|
||||
FN_EXTERN void archiveAsyncErrorClear(ArchiveMode archiveMode, const String *archiveFile);
|
||||
|
||||
// Check for ok/error status files in the spool in/out directory. throwOnError determines whether an error will be thrown when an
|
||||
|
@ -540,13 +540,12 @@ queueNeed(const String *walSegment, bool found, uint64_t queueSize, size_t walSe
|
||||
|
||||
MEM_CONTEXT_TEMP_BEGIN()
|
||||
{
|
||||
// Determine the first WAL segment for the async process to get. If the WAL segment requested by
|
||||
// PostgreSQL was not found then use that. If the segment was found but the queue is not full then
|
||||
// start with the next segment.
|
||||
// Determine the first WAL segment for the async process to get. If the WAL segment requested by PostgreSQL was not found
|
||||
// then use that. If the segment was found but the queue is not full then start with the next segment.
|
||||
const String *walSegmentFirst =
|
||||
found ? walSegmentNext(walSegment, walSegmentSize, pgVersion) : walSegment;
|
||||
|
||||
// Determine how many WAL segments should be in the queue. The queue total must be at least 2 or it doesn't make sense to
|
||||
// Determine how many WAL segments should be in the queue. The queue total must be at least 2 or it doesn't make sense to
|
||||
// have async turned on at all.
|
||||
unsigned int walSegmentQueueTotal = (unsigned int)(queueSize / walSegmentSize);
|
||||
|
||||
@ -715,9 +714,9 @@ cmdArchiveGet(void)
|
||||
}
|
||||
}
|
||||
|
||||
// If the WAL segment has not already been found then start the async process to get it. There's no point in
|
||||
// forking the async process off more than once so track that as well. Use an archive lock to prevent forking if
|
||||
// the async process was launched by another process.
|
||||
// If the WAL segment has not already been found then start the async process to get it. There's no point in forking
|
||||
// the async process off more than once so track that as well. Use an archive lock to prevent forking if the async
|
||||
// process was launched by another process.
|
||||
if (!forked && (!found || !queueFull) && lockAcquireP(.returnOnNoLock = true))
|
||||
{
|
||||
// Get control info
|
||||
@ -736,8 +735,8 @@ cmdArchiveGet(void)
|
||||
StringList *commandExec = cfgExecParam(cfgCmdArchiveGet, cfgCmdRoleAsync, optionReplace, true, false);
|
||||
strLstInsert(commandExec, 0, cfgExe());
|
||||
|
||||
// Clean the current queue using the list of WAL that we ideally want in the queue. queueNeed()
|
||||
// will return the list of WAL needed to fill the queue and this will be passed to the async process.
|
||||
// Clean the current queue using the list of WAL that we ideally want in the queue. queueNeed() will return the
|
||||
// list of WAL needed to fill the queue and this will be passed to the async process.
|
||||
const StringList *queue = queueNeed(
|
||||
walSegment, found, cfgOptionUInt64(cfgOptArchiveGetQueueMax), pgControl.walSegmentSize,
|
||||
pgControl.version);
|
||||
@ -754,7 +753,7 @@ cmdArchiveGet(void)
|
||||
// Execute the async process
|
||||
archiveAsyncExec(archiveModeGet, commandExec);
|
||||
|
||||
// Mark the async process as forked so it doesn't get forked again. A single run of the async process should be
|
||||
// Mark the async process as forked so it doesn't get forked again. A single run of the async process should be
|
||||
// enough to do the job, running it again won't help anything.
|
||||
forked = true;
|
||||
}
|
||||
|
@ -134,9 +134,9 @@ archivePushReadyList(const String *walPath)
|
||||
/***********************************************************************************************************************************
|
||||
Determine which WAL files need to be pushed to the archive when in async mode
|
||||
|
||||
This is the heart of the "look ahead" functionality in async archiving. Any files in the out directory that do not end in ok are
|
||||
This is the heart of the "look ahead" functionality in async archiving. Any files in the out directory that do not end in ok are
|
||||
removed and any ok files that do not have a corresponding ready file in archive_status (meaning it has been acknowledged by
|
||||
PostgreSQL) are removed. Then all ready files that do not have a corresponding ok file (meaning it has already been processed) are
|
||||
PostgreSQL) are removed. Then all ready files that do not have a corresponding ok file (meaning it has already been processed) are
|
||||
returned for processing.
|
||||
***********************************************************************************************************************************/
|
||||
static StringList *
|
||||
@ -200,7 +200,7 @@ archivePushProcessList(const String *walPath)
|
||||
/***********************************************************************************************************************************
|
||||
Check that pg_control and archive.info match and get the archive id and archive cipher passphrase (if present)
|
||||
|
||||
As much information as possible is collected here so that async archiving has as little work as possible to do for each file. Sync
|
||||
As much information as possible is collected here so that async archiving has as little work as possible to do for each file. Sync
|
||||
archiving does not benefit but it makes sense to use the same function.
|
||||
***********************************************************************************************************************************/
|
||||
typedef struct ArchivePushCheckResult
|
||||
@ -354,13 +354,13 @@ cmdArchivePush(void)
|
||||
|
||||
do
|
||||
{
|
||||
// Check if the WAL segment has been pushed. Errors will not be thrown on the first try to allow the async process
|
||||
// Check if the WAL segment has been pushed. Errors will not be thrown on the first try to allow the async process
|
||||
// a chance to fix them.
|
||||
pushed = archiveAsyncStatus(archiveModePush, archiveFile, throwOnError, true);
|
||||
|
||||
// If the WAL segment has not already been pushed then start the async process to push it. There's no point in
|
||||
// forking the async process off more than once so track that as well. Use an archive lock to prevent more than
|
||||
// one async process being launched.
|
||||
// If the WAL segment has not already been pushed then start the async process to push it. There's no point in
|
||||
// forking the async process off more than once so track that as well. Use an archive lock to prevent more than one
|
||||
// async process being launched.
|
||||
if (!pushed && !forked && lockAcquireP(.returnOnNoLock = true))
|
||||
{
|
||||
// The async process should not output on the console at all
|
||||
@ -383,7 +383,7 @@ cmdArchivePush(void)
// Execute the async process
archiveAsyncExec(archiveModePush, commandExec);

// Mark the async process as forked so it doesn't get forked again. A single run of the async process should be
// enough to do the job, running it again won't help anything.
forked = true;
}

@ -106,11 +106,11 @@ backupLabelCreate(const BackupType type, const String *const backupLabelPrior, c

if (backupLabelLatest != NULL && strCmp(result, backupLabelLatest) <= 0)
{
// If that didn't give us a later label then add one second. It's possible that two backups (they would need to be
// offline or halted online) have run very close together.
result = backupLabelFormat(type, backupLabelPrior, timestamp + 1);

// If the label is still not latest then error. There is probably a timezone change or massive clock skew.
if (strCmp(result, backupLabelLatest) <= 0)
{
THROW_FMT(
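The collision handling above is a simple monotonicity guard: format a label from the timestamp, bump by one second if it does not sort after the latest label, and fail if it still does not. A standalone sketch with a hypothetical label format (lexical order equals time order; the real pgBackRest label format differs):

```c
#include <stdio.h>
#include <string.h>
#include <time.h>

// Hypothetical label: UTC timestamp formatted so that lexical order equals time order
static void labelFormat(char *label, size_t size, time_t timestamp)
{
    struct tm tmPart;

    gmtime_r(&timestamp, &tmPart);
    strftime(label, size, "%Y%m%d-%H%M%S", &tmPart);
}

// Returns 0 on success, -1 when the label still does not sort after the latest (clock skew or timezone change)
static int labelCreate(char *label, size_t size, const char *labelLatest, time_t timestamp)
{
    labelFormat(label, size, timestamp);

    if (labelLatest != NULL && strcmp(label, labelLatest) <= 0)
    {
        // Two backups ran very close together -- try one second later
        labelFormat(label, size, timestamp + 1);

        if (strcmp(label, labelLatest) <= 0)
            return -1;
    }

    return 0;
}

int main(void)
{
    char label[32];

    // 1704110400 is 2024-01-01 12:00:00 UTC, colliding with the "latest" label below
    if (labelCreate(label, sizeof(label), "20240101-120000", 1704110400) == 0)
        printf("%s\n", label); // prints 20240101-120001

    return 0;
}
```
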
@ -243,7 +243,7 @@ backupInit(const InfoBackup *const infoBackup)
{
cfgOptionSet(cfgOptChecksumPage, cfgSourceParam, VARBOOL(pgControl.pageChecksum));
}
// Else set to false. An offline cluster is likely to have false positives so better if the user enables manually.
else
cfgOptionSet(cfgOptChecksumPage, cfgSourceParam, BOOL_FALSE_VAR);
}
@ -596,8 +596,8 @@ backupBuildIncrPrior(const InfoBackup *const infoBackup)
"%s backup cannot alter " CFGOPT_COMPRESS_TYPE " option to '%s', reset to value in %s",
strZ(cfgOptionDisplay(cfgOptType)), strZ(cfgOptionDisplay(cfgOptCompressType)), strZ(backupLabelPrior));

// Set the compression type back to whatever was in the prior backup. This is not strictly needed since we could
// store compression type on a per file basis, but it seems simplest and safest for now.
cfgOptionSet(
cfgOptCompressType, cfgSourceParam, VARSTR(compressTypeStr(manifestPriorData->backupOptionCompressType)));

@ -614,14 +614,14 @@ backupBuildIncrPrior(const InfoBackup *const infoBackup)
cfgOptCompressLevel, cfgSourceParam, VARINT64(varUInt(manifestPriorData->backupOptionCompressLevel)));
}

// If not defined this backup was done in a version prior to page checksums being introduced. Just set checksum-page
// to false and move on without a warning. Page checksums will start on the next full backup.
if (manifestData(result)->backupOptionChecksumPage == NULL)
{
cfgOptionSet(cfgOptChecksumPage, cfgSourceParam, BOOL_FALSE_VAR);
}
// Don't allow the checksum-page option to change in a diff or incr backup. This could be confusing as only certain
// files would be checksummed and the list could be incomplete during reporting.
else
{
const bool checksumPagePrior = varBool(manifestData(result)->backupOptionChecksumPage);
@ -1850,7 +1850,7 @@ backupProcessQueue(const BackupData *const backupData, Manifest *const manifest,
"HINT: is something wrong with the clock or filesystem timestamps?");
}

// If there are no files to backup then we'll exit with an error. This could happen if the database is down and backup is
// called with --no-online twice in a row.
if (fileTotal == 0)
THROW(FileMissingError, "no files have changed since the last backup - this seems unlikely");
@ -1911,7 +1911,7 @@ backupJobCallback(void *const data, const unsigned int clientIdx)
// Get a new job if there are any left
BackupJobData *const jobData = data;

// Determine where to begin scanning the queue (we'll stop when we get back here). When copying from the primary during
// backup from standby only queue 0 will be used.
const unsigned int queueOffset = jobData->backupStandby && clientIdx > 0 ? 1 : 0;
int queueIdx =
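The queueOffset above pins standby copies away from queue 0; the scan itself then wraps around the remaining queues. A minimal sketch of that wrap-around scan (queueHasWork() is a hypothetical callback, not the actual implementation):

```c
#include <stdbool.h>

// Scan queues starting at start and stop once we get back to it, so a client prefers its own queue but can steal work
static int queueScan(unsigned int start, unsigned int queueTotal, bool (*queueHasWork)(unsigned int queueIdx))
{
    for (unsigned int scanIdx = 0; scanIdx < queueTotal; scanIdx++)
    {
        unsigned int queueIdx = (start + scanIdx) % queueTotal;

        if (queueHasWork(queueIdx))
            return (int)queueIdx;
    }

    // No work left in any queue
    return -1;
}
```
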
@ -2162,7 +2162,7 @@ backupProcess(
// First client is always on the primary
protocolParallelClientAdd(parallelExec, protocolLocalGet(protocolStorageTypePg, backupData->pgIdxPrimary, 1));

// Create the rest of the clients on the primary or standby depending on the value of backup-standby. Note that standby
// backups don't count the primary client in process-max.
const unsigned int processMax = cfgOptionUInt(cfgOptProcessMax) + (backupStandby ? 1 : 0);
const unsigned int pgIdx = backupStandby ? backupData->pgIdxStandby : backupData->pgIdxPrimary;
@ -2230,7 +2230,7 @@ backupProcess(
ASSERT(lstEmpty(*(List **)lstGet(jobData.queueList, queueIdx)));
#endif

// Remove files from the manifest that were removed during the backup. This must happen after processing to avoid
// invalidating pointers by deleting items from the list.
for (unsigned int fileRemoveIdx = 0; fileRemoveIdx < strLstSize(fileRemove); fileRemoveIdx++)
manifestFileRemove(manifest, strLstGet(fileRemove, fileRemoveIdx));
@ -2299,8 +2299,8 @@ backupArchiveCheckCopy(const BackupData *const backupData, Manifest *const manif

ASSERT(manifest != NULL);

// If archive logs are required to complete the backup, then check them. This is the default, but can be overridden if the
// archive logs are going to a different server. Be careful of disabling this option because there is no way to verify that the
// backup will be consistent - at least not here.
if (cfgOptionBool(cfgOptArchiveCheck))
{
@ -2431,7 +2431,7 @@ backupComplete(InfoBackup *const infoBackup, Manifest *const manifest)
{
const String *const backupLabel = manifestData(manifest)->backupLabel;

// Validation and final save of the backup manifest. Validate in strict mode to catch as many potential issues as possible.
// -------------------------------------------------------------------------------------------------------------------------
manifestValidate(manifest, true);

@ -2444,7 +2444,7 @@ backupComplete(InfoBackup *const infoBackup, Manifest *const manifest)
storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s/" BACKUP_MANIFEST_FILE, strZ(backupLabel))));

// Copy a compressed version of the manifest to history. If the repo is encrypted then the passphrase to open the manifest
// is required. We can't just do a straight copy since the destination needs to be compressed and that must happen before
// encryption in order to be efficient. Compression will always be gz for compatibility and since it is always available.
// -------------------------------------------------------------------------------------------------------------------------
StorageRead *const manifestRead = storageNewReadP(
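The ordering constraint in the comment above matters because ciphertext does not compress, so the gz filter must run before the encryption filter. A hedged sketch of that ordering with hypothetical constructor names (the real constructors and their signatures differ; only the add-in-order idea is taken from this diff):

```c
typedef struct IoFilter IoFilter;
typedef struct IoFilterGroup IoFilterGroup;

// Hypothetical prototypes for the sketch
IoFilter *gzCompressNew(int level);
IoFilter *encryptNew(const char *cipherPass);
void filterGroupAdd(IoFilterGroup *filterGroup, IoFilter *filter);

static void manifestHistoryFilterSetup(IoFilterGroup *filterGroup, const char *cipherPass)
{
    // Compress first -- always gz for compatibility and because it is always available
    filterGroupAdd(filterGroup, gzCompressNew(9));

    // Then encrypt, but only when the repo has a passphrase
    if (cipherPass != NULL)
        filterGroupAdd(filterGroup, encryptNew(cipherPass));
}
```
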
@ -2472,8 +2472,8 @@ backupComplete(InfoBackup *const infoBackup, Manifest *const manifest)
if (storageFeature(storageRepoWrite(), storageFeaturePathSync))
storagePathSyncP(storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/" BACKUP_PATH_HISTORY));

// Create a symlink to the most recent backup if supported. This link is purely informational for the user and is never used
// by us since symlinks are not supported on all storage types.
// -------------------------------------------------------------------------------------------------------------------------
backupLinkLatest(backupLabel, cfgOptionGroupIdxDefault(cfgOptGrpRepo));

@ -157,8 +157,8 @@ backupLinkLatest(const String *const backupLabel, const unsigned int repoIdx)

MEM_CONTEXT_TEMP_BEGIN()
{
// Create a symlink to the most recent backup if supported. This link is purely informational for the user and is never used
// by us since symlinks are not supported on all storage types.
// -------------------------------------------------------------------------------------------------------------------------
const String *const latestLink = storagePathP(storageRepoIdx(repoIdx), STRDEF(STORAGE_REPO_BACKUP "/" BACKUP_LINK_LATEST));

@ -32,7 +32,7 @@ segmentNumber(const String *const pgFile)
FUNCTION_TEST_PARAM(STRING, pgFile);
FUNCTION_TEST_END();

// Determine which segment number this is by checking for a numeric extension. No extension means segment 0.
FUNCTION_TEST_RETURN(UINT, regExpMatchOne(STRDEF("\\.[0-9]+$"), pgFile) ? cvtZToUInt(strrchr(strZ(pgFile), '.') + 1) : 0);
}
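segmentNumber() above keys off a purely numeric extension. A standalone restatement in plain libc, useful for checking the behavior on sample relation file names:

```c
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// A numeric extension selects the segment; no extension (or a non-numeric one) means segment 0
static unsigned int segmentNumber(const char *pgFile)
{
    const char *dot = strrchr(pgFile, '.');

    if (dot == NULL || dot[1] == '\0')
        return 0;

    // Mirror the regex \.[0-9]+$ -- every character after the dot must be a digit
    for (const char *numChr = dot + 1; *numChr != '\0'; numChr++)
        if (!isdigit((unsigned char)*numChr))
            return 0;

    return (unsigned int)strtoul(dot + 1, NULL, 10);
}

int main(void)
{
    printf("%u\n", segmentNumber("base/16384/1249"));   // 0
    printf("%u\n", segmentNumber("base/16384/1249.1")); // 1
    return 0;
}
```
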
@ -191,7 +191,7 @@ backupFile(

if (fileResult->backupCopyResult == backupCopyResultCopy || fileResult->backupCopyResult == backupCopyResultReCopy)
{
// Setup pg file for read. Only read as many bytes as passed in pgFileSize. If the file is growing it does no
// good to copy data past the end of the size recorded in the manifest since those blocks will need to be
// replayed from WAL during recovery.
bool repoChecksum = false;

@ -73,9 +73,9 @@ cmdOption(void)
// Loop through options and add the ones that are interesting
for (ConfigOption optionId = 0; optionId < CFG_OPTION_TOTAL; optionId++)
{
// Skip the option if not valid for this command. Generally only one command runs at a time, but sometimes
// commands are chained together (e.g. backup and expire) and the second command may not use all the options of
// the first command. Displaying them is harmless but might cause confusion.
if (!cfgOptionValid(optionId) || !cfgParseOptionValid(cfgCommand(), cfgCommandRole(), optionId))
continue;

@ -199,7 +199,7 @@ cmdEnd(int code, const String *errorMessage)

ASSERT(cfgCommand() != cfgCmdNone);

// Skip this log message if it won't be output. It's not too expensive but since we skipped cmdBegin(), may as well.
if (logAny(cfgLogLevelDefault()))
{
MEM_CONTEXT_TEMP_BEGIN()

@ -198,7 +198,7 @@ expireDiffBackup(InfoBackup *infoBackup, unsigned int repoIdx)
{
for (unsigned int diffIdx = 0; diffIdx < strLstSize(currentBackupList) - differentialRetention; diffIdx++)
{
// Skip if this is a full backup. Full backups only count as differential when deciding which differential
// backups to expire.
if (regExpMatchOne(backupRegExpP(.full = true), strLstGet(currentBackupList, diffIdx)))
continue;
@ -586,7 +586,7 @@ removeExpiredArchive(InfoBackup *infoBackup, bool timeBasedFullRetention, unsign

if (archiveRetentionBackup.backupArchiveStart != NULL)
{
// Get archive ranges to preserve. Because archive retention can be less than total retention it is
// important to preserve archive that is required to make the older backups consistent even though they
// cannot be played any further forward with PITR.
String *archiveExpireMax = NULL;
@ -669,7 +669,7 @@ removeExpiredArchive(InfoBackup *infoBackup, bool timeBasedFullRetention, unsign
archiveExpire.stop = strDup(walPath);
}
// Else delete individual files instead if the major path is less than or equal to the most recent
// retention backup. This optimization prevents scanning through major paths that could not possibly
// have anything to expire.
else if (strCmp(walPath, strSubN(archiveExpireMax, 0, 16)) <= 0)
{

@ -57,8 +57,8 @@ cmdRemote(ProtocolServer *const server)

MEM_CONTEXT_TEMP_BEGIN()
{
// Acquire a lock if this command needs one. We'll use the noop that is always sent from the client right after the
// handshake to return an error. We can't take a lock earlier than this because we want the error to go back through the
// protocol layer.
volatile bool success = false;

@ -179,9 +179,9 @@ cmdStorageGet(void)
{
result = storageGetProcess(ioFdWriteNew(STRDEF("stdout"), STDOUT_FILENO, cfgOptionUInt64(cfgOptIoTimeout)));
}
// Ignore write errors because it's possible (even likely) that this output is being piped to something like head which will
// exit when it gets what it needs and leave us writing to a broken pipe. It would be better to just ignore the broken pipe
// error but currently we don't store system error codes.
CATCH(FileWriteError)
{
}

@ -188,9 +188,9 @@ cmdStorageList(void)
{
storageListRender(ioFdWriteNew(STRDEF("stdout"), STDOUT_FILENO, cfgOptionUInt64(cfgOptIoTimeout)));
}
// Ignore write errors because it's possible (even likely) that this output is being piped to something like head which will
// exit when it gets what it needs and leave us writing to a broken pipe. It would be better to just ignore the broken pipe
// error but currently we don't store system error codes.
CATCH(FileWriteError)
{
}
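Both storage commands above share the same empty-CATCH idiom. Reassembled from the code in these hunks (pgBackRest's error macros, so this fragment is not standalone):

```c
TRY_BEGIN()
{
    storageListRender(ioFdWriteNew(STRDEF("stdout"), STDOUT_FILENO, cfgOptionUInt64(cfgOptIoTimeout)));
}
// Swallow the error: a downstream consumer such as head may close the pipe as soon as it has what it needs,
// and that is not a failure from the user's point of view
CATCH(FileWriteError)
{
}
TRY_END();
```
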
@ -803,9 +803,9 @@ restoreManifestOwner(const Manifest *const manifest, const String **const rootRe
MEM_CONTEXT_PRIOR_END();
}
}
// Else set owners to NULL. This means we won't make any attempt to update ownership and will just leave it as written by
// the current user/group. If there are existing files that are not owned by the current user/group then we will attempt to
// update them, which will generally cause an error, though some systems allow updates to the group ownership.
// -------------------------------------------------------------------------------------------------------------------------
else
{
@ -942,7 +942,7 @@ restoreCleanBuildRecurse(StorageIterator *const storageItr, const RestoreCleanCa
if (cleanData->basePath && info.type == storageTypeFile && strLstExists(cleanData->fileIgnore, info.name))
continue;

// If this is not a delta then error because the directory is expected to be empty. Ignore the . path.
if (!cleanData->delta)
{
THROW_FMT(
@ -1127,9 +1127,9 @@ restoreCleanBuild(const Manifest *const manifest, const String *const rootReplac

strLstSort(cleanData->fileIgnore, sortOrderAsc);

// Check that the path exists. If not, there's no need to do any cleaning and we'll attempt to create it later. Don't
// log check for the same path twice. There can be multiple links to files in the same path, but logging it more than
// once makes the logs noisy and looks like a bug.
if (!strLstExists(pathChecked, cleanData->targetPath))
LOG_DETAIL_FMT("check '%s' exists", strZ(cleanData->targetPath));

@ -1201,8 +1201,8 @@ restoreCleanBuild(const Manifest *const manifest, const String *const rootReplac

// Step 2: Clean target directories
// -------------------------------------------------------------------------------------------------------------------------
// Delete the pg_control file (if it exists) so the cluster cannot be started if restore does not complete. Sync the path so
// the file does not return, zombie-like, in the case of a host crash.
if (storageExistsP(storagePg(), STRDEF(PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL)))
{
LOG_DETAIL_FMT(
@ -1218,10 +1218,10 @@ restoreCleanBuild(const Manifest *const manifest, const String *const rootReplac
// Only clean if the target exists
if (cleanData->exists)
{
// Don't clean file links. It doesn't matter whether the file exists or not since we know it is in the manifest.
if (cleanData->target->file == NULL)
{
// Only log when doing a delta restore because otherwise the targets should be empty. We'll still run the clean
// to fix permissions/ownership on the target paths.
if (delta)
LOG_INFO_FMT("remove invalid files/links/paths from '%s'", strZ(cleanData->targetPath));
@ -1268,12 +1268,12 @@ restoreCleanBuild(const Manifest *const manifest, const String *const rootReplac
{
const ManifestPath *path = manifestPath(manifest, pathIdx);

// Skip the pg_tblspc path because it only maps to the manifest. We should remove this in a future release but not much
// can be done about it for now.
if (strEq(path->name, MANIFEST_TARGET_PGTBLSPC_STR))
continue;

// If this path has been mapped as a link then create a link. The path has already been created as part of target
// creation (or it might have already existed).
const ManifestLink *link = manifestLinkFindDefault(
manifest,
@ -1286,7 +1286,7 @@ restoreCleanBuild(const Manifest *const manifest, const String *const rootReplac
const String *pgPath = storagePathP(storagePg(), manifestPathPg(link->name));
StorageInfo linkInfo = storageInfoP(storagePg(), pgPath, .ignoreMissing = true);

// Create the link if it is missing. If it exists it should already have the correct ownership and destination.
if (!linkInfo.exists)
{
LOG_DETAIL_FMT("create symlink '%s' to '%s'", strZ(pgPath), strZ(link->destination));
@ -1315,7 +1315,7 @@ restoreCleanBuild(const Manifest *const manifest, const String *const rootReplac
}
}

// Step 4: Create file links. These don't get created during path creation because they do not have a matching path entry.
// -------------------------------------------------------------------------------------------------------------------------
for (unsigned int linkIdx = 0; linkIdx < manifestLinkTotal(manifest); linkIdx++)
{
@ -1324,7 +1324,7 @@ restoreCleanBuild(const Manifest *const manifest, const String *const rootReplac
const String *pgPath = storagePathP(storagePg(), manifestPathPg(link->name));
StorageInfo linkInfo = storageInfoP(storagePg(), pgPath, .ignoreMissing = true);

// Create the link if it is missing. If it exists it should already have the correct ownership and destination.
if (!linkInfo.exists)
{
LOG_DETAIL_FMT("create symlink '%s' to '%s'", strZ(pgPath), strZ(link->destination));
@ -1565,7 +1565,7 @@ restoreRecoveryOption(unsigned int pgVersion)
String *key = strLstGet(recoveryOptionKey, keyIdx);
const String *value = varStr(kvGet(recoveryOption, VARSTR(key)));

// Replace - in key with _. Since we use - users naturally will as well.
strReplaceChr(key, '-', '_');

kvPut(result, VARSTR(key), VARSTR(value));
@ -1595,9 +1595,9 @@ restoreRecoveryOption(unsigned int pgVersion)
// Write restore_command
if (!strLstExists(recoveryOptionKey, RESTORE_COMMAND_STR))
{
// Null out options that it does not make sense to pass from the restore command to archive-get. All of these have
// reasonable defaults so there is no danger of an error -- they just might not be optimal. In any case, it seems better
// than, for example, passing --process-max=32 to archive-get because it was specified for restore.
KeyValue *optionReplace = kvNew();

kvPut(optionReplace, VARSTRDEF(CFGOPT_EXEC_ID), NULL);
@ -1829,7 +1829,7 @@ restoreRecoveryWriteAutoConf(
// If recovery was requested then write the recovery options
if (cfgOptionStrId(cfgOptType) != CFGOPTVAL_TYPE_NONE)
{
// If the user specified standby_mode as a recovery option then error. It's tempting to just set type=standby in this
// case but since config parsing has already happened the target options could be in an invalid state.
if (cfgOptionTest(cfgOptRecoveryOption))
{
@ -1841,7 +1841,7 @@ restoreRecoveryWriteAutoConf(
// Get the key and value
String *key = strLstGet(recoveryOptionKey, keyIdx);

// Replace - in key with _. Since we use - users naturally will as well.
strReplaceChr(key, '-', '_');

if (strEq(key, STANDBY_MODE_STR))
@ -2464,7 +2464,7 @@ cmdRestore(void)
if (!manifestData(jobData.manifest)->backupOptionOnline && cfgOptionSource(cfgOptType) == cfgSourceDefault)
cfgOptionSet(cfgOptType, cfgSourceParam, VARUINT64(CFGOPTVAL_TYPE_NONE));

// Validate manifest. Don't use strict mode because we'd rather ignore problems that won't affect a restore.
manifestValidate(jobData.manifest, false);

// Get the cipher subpass used to decrypt files in the backup
@ -2563,7 +2563,7 @@ cmdRestore(void)
{
const String *pgPath = manifestTargetPath(jobData.manifest, target);

// Don't sync the same path twice. There can be multiple links to files in the same path, but syncing it more than
// once makes the logs noisy and looks like a bug even though it doesn't hurt anything or realistically affect
// performance.
if (strLstExists(pathSynced, pgPath))
@ -2582,7 +2582,7 @@ cmdRestore(void)
{
const String *manifestName = manifestPath(jobData.manifest, pathIdx)->name;

// Skip the pg_tblspc path because it only maps to the manifest. We should remove this in a future release but not much
// can be done about it for now.
if (strEqZ(manifestName, MANIFEST_TARGET_PGTBLSPC))
continue;

@ -7,7 +7,7 @@ Assert Routines
#include "common/error/error.h"

/***********************************************************************************************************************************
Asserts are used in test code to ensure that certain conditions are true. They are omitted from production builds.
***********************************************************************************************************************************/
#ifdef DEBUG

@ -58,7 +58,7 @@ Asserts are used in test code to ensure that certain conditions are true. They
#endif

/***********************************************************************************************************************************
Checks are used in production builds to test very important conditions. Be sure to limit use to the most critical cases.
***********************************************************************************************************************************/
#define CHECK(type, condition, message) \
do \
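The CHECK() definition is cut off by the hunk boundary above. A hypothetical completion showing the usual shape of such a macro (the real body may differ; THROW_FMT() is the error macro used throughout this codebase):

```c
// Hypothetical completion -- wrap in do/while(0) so the macro behaves like a statement, and throw when the condition fails
#define CHECK(type, condition, message)                                                                                            \
    do                                                                                                                             \
    {                                                                                                                              \
        if (!(condition))                                                                                                          \
            THROW_FMT(type, "%s", message);                                                                                        \
    }                                                                                                                              \
    while (0)
```
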
@ -42,7 +42,7 @@ Functions
// Get enum from a compression type string
FN_EXTERN CompressType compressTypeEnum(StringId type);

// Check that a valid compress type is compiled into this binary. Errors when the compress type is not present.
FN_EXTERN void compressTypePresent(CompressType type);

// Get string representation of a compression type. This is the extension without the period.
@ -52,7 +52,7 @@ FN_EXTERN const String *compressTypeStr(CompressType type);
// compressType none is returned, even if the file is compressed with some unknown type.
FN_EXTERN CompressType compressTypeFromName(const String *name);

// Compression filter for the specified type. Error when compress type is none or invalid.
typedef struct CompressFilterParam
{
VAR_PARAM_HEADER;
@ -64,11 +64,11 @@ typedef struct CompressFilterParam

FN_EXTERN IoFilter *compressFilter(CompressType type, int level, CompressFilterParam param);

// Compression/decompression filter based on string type and a parameter list. This is useful when a filter must be created on a
// remote system since the filter type and parameters can be passed through a protocol.
FN_EXTERN IoFilter *compressFilterPack(StringId filterType, const Pack *filterParam);

// Decompression filter for the specified type. Error when compress type is none or invalid.
typedef struct DecompressFilterParam
{
VAR_PARAM_HEADER;

@ -20,7 +20,7 @@ Developed against version r131 using the documentation in https://github.com/lz4
#include "common/type/pack.h"

/***********************************************************************************************************************************
Older versions of lz4 do not define the max header size. This seems to be the max for any version.
***********************************************************************************************************************************/
#ifndef LZ4F_HEADER_SIZE_MAX
#define LZ4F_HEADER_SIZE_MAX 19
@ -79,9 +79,9 @@ lz4CompressFreeResource(THIS_VOID)
/***********************************************************************************************************************************
Compress data
***********************************************************************************************************************************/
// Helper to return a buffer where output will be written. If there is enough space in the provided output buffer then use it,
// otherwise allocate an internal buffer to hold the compressed data. Once we start using the internal buffer we'll need to continue
// using it until it is completely flushed.
static Buffer *
lz4CompressBuffer(Lz4Compress *this, size_t required, Buffer *output)
{

@ -18,7 +18,7 @@ Block Cipher
/***********************************************************************************************************************************
Header constants and sizes
***********************************************************************************************************************************/
// Magic constant for salted encrypt. Only salted encrypt is done here, but this constant is required for compatibility with the
// openssl command-line tool.
#define CIPHER_BLOCK_MAGIC "Salted__"
#define CIPHER_BLOCK_MAGIC_SIZE (sizeof(CIPHER_BLOCK_MAGIC) - 1)
@ -161,8 +161,8 @@ cipherBlockProcessBlock(CipherBlock *this, const unsigned char *source, size_t s
source += headerExpected - this->headerSize;
sourceSize -= headerExpected - this->headerSize;

// The first bytes of the file to decrypt should be equal to the magic. If not then this is not an encrypted file,
// or at least not in a format we recognize.
if (!this->raw && memcmp(this->header, CIPHER_BLOCK_MAGIC, CIPHER_BLOCK_MAGIC_SIZE) != 0)
THROW(CryptoError, "cipher header invalid");
}
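The decrypt-side check above is a plain prefix comparison against the 8-byte OpenSSL salted header. A standalone illustration:

```c
#include <stdio.h>
#include <string.h>

#define CIPHER_BLOCK_MAGIC "Salted__"
#define CIPHER_BLOCK_MAGIC_SIZE (sizeof(CIPHER_BLOCK_MAGIC) - 1)

// Same test as above: a decryptable (non-raw) file must begin with the OpenSSL salted header
static int cipherHeaderValid(const unsigned char *header, size_t size)
{
    return size >= CIPHER_BLOCK_MAGIC_SIZE && memcmp(header, CIPHER_BLOCK_MAGIC, CIPHER_BLOCK_MAGIC_SIZE) == 0;
}

int main(void)
{
    const unsigned char good[] = "Salted__12345678";
    const unsigned char bad[] = "plain text, not encrypted";

    printf("%d %d\n", cipherHeaderValid(good, sizeof(good) - 1), cipherHeaderValid(bad, sizeof(bad) - 1)); // 1 0
    return 0;
}
```
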
@ -321,8 +321,8 @@ cipherBlockProcess(THIS_VOID, const Buffer *source, Buffer *destination)

if (source == NULL)
{
// If salt was not generated it means that process() was never called with any data. It's OK to encrypt a zero byte file
// but we need to call process to generate the header.
if (!this->saltDone)
{
destinationSizeActual = cipherBlockProcessBlock(this, NULL, 0, bufRemainsPtr(outputActual));
@ -400,8 +400,8 @@ cipherBlockNew(const CipherMode mode, const CipherType cipherType, const Buffer
// Init crypto subsystem
cryptoInit();

// Lookup cipher by name. This means the ciphers passed in must exactly match a name expected by OpenSSL. This is a good thing
// since the name required by the openssl command-line tool will match what is used by pgBackRest.
String *const cipherTypeStr = strIdToStr(cipherType);
const EVP_CIPHER *cipher = EVP_get_cipherbyname(strZ(cipherTypeStr));

@ -410,7 +410,7 @@ cipherBlockNew(const CipherMode mode, const CipherType cipherType, const Buffer

strFree(cipherTypeStr);

// Lookup digest. If not defined it will be set to sha1.
const EVP_MD *digest = NULL;

if (param.digest)
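Both lookups above use standard OpenSSL APIs. A standalone illustration (build with -lcrypto; on older OpenSSL the algorithm tables must be loaded first, which is presumably part of what cryptoInit() handles, while OpenSSL 3.x resolves these names without explicit loading):

```c
#include <openssl/evp.h>
#include <stdio.h>

int main(void)
{
    // Names must match OpenSSL's exactly (e.g. "aes-256-cbc"), which keeps them
    // compatible with the openssl command-line tool; "sha1" is the fallback digest
    const EVP_CIPHER *cipher = EVP_get_cipherbyname("aes-256-cbc");
    const EVP_MD *digest = EVP_get_digestbyname("sha1");

    printf("cipher %s, digest %s\n", cipher != NULL ? "found" : "missing", digest != NULL ? "found" : "missing");
    return 0;
}
```
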
@ -84,7 +84,7 @@ static struct
/***********************************************************************************************************************************
Message buffer and buffer size

The message buffer is statically allocated so there is some space to store error messages. Not being able to allocate such a small
amount of memory seems pretty unlikely so just keep the code simple and let the loader deal with massively constrained memory
situations.

@ -25,11 +25,11 @@ FINALLY()
}
TRY_END();

The CATCH() and FINALLY() blocks are optional but at least one must be specified. There is no need for a TRY block by itself
because errors will automatically be propagated to the nearest try block in the call stack.

IMPORTANT: If a local variable of the function containing a TRY block is modified in the TRY_BEGIN() block and used later in the
function after an error is thrown, that variable must be declared "volatile" if preserving the value is important. Beware that
gcc's -Wclobbered warnings are almost entirely useless for catching such issues.

IMPORTANT: Never call return from within any of the error-handling blocks.
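A usage sketch of the volatile rule just described, written against the macros documented above (not standalone; doRiskyWork() is a hypothetical helper that may throw):

```c
// Modified in the TRY block and read after an error, so it must be volatile to survive the longjmp underneath
volatile int total = 0;

TRY_BEGIN()
{
    total = doRiskyWork(); // hypothetical -- may throw
}
CATCH(FileWriteError)
{
    // total still holds the last value assigned before the throw
}
FINALLY()
{
    LOG_DETAIL_FMT("total = %d", (int)total);
}
TRY_END();
```
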
@ -46,7 +46,7 @@ struct Exec
Macro to close file descriptors after dup2() in the child process

If the parent process is daemonized and has closed stdout, stdin, and stderr or some combination of them, then the newly created
descriptors might overlap stdout, stdin, or stderr. In that case we don't want to accidentally close the descriptor that we have
just copied.

Note that this is pretty specific to the way that file descriptors are handled in this module and may not be generally applicable in
@ -148,7 +148,7 @@ execNew(const String *command, const StringList *param, const String *name, Time
/***********************************************************************************************************************************
Check if the process is still running

This should be called when anything unexpected happens while reading or writing, including errors and eof. If this function returns
then the original error should be rethrown.
***********************************************************************************************************************************/
static void
@ -303,7 +303,7 @@ execOpen(Exec *this)

ASSERT(this != NULL);

// Create pipes to communicate with the subprocess. The names of the pipes are from the perspective of the parent process since
// the child process will use them only briefly before exec'ing.
int pipeRead[2];
int pipeWrite[2];
@ -331,10 +331,10 @@ execOpen(Exec *this)
// Assign stderr to the input side of the error pipe
PIPE_DUP2(pipeError, 1, STDERR_FILENO);

// Execute the binary. This statement will not return if it is successful
execvp(strZ(this->command), (char **const)strLstPtr(this->param));

// If we got here then there was an error. We can't use a throw as we normally would because we have already shutdown
// logging and we don't want to execute exit paths that might free parent resources which we still have references to.
fprintf(stderr, "unable to execute '%s': [%d] %s\n", strZ(this->command), errno, strerror(errno));
exit(errorTypeCode(&ExecuteError));
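The child-side sequence above (dup2 the pipe ends onto the standard descriptors, exec, and report on stderr if exec returns) is standard POSIX. A standalone miniature of the same pattern, with the pipe named from the parent's perspective as in the comment above:

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
    int pipeRead[2]; // parent reads the child's stdout from pipeRead[0]

    if (pipe(pipeRead) != 0)
        return 1;

    pid_t pid = fork();

    if (pid == -1)
        return 1;

    if (pid == 0)
    {
        // Child: point stdout at the pipe, close the now-duplicated descriptors, then exec
        dup2(pipeRead[1], STDOUT_FILENO);
        close(pipeRead[0]);
        close(pipeRead[1]);

        execlp("echo", "echo", "hello from child", (char *)NULL);

        // Only reached when exec fails -- report on stderr and exit without running parent cleanup paths
        fprintf(stderr, "unable to execute: [%d] %s\n", errno, strerror(errno));
        _exit(1);
    }

    // Parent: close the write side and read what the child printed
    close(pipeRead[1]);

    char buffer[64];
    ssize_t size = read(pipeRead[0], buffer, sizeof(buffer) - 1);

    if (size > 0)
    {
        buffer[size] = '\0';
        printf("child said: %s", buffer);
    }

    close(pipeRead[0]);
    return 0;
}
```
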
@ -35,7 +35,7 @@ forkDetach(void)
// The process should never receive a SIGHUP but ignore it just in case
signal(SIGHUP, SIG_IGN);

// There should be no way the child process can exit first (after the next fork) but just in case ignore SIGCHLD. This means
// that the child process will automatically be reaped by the kernel should it finish first rather than becoming defunct.
signal(SIGCHLD, SIG_IGN);

@ -1,7 +1,7 @@
/***********************************************************************************************************************************
IO Buffer Filter

Move data from the input buffer to the output buffer without overflowing the output buffer. Automatically used as the last filter
in a FilterGroup if the last filter is not already an InOut filter, so there is no reason to add it manually to a FilterGroup.
***********************************************************************************************************************************/
#ifndef COMMON_IO_FILTER_BUFFER_H

@ -108,8 +108,8 @@ ioFilterProcessInOut(IoFilter *this, const Buffer *input, Buffer *output)
}

/***********************************************************************************************************************************
If done is not defined by the filter then check inputSame. If inputSame is true then the filter is not done. Even if the filter is
done the interface will not report done until the interface is flushing.
***********************************************************************************************************************************/
FN_EXTERN bool
ioFilterDone(const IoFilter *this)

@ -3,8 +3,8 @@ IO Filter Interface

Filters can modify an i/o stream (e.g. GzCompress, GzDecompress), generate a result (e.g. IoSize, CryptoHash), or even do both.

A filter is created using a constructor implemented by each filter (e.g. ioBufferNew). Filter processing is managed by IoFilterGroup
so the only user facing functions are ioFilterResult() and ioFilterType().

Information on implementing a filter is in filter.internal.h.
***********************************************************************************************************************************/

@ -1,17 +1,17 @@
/***********************************************************************************************************************************
IO Filter Interface Internal

Two types of filters are implemented using this interface: In and InOut.

In filters accept input and produce a result, but do not modify the input. An example is the IoSize filter which counts all bytes
that pass through it.

InOut filters accept input and produce output (and perhaps a result). Because the input/output buffers may not be the same size the
filter must be prepared to accept the same input again (by implementing IoFilterInputSame) if the output buffer is too small to
accept all processed data. If the filter holds state even when inputSame is false then it may also implement IoFilterDone to
indicate that the filter should be flushed (by passing NULL inputs) after all input has been processed. InOut filters should strive
to fill the output buffer as much as possible, i.e., if the output buffer is not full after processing then inputSame should be
false. An example is the IoBuffer filter which buffers data between unequally sized input/output buffers.

Each filter has a type that allows it to be identified in the filter list.
***********************************************************************************************************************************/
@ -27,21 +27,21 @@ Constructors
***********************************************************************************************************************************/
typedef struct IoFilterInterface
{
// Indicates that filter processing is done. This is used for filters that have additional data to be flushed even after all
// input has been processed. Compression and encryption filters will usually need to implement done. If done is not implemented
// then it will always return true if all input has been consumed, i.e. if inputSame returns false.
bool (*done)(const void *driver);

// Processing function for filters that do not produce output. Note that result must be implemented in this case (or else what
// would be the point).
void (*in)(void *driver, const Buffer *);

// Processing function for filters that produce output. InOut filters will typically implement inputSame and may also implement
// done.
void (*inOut)(void *driver, const Buffer *, Buffer *);

// InOut filters must be prepared for an output buffer that is too small to accept all the processed output. In this case the
// filter must implement inputSame and set it to true when there is more output to be produced for a given input. On the next
// call to inOut the same input will be passed along with a fresh output buffer with space for more processed output.
bool (*inputSame)(const void *driver);
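As a concrete reading of the interface above, here is a hypothetical "In" filter fragment in the spirit of IoSize: it only implements in(), so done defaults to done-when-input-consumed and inOut/inputSame stay NULL. Constructor and registration glue are omitted, and the wiring assumes the struct exactly as shown:

```c
#include <stdint.h>

typedef struct SizeFilter
{
    uint64_t total; // running byte count, surfaced later as the filter result
} SizeFilter;

// In-only processing: inspect the input, produce no output
static void sizeFilterIn(void *driver, const Buffer *input)
{
    SizeFilter *this = driver;

    this->total += bufUsed(input);
}

// Only .in is set; .done, .inOut, and .inputSame stay NULL per the comments above
static const IoFilterInterface sizeFilterInterface = {.in = sizeFilterIn};
```
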
@ -174,7 +174,7 @@ ioFilterGroupOpen(IoFilterGroup *this)

MEM_CONTEXT_OBJ_BEGIN(this)
{
// If the last filter is not an output filter then add a filter to buffer/copy data. Input filters won't copy to an output
// buffer so we need some way to get the data to the output buffer.
if (ioFilterGroupSize(this) == 0 ||
!ioFilterOutput((ioFilterGroupGet(this, ioFilterGroupSize(this) - 1))->filter))
@ -182,7 +182,7 @@ ioFilterGroupOpen(IoFilterGroup *this)
ioFilterGroupAdd(this, ioBufferNew());
}

// Create filter input/output buffers. Input filters do not get an output buffer since they don't produce output.
Buffer **lastOutputBuffer = NULL;

for (unsigned int filterIdx = 0; filterIdx < ioFilterGroupSize(this); filterIdx++)
@ -198,13 +198,13 @@ ioFilterGroupOpen(IoFilterGroup *this)
else
{
// This cast is required because the compiler can't guarantee the const-ness of this object, i.e. it could be
// modified in other parts of the code. This is actually expected and the only reason we need this const is to match
// the const-ness of the input buffer provided by the caller.
filterData->input = (const Buffer **)lastOutputBuffer;
filterData->inputLocal = *lastOutputBuffer;
}

// If this is not the last output filter then create a new output buffer for it. The output buffer for the last filter
// will be provided to the process function.
if (ioFilterOutput(filterData->filter) && filterIdx < ioFilterGroupSize(this) - 1)
{
@ -256,8 +256,8 @@ ioFilterGroupProcess(IoFilterGroup *this, const Buffer *input, Buffer *output)
// Start from the first filter by default
unsigned int filterIdx = 0;

// Search from the end of the list for a filter that needs the same input. This indicates that the filter was not able to
// empty the input buffer on the last call. Maybe it won't this time either -- we can but try.
if (ioFilterGroupInputSame(this))
{
this->pub.inputSame = false;
@ -275,13 +275,13 @@ ioFilterGroupProcess(IoFilterGroup *this, const Buffer *input, Buffer *output)
}
while (filterIdx != 0);

// If no filter is found that needs the same input that means we are done with the current input. So end the loop and
// get some more input.
if (!ioFilterGroupInputSame(this))
break;
}

// Process forward from the filter that has input to process. This may be a filter that needs the same input or it may be
// new input for the first filter.
for (; filterIdx < ioFilterGroupSize(this); filterIdx++)
{
@ -301,7 +301,7 @@ ioFilterGroupProcess(IoFilterGroup *this, const Buffer *input, Buffer *output)
{
this->pub.inputSame = true;
}
// Else clear the buffer if it was locally allocated. If the input buffer was passed in then the caller is
// responsible for clearing it.
else if (filterData->inputLocal != NULL)
bufUsedZero(filterData->inputLocal);
@ -315,7 +315,7 @@ ioFilterGroupProcess(IoFilterGroup *this, const Buffer *input, Buffer *output)
ioFilterProcessIn(filterData->filter, *filterData->input);
}

// If the filter is done and has no more output then null the output buffer. Downstream filters have a pointer to this
// buffer so their inputs will also change to null and they'll flush.
if (filterData->output != NULL && ioFilterDone(filterData->filter) && bufUsed(filterData->output) == 0)
filterData->output = NULL;

@ -1,11 +1,11 @@
/***********************************************************************************************************************************
IO Filter Group

Process data through an arbitrary group of filters in the order added by the user using ioFilterGroupAdd(). After processing results
can be gathered using ioFilterGroupResult() for any filters that produce results.

Processing is complex and asymmetric for read/write so should be done via the IoRead and IoWrite objects. General users need only
call ioFilterGroupNew(), ioFilterGroupAdd(), and ioFilterGroupResult().
***********************************************************************************************************************************/
#ifndef COMMON_IO_FILTER_GROUP_H
#define COMMON_IO_FILTER_GROUP_H
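A hedged usage sketch of the three calls named above (constructor parameters are guessed where not shown in this diff -- in particular the size-filter constructor name and the result lookup key are hypothetical):

```c
IoFilterGroup *filterGroup = ioFilterGroupNew();

// Filters run in the order added: count the raw bytes, then compress them
ioFilterGroupAdd(filterGroup, ioSizeNew());                                                 // hypothetical constructor name
ioFilterGroupAdd(filterGroup, compressFilter(compressTypeGz, 6, (CompressFilterParam){0})); // compressTypeGz assumed

// ... attach the group to an IoRead/IoWrite and move the data ...

// Afterwards, collect the result produced by the size filter
// const Variant *size = ioFilterGroupResult(filterGroup, SIZE_FILTER_TYPE);                // hypothetical key
```
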
@ -1,7 +1,7 @@
|
||||
/***********************************************************************************************************************************
|
||||
IO Sink Filter
|
||||
|
||||
Consume all bytes sent to the filter without passing any on. This filter is useful when running size/hash filters on a remote when
|
||||
Consume all bytes sent to the filter without passing any on. This filter is useful when running size/hash filters on a remote when
|
||||
no data should be returned.
|
||||
***********************************************************************************************************************************/
|
||||
#ifndef COMMON_IO_FILTER_SINK_H
|
||||
|
@ -1,7 +1,7 @@
|
||||
/***********************************************************************************************************************************
|
||||
IO Size Filter
|
||||
|
||||
Count all bytes that pass through the filter. Useful for getting file/IO size if added first in a FilterGroup with IoRead or last
|
||||
Count all bytes that pass through the filter. Useful for getting file/IO size if added first in a FilterGroup with IoRead or last
|
||||
in a FilterGroup with IoWrite.
|
||||
***********************************************************************************************************************************/
|
||||
#ifndef COMMON_IO_FILTER_SIZE_H
|
||||
|
@ -3,7 +3,7 @@ HTTP Client
|
||||
|
||||
A robust HTTP client with connection reuse and automatic retries.
|
||||
|
||||
Using a single object to make multiple requests is more efficient because connections are reused whenever possible. Requests are
|
||||
Using a single object to make multiple requests is more efficient because connections are reused whenever possible. Requests are
|
||||
automatically retried when the connection has been closed by the server. Any 5xx response is also retried.
|
||||
|
||||
Only the HTTPS protocol is currently supported.
|
||||
|
@ -79,8 +79,8 @@ httpHeaderAdd(HttpHeader *this, const String *key, const String *value)
|
||||
const Variant *keyVar = VARSTR(key);
|
||||
const Variant *valueVar = kvGet(this->kv, keyVar);
|
||||
|
||||
// If the key exists then append the new value. The HTTP spec (RFC 2616, Section 4.2) says that if a header appears more than
|
||||
// once then it is equivalent to a single comma-separated header. There appear to be a few exceptions such as Set-Cookie, but
|
||||
// If the key exists then append the new value. The HTTP spec (RFC 2616, Section 4.2) says that if a header appears more than
|
||||
// once then it is equivalent to a single comma-separated header. There appear to be a few exceptions such as Set-Cookie, but
|
||||
// they should not be of concern to us here.
|
||||
if (valueVar != NULL)
|
||||
{
|
||||
|
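
The comma-join rule quoted above is easy to demonstrate in isolation. The sketch below is illustrative only and assumes a plain char * helper; httpHeaderValueJoin() is a hypothetical name, not part of the pgBackRest API:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Hypothetical helper: join an existing header value with a new one per RFC 2616, Section 4.2,
// which treats repeated headers as one comma-separated header. Caller frees the result.
static char *
httpHeaderValueJoin(const char *const valueOld, const char *const valueNew)
{
    char *const result = malloc(strlen(valueOld) + strlen(valueNew) + 3);

    if (result != NULL)
        sprintf(result, "%s, %s", valueOld, valueNew);

    return result;
}

int main(void)
{
    char *const value = httpHeaderValueJoin("gzip", "br");

    printf("accept-encoding: %s\n", value);                 // gzip, br
    free(value);

    return 0;
}
```
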
@ -1,7 +1,7 @@
/***********************************************************************************************************************************
HTTP Header

Object to track HTTP headers. Headers can be marked as redacted so they are not logged.
Object to track HTTP headers. Headers can be marked as redacted so they are not logged.
***********************************************************************************************************************************/
#ifndef COMMON_IO_HTTP_HEADER_H
#define COMMON_IO_HTTP_HEADER_H

@ -139,9 +139,9 @@ httpRequestProcess(HttpRequest *this, bool waitForResponse, bool contentCache)
{
result = httpResponseNew(session, httpRequestVerb(this), contentCache);

// Retry when response code is 5xx. These errors generally represent a server error for a request that
// looks valid. There are a few errors that might be permanently fatal but they are rare and it seems best
// not to try and pick and choose errors in this class to retry.
// Retry when response code is 5xx. These errors generally represent a server error for a request that looks
// valid. There are a few errors that might be permanently fatal but they are rare and it seems best not to
// try and pick and choose errors in this class to retry.
if (httpResponseCode(result) / 100 == HTTP_RESPONSE_CODE_RETRY_CLASS)
THROW_FMT(ServiceError, "[%u] %s", httpResponseCode(result), strZ(httpResponseReason(result)));
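
The retry test above works by integer division: any code from 500 to 599 divided by 100 yields the retry class. A standalone sketch, assuming HTTP_RESPONSE_CODE_RETRY_CLASS is 5, which the comparison implies but this excerpt does not show:

```c
#include <stdbool.h>

#define HTTP_RESPONSE_CODE_RETRY_CLASS 5                    // Assumed value: the 5xx class

// True when the response code is in the retryable server-error class (500-599)
static bool
httpResponseCodeIsRetryable(const unsigned int responseCode)
{
    return responseCode / 100 == HTTP_RESPONSE_CODE_RETRY_CLASS;
}
```
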
@ -145,7 +145,7 @@ httpResponseRead(THIS_VOID, Buffer *buffer, bool block)
if (this->contentRemaining > 0)
{
// If the buffer is larger than the content that needs to be read then limit the buffer size so the read
// won't block or read too far. Casting to size_t is safe on 32-bit because we know the max buffer size is
// won't block or read too far. Casting to size_t is safe on 32-bit because we know the max buffer size is
// defined as less than 2^32 so content remaining can't be more than that.
if (bufRemains(buffer) > this->contentRemaining)
bufLimitSet(buffer, bufSize(buffer) - (bufRemains(buffer) - (size_t)this->contentRemaining));
@ -310,7 +310,7 @@ httpResponseNew(HttpSession *session, const String *verb, bool contentCache)
this->contentRemaining = this->contentSize;
}

// If the server notified of a closed connection then close the client connection after reading content. This
// If the server notified of a closed connection then close the client connection after reading content. This
// prevents doing a retry on the next request when using the closed connection.
if (strEq(headerKey, HTTP_HEADER_CONNECTION_STR) && strEq(strLower(headerValue), HTTP_VALUE_CONNECTION_CLOSE_STR))
this->closeOnContentEof = true;
@ -328,12 +328,12 @@ httpResponseNew(HttpSession *session, const String *verb, bool contentCache)
HTTP_HEADER_CONTENT_LENGTH);
}

// Was content returned in the response? HEAD will report content but not actually return any.
// Was content returned in the response? HEAD will report content but not actually return any.
this->contentExists =
(this->contentChunked || this->contentSize > 0 || this->closeOnContentEof) && !strEq(verb, HTTP_VERB_HEAD_STR);
this->contentEof = !this->contentExists;

// Create an io object, even if there is no content. This makes the logic for readers easier -- they can just check eof
// Create an io object, even if there is no content. This makes the logic for readers easier -- they can just check eof
// rather than also checking if the io object exists.
MEM_CONTEXT_OBJ_BEGIN(this)
{

@ -11,8 +11,8 @@ IO Functions
/***********************************************************************************************************************************
Buffer size

This buffer size will be used for all IO operations that require buffers not passed by the caller. Initially it is set to a
conservative default with the expectation that it will be changed to a new value after options have been loaded. In general callers
This buffer size will be used for all IO operations that require buffers not passed by the caller. Initially it is set to a
conservative default with the expectation that it will be changed to a new value after options have been loaded. In general callers
should set their buffer size using ioBufferSize() but there may be cases where an alternative buffer size makes sense.
***********************************************************************************************************************************/
#define IO_BUFFER_BLOCK_SIZE (8 * 1024)

@ -276,7 +276,7 @@ ioReadLineParam(IoRead *this, bool allowEof)
ASSERT(this != NULL);
ASSERT(this->pub.opened && !this->pub.closed);

// Allocate the output buffer if it has not already been allocated. This buffer is not allocated at object creation because it
// Allocate the output buffer if it has not already been allocated. This buffer is not allocated at object creation because it
// is not always used.
if (this->output == NULL)
{

@ -1,10 +1,10 @@
/***********************************************************************************************************************************
IO Read Interface

Objects that read from some IO source (file, socket, etc.) are implemented using this interface. All objects are required to
implement IoReadProcess and can optionally implement IoReadOpen, IoReadClose, or IoReadEof. IoReadOpen and IoReadClose can be used
to allocate/open or deallocate/free resources. If IoReadEof is not implemented then ioReadEof() will always return false. An
example of an IoRead object is IoBufferRead.
Objects that read from some IO source (file, socket, etc.) are implemented using this interface. All objects are required to
implement IoReadProcess and can optionally implement IoReadOpen, IoReadClose, or IoReadEof. IoReadOpen and IoReadClose can be used
to allocate/open or deallocate/free resources. If IoReadEof is not implemented then ioReadEof() will always return false. An example
of an IoRead object is IoBufferRead.
***********************************************************************************************************************************/
#ifndef COMMON_IO_READ_H
#define COMMON_IO_READ_H
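
The optional-callback contract in this header can be sketched with plain function pointers. The names below are illustrative, not the actual IoRead interface definition:

```c
#include <stdbool.h>
#include <stddef.h>

// Illustrative read interface: process is required, the rest may be NULL
typedef struct IoReadSketch
{
    size_t (*process)(void *driver, unsigned char *buffer, size_t size);   // Required
    bool (*open)(void *driver);                                            // Optional
    void (*close)(void *driver);                                           // Optional
    bool (*eof)(void *driver);                                             // Optional
} IoReadSketch;

// Mirror the documented rule: when eof is not implemented, always report false
static bool
ioReadSketchEof(const IoReadSketch *const this, void *const driver)
{
    return this->eof == NULL ? false : this->eof(driver);
}
```
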
@ -1,9 +1,9 @@
/***********************************************************************************************************************************
IO Write Interface

Objects that write to some IO destination (file, socket, etc.) are implemented using this interface. All objects are required to
implement IoWriteProcess and can optionally implement IoWriteOpen or IoWriteClose. IoWriteOpen and IoWriteClose can be used to
allocate/open or deallocate/free resources. An example of an IoWrite object is IoBufferWrite.
Objects that write to some IO destination (file, socket, etc.) are implemented using this interface. All objects are required to
implement IoWriteProcess and can optionally implement IoWriteOpen or IoWriteClose. IoWriteOpen and IoWriteClose can be used to
allocate/open or deallocate/free resources. An example of an IoWrite object is IoBufferWrite.
***********************************************************************************************************************************/
#ifndef COMMON_IO_WRITE_H
#define COMMON_IO_WRITE_H

@ -432,7 +432,7 @@ lockAcquire(const LockAcquireParam param)

bool result = true;

// Don't allow failures when locking more than one file. This makes cleanup difficult and there are no known use cases.
// Don't allow failures when locking more than one file. This makes cleanup difficult and there are no known use cases.
ASSERT(!param.returnOnNoLock || lockLocal.type != lockTypeAll);

// Don't allow another lock if one is already held

@ -40,7 +40,7 @@ Functions
// Initialize lock module
FN_EXTERN void lockInit(const String *path, const String *execId, const String *stanza, LockType type);

// Acquire a lock type. This will involve locking one or more files on disk depending on the lock type. Most operations only take a
// Acquire a lock type. This will involve locking one or more files on disk depending on the lock type. Most operations only take a
// single lock (archive or backup), but the stanza commands all need to lock both.
typedef struct LockAcquireParam
{
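
As a rough illustration of locking "one or more files on disk", a single POSIX advisory file lock could be taken as below. This is a sketch only; the real module also records data such as the pid in the lock file and manages multiple lock types:

```c
#include <fcntl.h>
#include <sys/file.h>
#include <unistd.h>

// Open (or create) the lock file and take an exclusive, non-blocking lock.
// Returns the open descriptor on success or -1 when the lock is held elsewhere.
static int
lockFileAcquire(const char *const lockFile)
{
    const int fd = open(lockFile, O_CREAT | O_RDWR, 0640);

    if (fd == -1)
        return -1;

    if (flock(fd, LOCK_EX | LOCK_NB) == -1)
    {
        close(fd);
        return -1;
    }

    return fd;                                              // Keep open to hold the lock
}
```
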
@ -49,8 +49,8 @@ call site by supplying commonly-used values.
Note that it's possible that not all the macros below will appear in the code. In particular the ERROR and ASSERT macros should not
be used directly. They are included for completeness and future usage.
***********************************************************************************************************************************/
// Define a macro to test logAny() that can be removed when performing coverage testing. Checking logAny() saves a function call
// for logging calls that won't be output anywhere, but since the macro contains a branch it causes coverage problems.
// Define a macro to test logAny() that can be removed when performing coverage testing. Checking logAny() saves a function call for
// logging calls that won't be output anywhere, but since the macro contains a branch it causes coverage problems.
#ifdef DEBUG_COVERAGE
#define IF_LOG_ANY(logLevel)
#else

@ -41,10 +41,10 @@ Useful for ensuring coverage in cases where compared values may be always ascend
If the "condition" (a compile-time-constant expression) evaluates to false then throw a compile error using the "message" (a string
literal).

gcc 4.6 and up supports _Static_assert(), but there are bizarre syntactic placement restrictions. Macros STATIC_ASSERT_STMT() and
gcc 4.6 and up supports _Static_assert(), but there are bizarre syntactic placement restrictions. Macros STATIC_ASSERT_STMT() and
STATIC_ASSERT_EXP() make it safe to use as a statement or in an expression, respectively.

Otherwise we fall back on a kluge that assumes the compiler will complain about a negative width for a struct bit-field. This will
Otherwise we fall back on a kluge that assumes the compiler will complain about a negative width for a struct bit-field. This will
not include a helpful error message, but it beats not getting an error at all. Note that when std=c99 it looks like gcc is using the
same kluge.
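
The negative bit-field kluge described above is compact enough to show whole. This sketch is modeled on the well-known PostgreSQL fallback: a false compile-time condition produces a bit-field of width -1, which no compiler accepts, and the message argument goes unused, which is the stated limitation:

```c
// Compile-time assertion without _Static_assert(): a false condition yields a
// negative bit-field width and therefore a compile error
#define STATIC_ASSERT_STMT(condition, message)                                                                                     \
    ((void)sizeof(struct {int staticAssertFailure : (condition) ? 1 : -1;}))

int main(void)
{
    STATIC_ASSERT_STMT(sizeof(int) >= 4, "int must be at least 32 bits");      // Compiles
    // STATIC_ASSERT_STMT(1 == 2, "unreachable");                              // Would fail to compile

    return 0;
}
```
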
@ -186,7 +186,7 @@ memContextCallbackOne(MemContext *const memContext)
/***********************************************************************************************************************************
Top context

The top context always exists and can never be freed. All other contexts are children of the top context. The top context is
The top context always exists and can never be freed. All other contexts are children of the top context. The top context is
generally used to allocate memory that exists for the life of the program.
***********************************************************************************************************************************/
static struct MemContextTop

@ -1,10 +1,10 @@
/***********************************************************************************************************************************
Memory Context Manager

Memory is allocated inside contexts and all allocations (and child memory contexts) are freed when the context is freed. The goal
Memory is allocated inside contexts and all allocations (and child memory contexts) are freed when the context is freed. The goal
is to make memory management both easier and more performant.

Memory context management is encapsulated in macros so there is rarely any need to call the functions directly. Memory allocations
Memory context management is encapsulated in macros so there is rarely any need to call the functions directly. Memory allocations
are mostly performed in the constructors of objects and reallocated as needed.

See the sections on memory context management and memory allocations below for more details.
@ -26,15 +26,15 @@ typedef struct MemContext MemContext;
/***********************************************************************************************************************************
Define initial number of memory contexts

No space is reserved for child contexts when a new context is created because most contexts will be leaves. When a child context is
requested then space will be reserved for this many child contexts initially. When more space is needed the size will be doubled.
No space is reserved for child contexts when a new context is created because most contexts will be leaves. When a child context is
requested then space will be reserved for this many child contexts initially. When more space is needed the size will be doubled.
***********************************************************************************************************************************/
#define MEM_CONTEXT_INITIAL_SIZE 4

/***********************************************************************************************************************************
Define initial number of memory allocations

Space is reserved for this many allocations when a context is created. When more space is needed the size will be doubled.
Space is reserved for this many allocations when a context is created. When more space is needed the size will be doubled.
***********************************************************************************************************************************/
#define MEM_CONTEXT_ALLOC_INITIAL_SIZE 4

@ -78,7 +78,7 @@ FN_EXTERN void *memNew(size_t size);
// Allocate requested number of pointers and initialize them to NULL
FN_EXTERN void *memNewPtrArray(size_t size);

// Reallocate to the new size. Original buffer pointer is undefined on return.
// Reallocate to the new size. Original buffer pointer is undefined on return.
FN_EXTERN void *memResize(const void *buffer, size_t size);

// Free memory allocation
@ -221,7 +221,7 @@ memContextSwitchBack();
<The memory context must now be kept or discarded>
memContextKeep()/memContextDiscard();

There is no need to implement any error handling. The mem context system will automatically clean up any mem contexts that were
There is no need to implement any error handling. The mem context system will automatically clean up any mem contexts that were
created but not marked as keep when an error occurs and reset the current mem context to whatever it was at the beginning of the
nearest try block.

@ -283,8 +283,8 @@ FN_EXTERN void memContextMove(MemContext *this, MemContext *parentNew);
// Set a function that will be called when this mem context is freed
FN_EXTERN void memContextCallbackSet(MemContext *this, void (*callbackFunction)(void *), void *);

// Clear the callback function so it won't be called when the mem context is freed. This is usually done in the object free method
// after resources have been freed but before memContextFree() is called. The goal is to prevent the object free method from being
// Clear the callback function so it won't be called when the mem context is freed. This is usually done in the object free method
// after resources have been freed but before memContextFree() is called. The goal is to prevent the object free method from being
// called more than once.
FN_EXTERN void memContextCallbackClear(MemContext *this);
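
A sketch of the free-method ordering this comment describes. The object type and resource field are hypothetical; memContextCallbackClear() and memContextFree() are the functions named in this header:

```c
#include <unistd.h>

typedef struct Example
{
    MemContext *memContext;                                 // Context that owns this object
    int fd;                                                 // Hypothetical non-memory resource
} Example;

static void
exampleFree(Example *const this)
{
    close(this->fd);                                        // Release resources first
    memContextCallbackClear(this->memContext);              // Prevent a second callback
    memContextFree(this->memContext);                       // Free the object's memory
}
```
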
@ -306,8 +306,8 @@ FN_EXTERN MemContext *memContextCurrent(void);
// Prior context, i.e. the context that was current before the last memContextSwitch()
FN_EXTERN MemContext *memContextPrior(void);

// "top" context. This context is created at initialization and is always present, i.e. it is never freed. The top context is a
// good place to put long-lived mem contexts since they won't be automatically freed until the program exits.
// "top" context. This context is created at initialization and is always present, i.e. it is never freed. The top context is a good
// place to put long-lived mem contexts since they won't be automatically freed until the program exits.
FN_EXTERN MemContext *memContextTop(void);

// Get total size of mem context and all children
@ -326,7 +326,7 @@ Macros for function logging
/***********************************************************************************************************************************
Internal functions
***********************************************************************************************************************************/
// Clean up mem contexts after an error. Should only be called from error handling routines.
// Clean up mem contexts after an error. Should only be called from error handling routines.
FN_EXTERN void memContextClean(unsigned int tryDepth, bool fatal);

#endif

@ -174,7 +174,7 @@ stackTraceParamBuffer(const char *paramName)
data->paramOverflow = true;

// There's no way to stop the parameter from being formatted so we reserve a space at the end where the format can safely
// take place and not disturb the rest of the buffer. Hopefully overflows just won't happen but we need to be prepared in
// take place and not disturb the rest of the buffer. Hopefully overflows just won't happen but we need to be prepared in
// case of runaway recursion or some other issue that fills the buffer because we don't want a segfault.
return stackTraceLocal.functionParamBuffer + sizeof(stackTraceLocal.functionParamBuffer) - STACK_TRACE_PARAM_MAX;
}

@ -157,8 +157,8 @@ Macros for constant buffers

Frequently used constant buffers can be declared with these macros at compile time rather than dynamically at run time.

Note that buffers created in this way are declared as const so can't be modified or freed by the buf*() methods. Casting to
Buffer * will result in a segfault.
Note that buffers created in this way are declared as const so can't be modified or freed by the buf*() methods. Casting to Buffer *
will result in a segfault.

By convention all buffer constant identifiers are appended with _BUF.
***********************************************************************************************************************************/

@ -45,7 +45,7 @@ Integer types (packTypeMapData[type].valueMultiBit) when an unsigned value is >

Example: 5e021f
5 = signed int 64 type
e = tag byte low bits: 1 1 1 0 meaning:
e = tag byte low bits: 1 1 1 0 meaning:
"more value indicator bit set to 1" - the actual value is < -1 or > 0
"more ID delta indicator bit" - there exists a gap (i.e. NULLs are not stored so there is a gap between the stored IDs)
"ID delta low order bits" - here the bit 1 is set to 1 and bit 0 is not so the ID delta has the second low order bit set but
@ -63,7 +63,7 @@ String, binary types, and boolean (packTypeMapData[type].valueSingleBit):

Example: 8c090673616d706c65
8 = string type
c = tag byte low bits: 1 1 0 0 meaning:
c = tag byte low bits: 1 1 0 0 meaning:
"value bit" - there is data
"more ID delta indicator bit" - there exists a gap (i.e. NULLs are not stored so there is a gap between the stored IDs)
09 = since neither "ID delta low order bits" is set in the tag, they are both 0, so shifting 9 left by 2, the 2 low order bits
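
To make the first worked example concrete, the low nibble of the leading tag byte 0x5e can be decoded with shifts and masks. The bit positions follow the description above; this is an illustration, not the full pack reader:

```c
#include <stdio.h>

int main(void)
{
    const unsigned int tag = 0x5e;                           // First byte of the example pack 5e021f
    const unsigned int low = tag & 0xf;                      // e = binary 1 1 1 0

    printf("more value indicator: %u\n", (low >> 3) & 1);    // 1 - actual value is < -1 or > 0
    printf("more ID delta indicator: %u\n", (low >> 2) & 1); // 1 - gap between stored field IDs
    printf("ID delta low order bits: %u\n", low & 3);        // 2 - bit 1 set, bit 0 clear

    return 0;
}
```
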
@ -19,7 +19,7 @@ incurs extra cost, but depending on the field type larger gaps may require addit

The standard default is the C default for that type (e.g. bool = false, int = 0) but can be changed with the .defaultValue
parameter. For example, pckWriteBoolP(write, false, .defaultWrite = true) will write a 0 (i.e. false) with a field ID into the pack,
but pckWriteBoolP(write, false) will not write to the pack, it will simply skip the ID. Note that
but pckWriteBoolP(write, false) will not write to the pack, it will simply skip the ID. Note that
pckWriteStrP(packWrite, NULL, .defaultWrite = true) is not valid since there is no way to explicitly write a NULL.

NULLs are not stored in a pack and are therefore not typed. A NULL is essentially just a gap in the field IDs. Fields that are

@ -770,8 +770,8 @@ strPathAbsolute(const String *this, const String *base)
{
result = strDup(this);
}
// Else we'll need to construct the absolute path. You would hope we could use realpath() here but it is so broken in the
// Posix spec that is seems best avoided.
// Else we'll need to construct the absolute path. You would hope we could use realpath() here but it is so broken in the Posix
// spec that is seems best avoided.
else
{
ASSERT(base != NULL);

@ -215,8 +215,8 @@ Macros for constant strings

Frequently used constant strings can be declared with these macros at compile time rather than dynamically at run time.

Note that strings created in this way are declared as const so can't be modified or freed by the str*() methods. Casting to
String * will result in a segfault due to modifying read-only memory.
Note that strings created in this way are declared as const so can't be modified or freed by the str*() methods. Casting to String *
will result in a segfault due to modifying read-only memory.

By convention all string constant identifiers are appended with _STR.
***********************************************************************************************************************************/

@ -264,7 +264,7 @@ varBoolForce(const Variant *this)

case varTypeString:
{
// List of false/true boolean string values. Note that false/true values must be equal.
// List of false/true boolean string values. Note that false/true values must be equal.
static const char *const boolString[] =
{
"n", "f", "0", "no", FALSE_Z, "off",
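
The note that false/true values "must be equal" refers to the counts: the first half of the array holds false strings and the second half the matching true strings, so the index of a match determines the result. A minimal sketch, with plain literals standing in for the FALSE_Z/TRUE_Z constants:

```c
#include <stdbool.h>
#include <string.h>

// Return true if value parsed as a boolean string; the parsed result is stored in *result
static bool
boolFromZ(const char *const value, bool *const result)
{
    static const char *const boolString[] =
    {
        "n", "f", "0", "no", "false", "off",
        "y", "t", "1", "yes", "true", "on",
    };
    const unsigned int total = sizeof(boolString) / sizeof(boolString[0]);

    for (unsigned int idx = 0; idx < total; idx++)
    {
        if (strcmp(value, boolString[idx]) == 0)
        {
            *result = idx >= total / 2;                     // Second half of the list is true
            return true;
        }
    }

    return false;                                           // Not a recognized boolean string
}
```
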
@ -162,7 +162,7 @@ Macros for constant variants

Frequently used constant variants can be declared with these macros at compile time rather than dynamically at run time.

Note that variants created in this way are declared as const so can't be modified or freed by the var*() methods. Casting to
Note that variants created in this way are declared as const so can't be modified or freed by the var*() methods. Casting to
Variant * will generally result in a segfault.

By convention all variant constant identifiers are appended with _VAR.

@ -38,10 +38,10 @@ struct XmlDocument
/***********************************************************************************************************************************
Error handler

For now this is a noop until more detailed error messages are needed. The function is called multiple times per error, so the
For now this is a noop until more detailed error messages are needed. The function is called multiple times per error, so the
messages need to be accumulated and then returned together.

This empty function is required because without it libxml2 will dump errors to stdout. Really.
This empty function is required because without it libxml2 will dump errors to stdout. Really.
***********************************************************************************************************************************/
static void
xmlErrorHandler(void *ctx, const char *format, ...)
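
Registering a handler like this with libxml2 uses the standard xmlSetGenericErrorFunc() call. A minimal sketch, with illustrative handler and init names:

```c
#include <libxml/xmlerror.h>

// No-op handler: required because without one libxml2 writes errors to stdout
static void
xmlErrorHandlerNoop(void *const ctx, const char *const format, ...)
{
    (void)ctx;
    (void)format;                                           // Intentionally ignore the error
}

static void
xmlInitErrorHandler(void)
{
    xmlSetGenericErrorFunc(NULL, xmlErrorHandlerNoop);
}
```
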
@ -11,31 +11,31 @@ System User/Group Management
/***********************************************************************************************************************************
Functions
***********************************************************************************************************************************/
// Call this initializer before using any of the functions below. Safe to call more than once.
// Call this initializer before using any of the functions below. Safe to call more than once.
FN_EXTERN void userInit(void);

// Get the primary group id of the current user
FN_EXTERN gid_t groupId(void);

// Get the id of the specified group. Returns (gid_t)-1 if not found.
// Get the id of the specified group. Returns (gid_t)-1 if not found.
FN_EXTERN gid_t groupIdFromName(const String *groupName);

// Get the primary group name of the current user. Returns NULL if there is no mapping.
// Get the primary group name of the current user. Returns NULL if there is no mapping.
FN_EXTERN const String *groupName(void);

// Get the group name from a group id. Returns NULL if the group id is invalid or there is no mapping.
// Get the group name from a group id. Returns NULL if the group id is invalid or there is no mapping.
FN_EXTERN String *groupNameFromId(gid_t groupId);

// Get the id of the current user
FN_EXTERN uid_t userId(void);

// Get the id of the specified user. Returns (uid_t)-1 if not found.
// Get the id of the specified user. Returns (uid_t)-1 if not found.
FN_EXTERN uid_t userIdFromName(const String *userName);

// Get the name of the current user. Returns NULL if there is no mapping.
// Get the name of the current user. Returns NULL if there is no mapping.
FN_EXTERN const String *userName(void);

// Get the user name from a user id. Returns NULL if the user id is invalid or there is no mapping.
// Get the user name from a user id. Returns NULL if the user id is invalid or there is no mapping.
FN_EXTERN String *userNameFromId(uid_t userId);

// Is the current user the root user?
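
The (gid_t)-1 and NULL conventions above mirror the underlying POSIX lookups. A minimal sketch of the group-id case using getgrnam(), with a plain char * in place of the project's String type:

```c
#include <grp.h>
#include <sys/types.h>

// Look up a group id by name, returning (gid_t)-1 when the group does not exist
static gid_t
groupIdFromNameZ(const char *const groupName)
{
    const struct group *const group = getgrnam(groupName);

    return group == NULL ? (gid_t)-1 : group->gr_gid;
}
```
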
@ -1,8 +1,8 @@
/***********************************************************************************************************************************
Command and Option Configuration

This module serves as a database for the configuration options. The configuration rules reside in config/define.c and
config/parse.c sets the command and options and determines which options are valid for a command.
This module serves as a database for the configuration options. The configuration rules reside in config/define.c and config/parse.c
sets the command and options and determines which options are valid for a command.
***********************************************************************************************************************************/
#ifndef CONFIG_CONFIG_H
#define CONFIG_CONFIG_H
@ -16,7 +16,7 @@ config/parse.c sets the command and options and determines which options are val
/***********************************************************************************************************************************
Command Role Enum

Commands may have multiple processes that work together to implement their functionality. These roles allow each process to know
Commands may have multiple processes that work together to implement their functionality. These roles allow each process to know
what it is supposed to do.
***********************************************************************************************************************************/
typedef enum
@ -24,11 +24,11 @@ typedef enum
// Called directly by the user. This is the main process of the command that may or may not spawn other command roles.
cfgCmdRoleMain = 0,

// Async worker that is spawned so the main process can return a result while work continues. An async worker may spawn local
// or remote workers.
// Async worker that is spawned so the main process can return a result while work continues. An async worker may spawn local or
// remote workers.
cfgCmdRoleAsync,

// Local worker for parallelizing jobs. A local work may spawn a remote worker.
// Local worker for parallelizing jobs. A local work may spawn a remote worker.
cfgCmdRoleLocal,

// Remote worker for accessing resources on another host
@ -221,7 +221,7 @@ FN_EXTERN bool cfgOptionIdxTest(ConfigOption optionId, unsigned int optionIdx);
/***********************************************************************************************************************************
Option Source Enum

Defines where an option was sourced from. The source is only needed when determining what parameters should be passed to a remote
Defines where an option was sourced from. The source is only needed when determining what parameters should be passed to a remote
process.
***********************************************************************************************************************************/
typedef enum
@ -234,7 +234,7 @@ typedef enum
/***********************************************************************************************************************************
Load Functions

Used primarily by modules that need to manipulate the configuration. These modules include, but are not limited to, config/parse.c,
Used primarily by modules that need to manipulate the configuration. These modules include, but are not limited to, config/parse.c,
config/load.c.
***********************************************************************************************************************************/
// Was help requested?
@ -245,8 +245,8 @@ FN_EXTERN void cfgCommandSet(ConfigCommand commandId, ConfigCommandRole commandR
// pgBackRest exe
FN_EXTERN const String *cfgExe(void);

// Set option default. Option defaults are generally not set in advance because the vast majority of them are never used. It is
// more efficient to generate them when they are requested. Some defaults are (e.g. the exe path) are set at runtime.
// Set option default. Option defaults are generally not set in advance because the vast majority of them are never used. It is more
// efficient to generate them when they are requested. Some defaults are (e.g. the exe path) are set at runtime.
FN_EXTERN void cfgOptionDefaultSet(ConfigOption optionId, const Variant *defaultValue);

// Was the option negated?

@ -12,7 +12,7 @@ Functions
***********************************************************************************************************************************/
// Generate a list of options required for execution of a new command, overriding options with the values in optionReplace when
// present. If local is set then the new command must have access to the local configuration files and environment since only
// options originally passed on the command-line will be added to the list. Use quote when the options will be used as a
// options originally passed on the command-line will be added to the list. Use quote when the options will be used as a
// concatenated string rather than being passed directly to exec*() as a list.
FN_EXTERN StringList *cfgExecParam(
ConfigCommand commandId, ConfigCommandRole commandRoleId, const KeyValue *optionReplace, bool local, bool quote);

@ -1417,7 +1417,7 @@ cfgFileLoad(
}

/***********************************************************************************************************************************
??? Add validation of section names and check all sections for invalid options in the check command. It's too expensive to add the
??? Add validation of section names and check all sections for invalid options in the check command. It's too expensive to add the
logic to this critical path code.
***********************************************************************************************************************************/
FN_EXTERN void
@ -2057,7 +2057,7 @@ cfgParse(const Storage *const storage, const unsigned int argListSize, const cha

for (unsigned int optionOrderIdx = 0; optionOrderIdx < CFG_OPTION_TOTAL; optionOrderIdx++)
{
// Validate options based on the option resolve order. This allows resolving all options in a single pass.
// Validate options based on the option resolve order. This allows resolving all options in a single pass.
ConfigOption optionId = optionResolveOrder[optionOrderIdx];

// Skip this option if it is not valid
src/configure (vendored, 4 changes)
@ -2332,7 +2332,7 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
# ----------------------------------------------------------------------------------------------------------------------------------
: ${CFLAGS=""}

# Build C standard based on the host type. C99 is required and other flags are added depending on the host.
# Build C standard based on the host type. C99 is required and other flags are added depending on the host.
# ----------------------------------------------------------------------------------------------------------------------------------

@ -5599,4 +5599,4 @@ if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
printf "%s\n" "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
fi

# Generated from src/build/configure.ac sha1 380d8dd159305ac9e6da7816064a41012205ce6f
# Generated from src/build/configure.ac sha1 5bd14429291b37c7b69a19b6086863c15530e138
src/db/db.c (12 changes)
@ -37,7 +37,7 @@ struct Db
};

/***********************************************************************************************************************************
Close protocol connection. No need to close a locally created PgClient since it has its own destructor.
Close protocol connection. No need to close a locally created PgClient since it has its own destructor.
***********************************************************************************************************************************/
static void
dbFreeResource(THIS_VOID)
@ -256,11 +256,11 @@ dbOpen(Db *this)
this->pub.dbTimeout = cfgOptionUInt64(cfgOptDbTimeout);
}

// Set search_path to prevent overrides of the functions we expect to call. All queries should also be schema-qualified,
// Set search_path to prevent overrides of the functions we expect to call. All queries should also be schema-qualified,
// but this is an extra level protection.
dbExec(this, STRDEF("set search_path = 'pg_catalog'"));

// Set client encoding to UTF8. This is the only encoding (other than ASCII) that we can safely work with.
// Set client encoding to UTF8. This is the only encoding (other than ASCII) that we can safely work with.
dbExec(this, STRDEF("set client_encoding = 'UTF8'"));

// Query the version and data_directory. Be sure the update the total in the null check below when adding/removing columns.
@ -291,7 +291,7 @@ dbOpen(Db *this)
// Restart the read to get the data
read = pckReadNew(row);

// Strip the minor version off since we don't need it. In the future it might be a good idea to warn users when they are
// Strip the minor version off since we don't need it. In the future it might be a good idea to warn users when they are
// running an old minor version.
this->pub.pgVersion = (unsigned int)pckReadI32P(read) / 100 * 100;

@ -382,7 +382,7 @@ dbBackupStart(Db *const this, const bool startFast, const bool stopAuto, const b
MEM_CONTEXT_TEMP_BEGIN()
{
// Acquire the backup advisory lock to make sure that backups are not running from multiple backup servers against the same
// database cluster. This lock helps make the stop-auto option safe.
// database cluster. This lock helps make the stop-auto option safe.
if (!pckReadBoolP(dbQueryColumn(this, STRDEF("select pg_catalog.pg_try_advisory_lock(" PG_BACKUP_ADVISORY_LOCK ")::bool"))))
{
THROW(
@ -659,7 +659,7 @@ dbReplayWait(Db *const this, const String *const targetLsn, const uint32_t targe
PackRead *read = dbQueryRow(this, query);
replayLsn = pckReadStrP(read);

// Error when replayLsn is null which indicates that this is not a standby. This should have been sorted out before we
// Error when replayLsn is null which indicates that this is not a standby. This should have been sorted out before we
// connected but it's possible that the standby was promoted in the meantime.
if (replayLsn == NULL)
{
@ -1,7 +1,7 @@
/***********************************************************************************************************************************
Database Client

Implements the required PostgreSQL queries and commands. Notice that there is no general purpose query function -- all queries are
Implements the required PostgreSQL queries and commands. Notice that there is no general purpose query function -- all queries are
expected to be embedded in this object.
***********************************************************************************************************************************/
#ifndef DB_DB_H

@ -24,9 +24,9 @@ Constants
/***********************************************************************************************************************************
Function types for loading and saving
***********************************************************************************************************************************/
// The purpose of this callback is to attempt a load (from file or otherwise). Return true when the load is successful or throw an
// error. Return false when there are no more loads to try, but always make at least one load attempt. The try parameter will
// start at 0 and be incremented on each call.
// The purpose of this callback is to attempt a load (from file or otherwise). Return true when the load is successful or throw an
// error. Return false when there are no more loads to try, but always make at least one load attempt. The try parameter will start
// at 0 and be incremented on each call.
// {uncrustify_off - uncrustify unable to parse this statement}
typedef bool InfoLoadCallback(void *data, unsigned int try);
// {uncrustify_on}
@ -46,7 +46,7 @@ struct Manifest
/***********************************************************************************************************************************
Internal functions to add types to their lists
***********************************************************************************************************************************/
// Helper to add owner to the owner list if it is not there already and return the pointer. This saves a lot of space.
// Helper to add owner to the owner list if it is not there already and return the pointer. This saves a lot of space.
static const String *
manifestOwnerCache(Manifest *this, const String *owner)
{
@ -865,7 +865,7 @@ manifestBuildInfo(
ASSERT(pgPath != NULL);
ASSERT(info != NULL);

// Skip any path/file/link that begins with pgsql_tmp. The files are removed when the server is restarted and the directories
// Skip any path/file/link that begins with pgsql_tmp. The files are removed when the server is restarted and the directories
// are recreated.
if (strBeginsWithZ(info->name, PG_PREFIX_PGSQLTMP))
FUNCTION_TEST_RETURN_VOID();
@ -997,9 +997,9 @@ manifestBuildInfo(
strZ(manifestName));
}

// Skip pg_internal.init since it is recreated on startup. It's also possible, (though unlikely) that a temp file with
// the creating process id as the extension can exist so skip that as well. This seems to be a bug in PostgreSQL since
// the temp file should be removed on startup. Use regExpMatchOne() here instead of preparing a regexp in advance since
// Skip pg_internal.init since it is recreated on startup. It's also possible, (though unlikely) that a temp file with
// the creating process id as the extension can exist so skip that as well. This seems to be a bug in PostgreSQL since
// the temp file should be removed on startup. Use regExpMatchOne() here instead of preparing a regexp in advance since
// the likelihood of needing the regexp should be very small.
if (dbPath && strBeginsWithZ(info->name, PG_FILE_PGINTERNALINIT) &&
(strSize(info->name) == sizeof(PG_FILE_PGINTERNALINIT) - 1 ||
@ -1086,8 +1086,8 @@ manifestBuildInfo(
// -------------------------------------------------------------------------------------------------------------------------
case storageTypeLink:
{
// If the destination is another link then error. In the future we'll allow this by following the link chain to the
// eventual destination but for now we are trying to maintain compatibility during the migration. To do this check we
// If the destination is another link then error. In the future we'll allow this by following the link chain to the
// eventual destination but for now we are trying to maintain compatibility during the migration. To do this check we
// need to read outside of the data directory but it is a read-only operation so is considered safe.
const String *linkDestinationAbsolute = strPathAbsolute(info->linkDestination, pgPath);

@ -1122,7 +1122,7 @@ manifestBuildInfo(
// Is this a tablespace?
if (strEq(manifestParentName, STRDEF(MANIFEST_TARGET_PGDATA "/" MANIFEST_TARGET_PGTBLSPC)))
{
// Strip pg_data off the manifest name so it begins with pg_tblspc instead. This reflects how the files are stored
// Strip pg_data off the manifest name so it begins with pg_tblspc instead. This reflects how the files are stored
// in the backup directory.
manifestName = strSub(manifestName, sizeof(MANIFEST_TARGET_PGDATA));

@ -1146,7 +1146,7 @@ manifestBuildInfo(
pckReadArrayEndP(read);
}

// Error if the tablespace could not be found. ??? This seems excessive, perhaps just warn here?
// Error if the tablespace could not be found. ??? This seems excessive, perhaps just warn here?
if (target.tablespaceName == NULL)
{
THROW_FMT(
@ -1161,7 +1161,7 @@ manifestBuildInfo(
if (target.tablespaceName == NULL)
target.tablespaceName = strNewFmt("ts%s", strZ(info->name));

// Add a dummy pg_tblspc path entry if it does not already exist. This entry will be ignored by restore but it is
// Add a dummy pg_tblspc path entry if it does not already exist. This entry will be ignored by restore but it is
// part of the original manifest format so we need to have it.
lstSort(buildData->manifest->pub.pathList, sortOrderAsc);
const ManifestPath *pathBase = manifestPathFind(buildData->manifest, MANIFEST_TARGET_PGDATA_STR);
@ -1226,8 +1226,8 @@ manifestBuildInfo(
target.file = strBase(info->linkDestination);
}
}
// Else dummy up the target with a destination so manifestLinkCheck() can be run. This is so errors about links with
// destinations in PGDATA will take precedence over missing a destination. We will probably simplify this once the
// Else dummy up the target with a destination so manifestLinkCheck() can be run. This is so errors about links with
// destinations in PGDATA will take precedence over missing a destination. We will probably simplify this once the
// migration is done and it doesn't matter which error takes precedence.
else
target.path = info->linkDestination;
@ -1416,8 +1416,8 @@ manifestNewBuild(
lstSort(this->pub.pathList, sortOrderAsc);
lstSort(this->pub.targetList, sortOrderAsc);

// Remove unlogged relations from the manifest. This can't be done during the initial build because of the requirement
// to check for _init files which will sort after the vast majority of the relation files. We could check storage for
// Remove unlogged relations from the manifest. This can't be done during the initial build because of the requirement
// to check for _init files which will sort after the vast majority of the relation files. We could check storage for
// each _init file but that would be expensive.
// -------------------------------------------------------------------------------------------------------------------------
RegExp *relationExp = regExpNew(strNewFmt("^" DB_PATH_EXP "/" RELATION_EXP "$", strZ(buildData.tablespaceId)));
@ -1432,7 +1432,7 @@ manifestNewBuild(

while (fileIdx < manifestFileTotal(this))
{
// If this file looks like a relation. Note that this never matches on _init forks.
// If this file looks like a relation. Note that this never matches on _init forks.
const String *const filePathName = manifestFileNameGet(this, fileIdx);

if (regExpMatch(relationExp, filePathName))
@ -1510,12 +1510,12 @@ manifestBuildValidate(Manifest *this, bool delta, time_t copyStart, CompressType

MEM_CONTEXT_BEGIN(this->pub.memContext)
{
// Store the delta option. If true we can skip checks that automatically enable delta.
// Store the delta option. If true we can skip checks that automatically enable delta.
this->pub.data.backupOptionDelta = varNewBool(delta);

// If online then add one second to the copy start time to allow for database updates during the last second that the
// manifest was being built. It's up to the caller to actually wait the remainder of the second, but for comparison
// purposes we want the time when the waiting started.
// manifest was being built. It's up to the caller to actually wait the remainder of the second, but for comparison purposes
// we want the time when the waiting started.
this->pub.data.backupTimestampCopyStart = copyStart + (this->pub.data.backupOptionOnline ? 1 : 0);

// This value is not needed in this function, but it is needed for resumed manifests and this is last place to set it before
@ -1608,7 +1608,7 @@ manifestBuildIncr(Manifest *this, const Manifest *manifestPrior, BackupType type
this->pub.data.backupOptionDelta = BOOL_TRUE_VAR;
}

// Check for anomalies between manifests if delta is not already enabled. This can't be combined with the main comparison
// Check for anomalies between manifests if delta is not already enabled. This can't be combined with the main comparison
// loop below because delta changes the behavior of that loop.
if (!varBool(this->pub.data.backupOptionDelta))
{
@ -1903,8 +1903,8 @@ typedef struct ManifestLoadData
const Variant *pathUserDefault; // Path default user
} ManifestLoadData;

// Helper to transform a variant that could be boolean or string into a string. If the boolean is false return NULL else return
// the string. The boolean cannot be true.
// Helper to transform a variant that could be boolean or string into a string. If the boolean is false return NULL else return the
// string. The boolean cannot be true.
static const String *
manifestOwnerGet(const Variant *owner)
{
@ -1914,7 +1914,7 @@ manifestOwnerGet(const Variant *owner)

ASSERT(owner != NULL);

// If bool then it should be false. This indicates that the owner could not be mapped to a name during the backup.
// If bool then it should be false. This indicates that the owner could not be mapped to a name during the backup.
if (varType(owner) == varTypeBool)
{
CHECK(FormatError, !varBool(owner), "owner bool must be false");
@ -2298,8 +2298,8 @@ manifestLoadCallback(void *callbackData, const String *const section, const Stri
// Historically this option meant to add gz compression
else if (strEqZ(key, MANIFEST_KEY_OPTION_COMPRESS))
manifest->pub.data.backupOptionCompressType = varBool(jsonToVar(value)) ? compressTypeGz : compressTypeNone;
// This new option allows any type of compression to be specified. It must be parsed after the option above so the
// value does not get overwritten. Since options are stored in alpha order this should always be true.
// This new option allows any type of compression to be specified. It must be parsed after the option above so the value
// does not get overwritten. Since options are stored in alpha order this should always be true.
else if (strEqZ(key, MANIFEST_KEY_OPTION_COMPRESS_TYPE))
manifest->pub.data.backupOptionCompressType = compressTypeEnum(strIdFromStr(varStr(jsonToVar(value))));
else if (strEqZ(key, MANIFEST_KEY_OPTION_HARDLINK))
@ -2402,7 +2402,7 @@ manifestNewLoad(IoRead *read)
path->user = manifestOwnerCache(this, manifestOwnerGet(loadData.pathUserDefault));
}

// Sort the lists. They should already be sorted in the file but it is possible that this system has a different collation
// Sort the lists. They should already be sorted in the file but it is possible that this system has a different collation
// that renders that sort useless.
//
// This must happen *after* the default processing because found lists are in natural file order and it is not worth writing
@ -2435,7 +2435,7 @@ typedef struct ManifestSaveData
mode_t pathModeDefault; // Path default mode
} ManifestSaveData;

// Helper to convert the owner MCV to a default. If the input is NULL boolean false should be returned, else the owner string.
// Helper to convert the owner MCV to a default. If the input is NULL boolean false should be returned, else the owner string.
static const Variant *
manifestOwnerVar(const String *ownerDefault)
{
@ -2590,7 +2590,7 @@ manifestSaveCallback(void *const callbackData, const String *const sectionNext,
jsonFromVar(manifest->pub.data.backupOptionChecksumPage));
}

// Set the option when compression is turned on. In older versions this also implied gz compression but in newer versions
// Set the option when compression is turned on. In older versions this also implied gz compression but in newer versions
// the type option must also be set if compression is not gz.
infoSaveValue(
infoSaveData, MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_OPTION_COMPRESS,
@ -2610,7 +2610,7 @@ manifestSaveCallback(void *const callbackData, const String *const sectionNext,
jsonFromVar(manifest->pub.data.backupOptionCompressLevelNetwork));
}

// Set the compression type. Older versions will ignore this and assume gz compression if the compress option is set.
// Set the compression type. Older versions will ignore this and assume gz compression if the compress option is set.
infoSaveValue(
infoSaveData, MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_OPTION_COMPRESS_TYPE,
jsonFromVar(VARSTR(compressTypeStr(manifest->pub.data.backupOptionCompressType))));
@ -2734,8 +2734,8 @@ manifestSaveCallback(void *const callbackData, const String *const sectionNext,
jsonWriteUInt64(jsonWriteKeyStrId(json, MANIFEST_KEY_BUNDLE_OFFSET), file.bundleOffset);
}

// Save if the file size is not zero and the checksum exists. The checksum might not exist if this is a partial
// save performed during a backup.
// Save if the file size is not zero and the checksum exists. The checksum might not exist if this is a partial save
// performed during a backup.
if (file.size != 0 && file.checksumSha1 != NULL)
{
jsonWriteStr(
@@ -2,10 +2,10 @@
Backup Manifest Handler

The backup manifest stores a complete list of all files, links, and paths in a backup along with metadata such as checksums, sizes,
-timestamps, etc.  A list of databases is also included for selective restore.
+timestamps, etc. A list of databases is also included for selective restore.

The purpose of the manifest is to allow the restore command to confidently reconstruct the PostgreSQL data directory and ensure that
-nothing is missing or corrupt.  It is also useful for reporting, e.g. size of backup, backup time, etc.
+nothing is missing or corrupt. It is also useful for reporting, e.g. size of backup, backup time, etc.
***********************************************************************************************************************************/
#ifndef INFO_MANIFEST_H
#define INFO_MANIFEST_H
@@ -293,7 +293,7 @@ manifestMove(Manifest *const this, MemContext *const parentNew)
// Manifest save
FN_EXTERN void manifestSave(Manifest *this, IoWrite *write);

-// Validate a completed manifest.  Use strict mode only when saving the manifest after a backup.
+// Validate a completed manifest. Use strict mode only when saving the manifest after a backup.
FN_EXTERN void manifestValidate(Manifest *this, bool strict);

/***********************************************************************************************************************************

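As a usage note, the strict flag in the prototype above maps to the two situations the comment implies. A hypothetical illustration, with the opaque type stubbed so the example stands alone:

```c
#include <stdbool.h>

// Stubs so the example is self-contained; the real Manifest is opaque
typedef struct Manifest Manifest;
extern void manifestValidate(Manifest *this, bool strict);

static void
example(Manifest *const manifest, const bool savingAfterBackup)
{
    // Strict validation only when saving the manifest produced by a backup;
    // relaxed validation otherwise, e.g. when loading a manifest for restore
    manifestValidate(manifest, savingAfterBackup);
}
```
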
@@ -290,7 +290,7 @@ pgClientQuery(PgClient *const this, const String *const query, const PgClientQue
// Else convert the value to a variant
else
{
-// Convert column type.  Not all PostgreSQL types are supported but these should suffice.
+// Convert column type. Not all PostgreSQL types are supported but these should suffice.
switch (columnType[columnIdx])
{
// Boolean type

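pgBackRest's client is built on libpq, so the switch the hunk refers to dispatches on the column's type OID. An illustrative sketch of that kind of conversion, not the project's code; the function and variable handling are invented, while the libpq calls (`PQgetvalue`, `PQftype`) and catalog OIDs are standard:

```c
#include <libpq-fe.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

// OID constants from the PostgreSQL catalog (pg_type); libpq does not define them
#define BOOLOID 16
#define INT8OID 20
#define INT4OID 23
#define TEXTOID 25

// Convert one result cell based on its type OID (illustration only)
static void
columnToValue(PGresult *const result, const int rowIdx, const int colIdx)
{
    const char *const raw = PQgetvalue(result, rowIdx, colIdx);

    switch (PQftype(result, colIdx))
    {
        case BOOLOID:                                   // libpq text mode returns "t"/"f"
        {
            bool value = strcmp(raw, "t") == 0;
            (void)value;
            break;
        }

        case INT4OID:
        case INT8OID:                                   // integers parse from their text form
        {
            long long value = strtoll(raw, NULL, 10);
            (void)value;
            break;
        }

        case TEXTOID:                                   // text can be used as-is
            break;

        default:                                        // unsupported types -- hence casts in queries
            break;
    }
}
```
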
@@ -1,8 +1,8 @@
/***********************************************************************************************************************************
PostgreSQL Client

-Connect to a PostgreSQL database and run queries.  This is not intended to be a general purpose client but is suitable for
-pgBackRest's limited needs.  In particular, data type support is limited to text, int, and bool types so it may be necessary to add
+Connect to a PostgreSQL database and run queries. This is not intended to be a general purpose client but is suitable for
+pgBackRest's limited needs. In particular, data type support is limited to text, int, and bool types so it may be necessary to add
casts to queries to output one of these types.
***********************************************************************************************************************************/
#ifndef POSTGRES_QUERY_H

@@ -1,12 +1,12 @@
/***********************************************************************************************************************************
PostgreSQL Page Checksum Algorithm

-For each supported release of PostgreSQL check the code in this file to see if it has changed.  The easiest way to do this is to
-copy and paste in place and check git to see if there are any diffs.  Tabs should be copied as is to make this process easy even
+For each supported release of PostgreSQL check the code in this file to see if it has changed. The easiest way to do this is to
+copy and paste in place and check git to see if there are any diffs. Tabs should be copied as is to make this process easy even
though the pgBackRest project does not use tabs elsewhere.

Since the checksum implementation and page format do not (yet) change between versions this code should be copied verbatim from
-src/include/storage/checksum_impl.h for each new release.  Only the newest released version of the code should be used.
+src/include/storage/checksum_impl.h for each new release. Only the newest released version of the code should be used.

Modifications need to be made after copying:

@@ -4,15 +4,15 @@ PostgreSQL Types That Do Not Vary By Version
Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
Portions Copyright (c) 1994, Regents of the University of California

-For each supported release of PostgreSQL check the types in this file to see if they have changed.  The easiest way to do this is to
-copy and paste in place and check git to see if there are any diffs.  Tabs should be copied as is to make this process easy even
+For each supported release of PostgreSQL check the types in this file to see if they have changed. The easiest way to do this is to
+copy and paste in place and check git to see if there are any diffs. Tabs should be copied as is to make this process easy even
though the pgBackRest project does not use tabs elsewhere.

-Comments should be copied with the types they apply to, even if the comment has not changed.  This does get repetitive, but has no
+Comments should be copied with the types they apply to, even if the comment has not changed. This does get repetitive, but has no
runtime cost and makes the rules a bit easier to follow.

-If a comment is changed then the newer comment should be copied.  If the *type* has changed then it must be moved to version.auto.c
-which could have a large impact on dependencies.  Hopefully that won't happen often.
+If a comment is changed then the newer comment should be copied. If the *type* has changed then it must be moved to version.auto.c
+which could have a large impact on dependencies. Hopefully that won't happen often.

Note when adding new types it is safer to add them to version.auto.c unless they are needed for code that must be compatible across
all versions of PostgreSQL supported by pgBackRest.

@@ -4,22 +4,22 @@ PostgreSQL Types That Vary By Version
Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
Portions Copyright (c) 1994, Regents of the University of California

-For each supported release of PostgreSQL check the types in this file to see if they have changed.  The easiest way to do this is to
-copy and paste in place and check git to see if there are any diffs.  Tabs should be copied as is to make this process easy even
+For each supported release of PostgreSQL check the types in this file to see if they have changed. The easiest way to do this is to
+copy and paste in place and check git to see if there are any diffs. Tabs should be copied as is to make this process easy even
though the pgBackRest project does not use tabs elsewhere.

New versions should always be added to the top of each type's #if block, underneath `PG_VERSION > PG_VERSION_MAX` to cause as little
-churn as possible.  This also ensures that new versions will not work until PG_VERSION_MAX and this file have been updated.
+churn as possible. This also ensures that new versions will not work until PG_VERSION_MAX and this file have been updated.

New data structures do not need to add #elif branches for old versions. See pg_time_t as an example.

-Comments should be copied with the types they apply to, even if the comment has not changed.  This does get repetitive, but has no
+Comments should be copied with the types they apply to, even if the comment has not changed. This does get repetitive, but has no
runtime cost and makes the rules a bit easier to follow.

If a comment has syntax only changes, then the new version of the comment can be applied to older versions of the type.

If a comment has changed in a way that implies a difference in the way the type is used, then a new version of the comment and type
-should be created.  See the CheckPoint type difference between 9.5 and 9.6 as an example.
+should be created. See the CheckPoint type difference between 9.5 and 9.6 as an example.
***********************************************************************************************************************************/
#include "postgres/interface/static.vendor.h"
#include "postgres/version.h"

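For readers unfamiliar with the layout this header describes, a hypothetical #if block illustrating the ordering rule; the version macro `PG_VERSION_96` and the typedef body are invented for the example, only `PG_VERSION` and `PG_VERSION_MAX` come from the text above:

```c
// Hypothetical illustration of the ordering rule: new versions are added at
// the top, under the PG_VERSION_MAX guard. The empty top branch means a
// version above PG_VERSION_MAX gets no types at all, so compilation fails
// until this file and PG_VERSION_MAX are updated.
#if PG_VERSION > PG_VERSION_MAX

#elif PG_VERSION >= PG_VERSION_96

typedef int64_t pg_time_t;                              // invented body for the example

#endif
```
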
@@ -818,7 +818,7 @@ protocolRemoteGet(ProtocolStorageType protocolStorageType, unsigned int hostIdx)
MEM_CONTEXT_END();
}

-// Determine protocol id for the remote.  If the process option is set then use that since we want the remote protocol id to
+// Determine protocol id for the remote. If the process option is set then use that since we want the remote protocol id to
// match the local protocol id. Otherwise set to 0 since the remote is being started from a main process and there should only
// be one remote per host.
unsigned int processId = 0;

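A plausible sketch of how the id described above might then be chosen; `cfgOptionTest`, `cfgOptionUInt`, and `cfgOptProcess` are assumed pgBackRest config accessors, not verified against this revision:

```c
// Sketch only (assumed config API): reuse the local process id when the
// process option is set so remote and local protocol ids match; otherwise 0,
// since a main process starts at most one remote per host.
unsigned int processId = 0;

if (cfgOptionTest(cfgOptProcess))
    processId = cfgOptionUInt(cfgOptProcess);
```
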
@@ -135,7 +135,7 @@ protocolParallelProcess(ProtocolParallel *this)
// If clients are running then wait for one to finish
if (clientRunningTotal > 0)
{
-// Initialize timeout struct used for select.  Recreate this structure each time since Linux (at least) will modify it.
+// Initialize timeout struct used for select. Recreate this structure each time since Linux (at least) will modify it.
struct timeval timeoutSelect;
timeoutSelect.tv_sec = (time_t)(this->timeout / MSEC_PER_SEC);
timeoutSelect.tv_usec = (suseconds_t)(this->timeout % MSEC_PER_SEC * 1000);

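The Linux behavior the comment warns about is documented in select(2): the kernel may overwrite the timeval with the time remaining. A self-contained sketch of the safe pattern (the function and its loop are invented for illustration):

```c
#include <stdbool.h>
#include <sys/select.h>
#include <sys/time.h>

// Rebuild the timeval on every iteration rather than reusing a struct that
// was initialized once outside the loop -- select() may have modified it.
static void
waitLoop(const int fd, const unsigned long timeoutMs)
{
    bool done = false;

    while (!done)
    {
        fd_set readSet;
        FD_ZERO(&readSet);
        FD_SET(fd, &readSet);

        // Recreate the timeout each time since it may have been modified
        struct timeval timeout =
        {
            .tv_sec = (time_t)(timeoutMs / 1000),
            .tv_usec = (suseconds_t)(timeoutMs % 1000 * 1000),
        };

        // Data ready (or an error) ends the wait; zero means timeout, retry
        done = select(fd + 1, &readSet, NULL, NULL, &timeout) != 0;
    }
}
```
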
@@ -17,7 +17,7 @@ typedef struct ProtocolParallel ProtocolParallel;
/***********************************************************************************************************************************
Job request callback

-Called whenever a new job is required for processing.  If no more jobs are available then NULL is returned.  Note that NULL must be
+Called whenever a new job is required for processing. If no more jobs are available then NULL is returned. Note that NULL must be
returned to each clientIdx in case job distribution varies by clientIdx.
***********************************************************************************************************************************/
typedef ProtocolParallelJob *ParallelJobCallback(void *data, unsigned int clientIdx);

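A minimal hypothetical callback matching the typedef above; `JobQueue` and its two functions are invented stand-ins for whatever the caller actually uses, and only the NULL-on-empty contract matters:

```c
#include <stdbool.h>
#include <stddef.h>

typedef struct JobQueue JobQueue;                       // invented queue type for the example

extern bool jobQueueEmpty(const JobQueue *queue);       // invented
extern ProtocolParallelJob *jobQueuePop(JobQueue *queue); // invented

// Hypothetical ParallelJobCallback: all clients share one queue here, but NULL
// must still be returned for every clientIdx, since distribution may vary by
// client and each one needs to learn there is no more work.
static ProtocolParallelJob *
exampleJobCallback(void *const data, const unsigned int clientIdx)
{
    (void)clientIdx;
    JobQueue *const queue = data;

    return jobQueueEmpty(queue) ? NULL : jobQueuePop(queue);
}
```
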
@@ -265,7 +265,7 @@ protocolServerProcess(
}

// Send keep-alive to remotes. When a local process is doing work that does not involve the remote it is important
-// that the remote does not timeout.  This will send a keep alive once per unit of work that is performed by the
+// that the remote does not timeout. This will send a keep alive once per unit of work that is performed by the
// local process.
protocolKeepAlive();
}

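The shape of the loop this lands in, reconstructed loosely; `Job`, `jobNext`, and `processJob` are invented names, while `protocolKeepAlive` is the call shown in the hunk:

```c
typedef struct Job Job;                                 // invented for the example

extern Job *jobNext(void);                              // invented: next unit of local work
extern void processJob(Job *job);                       // invented: perform the work
extern void protocolKeepAlive(void);                    // the call from the hunk above

// One keep-alive per completed unit of work keeps an otherwise idle remote
// from timing out while the local process is busy.
static void
localWorkLoop(void)
{
    Job *job;

    while ((job = jobNext()) != NULL)
    {
        processJob(job);
        protocolKeepAlive();                            // reset the remote's idle timer
    }
}
```
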
@@ -54,7 +54,7 @@ static struct StorageHelperLocal

String *stanza; // Stanza for storage
bool stanzaInit; // Has the stanza been initialized?
-bool dryRunInit; // Has dryRun been initialized?  If not disallow writes.
+bool dryRunInit; // Has dryRun been initialized? If not disallow writes.
bool dryRun; // Disallow writes in dry-run mode.
RegExp *walRegExp; // Regular expression for identifying wal files
} storageHelper;

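The two flags read as a small safety interlock. A hedged sketch of the guard they imply, with the helper struct reduced to a stand-in:

```c
#include <stdbool.h>

// Minimal stand-in for the helper struct in the hunk above
static struct
{
    bool dryRunInit;                                    // has dry-run been configured?
    bool dryRun;                                        // disallow writes when true
} storageHelper;

// Writes are allowed only after dry-run has been configured and only when it
// is off -- the unconfigured state fails safe by refusing writes.
static bool
storageWriteAllowed(void)
{
    return storageHelper.dryRunInit && !storageHelper.dryRun;
}
```
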
@@ -141,8 +141,8 @@ storageReadPosix(THIS_VOID, Buffer *buffer, bool block)
bufUsedInc(buffer, (size_t)actualBytes);
this->current += (uint64_t)actualBytes;

-// If less data than expected was read or the limit has been reached then EOF.  The file may not actually be EOF but we are
-// not concerned with files that are growing.  Just read up to the point where the file is being extended.
+// If less data than expected was read or the limit has been reached then EOF. The file may not actually be EOF but we are
+// not concerned with files that are growing. Just read up to the point where the file is being extended.
if ((size_t)actualBytes != expectedBytes || this->current == this->limit)
this->eof = true;
}
@@ -227,7 +227,7 @@ storageReadPosixNew(
.storage = storage,
.fd = -1,

-// Rather than enable/disable limit checking just use a big number when there is no limit.  We can feel pretty confident
+// Rather than enable/disable limit checking just use a big number when there is no limit. We can feel pretty confident
// that no files will be > UINT64_MAX in size. This is a copy of the interface limit but it simplifies the code during
// read so it seems worthwhile.
.limit = limit == NULL ? UINT64_MAX : varUInt64(limit),

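Both hunks describe the same trick: treat "no limit" as UINT64_MAX so the read path needs a single comparison, and treat a short read as EOF even if the file is still growing. A standalone sketch with plain POSIX read(); the struct and function are invented for illustration:

```c
#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

typedef struct
{
    int fd;
    uint64_t current;                                   // bytes read so far
    uint64_t limit;                                     // UINT64_MAX when unlimited
    bool eof;
} ReadState;

static ssize_t
readChunk(ReadState *const this, uint8_t *const buffer, const size_t expectedBytes)
{
    // Never read past the limit (UINT64_MAX makes this a no-op when unlimited)
    size_t toRead = expectedBytes;

    if ((uint64_t)toRead > this->limit - this->current)
        toRead = (size_t)(this->limit - this->current);

    const ssize_t actualBytes = read(this->fd, buffer, toRead);

    if (actualBytes > 0)
        this->current += (uint64_t)actualBytes;

    // A short read or reaching the limit means EOF for our purposes -- a
    // growing file is only read up to the point where it was being extended
    if (actualBytes <= 0 || (size_t)actualBytes != toRead || this->current == this->limit)
        this->eof = true;

    return actualBytes;
}
```
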
@@ -205,8 +205,8 @@ storageS3Auth(
AWS4_HMAC_SHA256 "\n%s\n%s/%s/" S3 "/" AWS4_REQUEST "\n%s", strZ(dateTime), strZ(date), strZ(this->region),
strZ(strNewEncode(encodingHex, cryptoHashOne(hashTypeSha256, BUFSTR(canonicalRequest)))));

-// Generate signing key.  This key only needs to be regenerated every seven days but we'll do it once a day to keep the
-// logic simple.  It's a relatively expensive operation so we'd rather not do it for every request.
+// Generate signing key. This key only needs to be regenerated every seven days but we'll do it once a day to keep the
+// logic simple. It's a relatively expensive operation so we'd rather not do it for every request.
// If the cached signing key has expired (or has none been generated) then regenerate it
if (!strEq(date, this->signingKeyDate))
{

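The key being cached here comes from the standard AWS Signature V4 derivation chain. A sketch of that chain with the once-a-day cache the comment describes; the `hmacSha256` helper is a placeholder for whatever crypto library is available, and everything else is invented for the example:

```c
#include <string.h>

// Placeholder: assume an HMAC-SHA256 helper that writes a 32-byte digest of
// (key, message) into out -- e.g. a thin wrapper over OpenSSL's HMAC
extern void hmacSha256(
    const void *key, size_t keySize, const void *message, size_t messageSize, unsigned char out[32]);

static unsigned char signingKey[32];
static char signingKeyDate[9];                          // YYYYMMDD of the cached key

// Standard AWS SigV4 derivation with a date-based cache: the key is actually
// valid for seven days, but regenerating whenever the date changes keeps the
// logic simple while still avoiding the cost on every request.
static const unsigned char *
signingKeyGet(const char *const secretKey, const char *const date, const char *const region)
{
    if (strcmp(date, signingKeyDate) != 0)
    {
        unsigned char kDate[32], kRegion[32], kService[32];
        char seed[64] = "AWS4";
        strncat(seed, secretKey, sizeof(seed) - 5);

        // kSigning = HMAC(HMAC(HMAC(HMAC("AWS4" + secret, date), region), "s3"), "aws4_request")
        hmacSha256(seed, strlen(seed), date, strlen(date), kDate);
        hmacSha256(kDate, 32, region, strlen(region), kRegion);
        hmacSha256(kRegion, 32, "s3", 2, kService);
        hmacSha256(kService, 32, "aws4_request", 12, signingKey);

        strncpy(signingKeyDate, date, sizeof(signingKeyDate) - 1);
    }

    return signingKey;
}
```
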
@@ -424,8 +424,8 @@ storageMove(const Storage *this, StorageRead *source, StorageWrite *destination)
// Remove the source file
storageInterfaceRemoveP(storageDriver(this), storageReadName(source));

-// Sync source path if the destination path was synced.  We know the source and destination paths are different because
-// the move did not succeed.  This will need updating when drivers other than Posix/CIFS are implemented because there's
+// Sync source path if the destination path was synced. We know the source and destination paths are different because
+// the move did not succeed. This will need updating when drivers other than Posix/CIFS are implemented because there's
// no way to get coverage on it now.
if (storageWriteSyncPath(destination))
storageInterfacePathSyncP(storageDriver(this), strPath(storageReadName(source)));

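For context, this hunk sits inside the usual cross-filesystem move fallback. A generic sketch of that pattern in plain POSIX, not pgBackRest's driver API; `copyFile` is an assumed helper:

```c
#include <errno.h>
#include <stdio.h>

extern int copyFile(const char *source, const char *destination); // assumed: copy bytes and sync destination

// Try an atomic rename first; only fall back to copy + remove when the paths
// are on different filesystems (EXDEV), which is when the rename "move" fails.
static int
moveFile(const char *const source, const char *const destination)
{
    if (rename(source, destination) == 0)
        return 0;

    if (errno != EXDEV)                                 // only cross-device moves get the fallback
        return -1;

    // After the copy, remove the source; a real implementation would also sync
    // the source's parent directory so the unlink is durable, as the hunk notes
    if (copyFile(source, destination) != 0 || remove(source) != 0)
        return -1;

    return 0;
}
```
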
Some files were not shown because too many files have changed in this diff.