<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE doc SYSTEM "doc.dtd">
<doc title="{[project]} User Guide" subtitle="{[user-guide-subtitle]} / {[postgres]} {[pg-version]}">
<description>The {[project]} User Guide demonstrates how to quickly and easily setup {[project]} for your {[postgres]} database. Step-by-step instructions lead the user through all the important features of the fastest, most reliable {[postgres]} backup and restore solution.</description>
<variable-list>
<!-- Variables used by the rest of the script -->
<variable key="user-guide-subtitle" keyword="default">Debian &amp; Ubuntu</variable>
<variable key="user-guide-subtitle" keyword="co6">RHEL &amp; CentOS 6</variable>
<variable key="user-guide-os" keyword="default">Debian/Ubuntu</variable>
<variable key="user-guide-os" keyword="co6">RHEL/CentOS 6</variable>
<variable key="pg-version">9.4</variable>
<variable key="perl-lib-path">/usr/share/perl5</variable>
<variable key="perl-bin-path">/usr/bin</variable>
<variable key="backrest-repo-path">/var/lib/pgbackrest</variable>
<variable key="postgres-cluster-demo">demo</variable>
<variable key="backrest-config-demo">/etc/{[project-exe]}.conf</variable>
<variable key="db-path-default" keyword="default">/var/lib/postgresql/[version]/[cluster]</variable>
<variable key="db-path-default" keyword="co6">/var/lib/pgsql/[version]/data</variable>
<variable key="db-path" keyword="default">/var/lib/postgresql/{[pg-version]}/{[postgres-cluster-demo]}</variable>
<variable key="db-path" keyword="co6">/var/lib/pgsql/{[pg-version]}/data</variable>
<variable key="spool-path">/var/spool/pgbackrest</variable>
<variable key="postgres-config-demo" keyword="default">/etc/postgresql/{[pg-version]}/{[postgres-cluster-demo]}/postgresql.conf</variable>
<variable key="postgres-config-demo" keyword="co6">{[db-path]}/postgresql.conf</variable>
<variable key="postgres-hba-demo" keyword="default">/etc/postgresql/{[pg-version]}/{[postgres-cluster-demo]}/pg_hba.conf</variable>
<variable key="postgres-hba-demo" keyword="co6">{[db-path]}/pg_hba.conf</variable>
<variable key="postgres-pgpass">/home/postgres/.pgpass</variable>
<variable key="postgres-log-demo" keyword="default">/var/log/postgresql/postgresql-{[pg-version]}-{[postgres-cluster-demo]}.log</variable>
<variable key="postgres-log-demo" keyword="co6">{[db-path]}/pg_log/postgresql.log</variable>
<variable key="postgres-log-pgstartup-demo" keyword="co6">/var/lib/pgsql/{[pg-version]}/pgstartup.log</variable>
<variable key="postgres-recovery-demo" keyword="default">{[db-path]}/recovery.conf</variable>
<variable key="postgres-recovery-demo" keyword="co6">{[db-path]}/recovery.conf</variable>
<!-- Hosts -->
<variable key="host-os" keyword="default">u14</variable>
<variable key="host-os" keyword="co6">co6</variable>
<variable key="host-user">vagrant</variable>
<variable key="host-mount">/backrest:/backrest</variable>
<variable key="image-user">pgbackrest/vagrant</variable>
<variable key="host-db-master">db-master</variable>
<variable key="host-db-master-user">{[host-user]}</variable>
<variable key="host-db-master-image">{[image-user]}/{[host-os]}-db-{[pg-version]}-doc-pre</variable>
<variable key="host-db-master-mount">{[host-mount]}</variable>
<variable key="host-db-standby">db-standby</variable>
<variable key="host-db-standby-user">{[host-db-master-user]}</variable>
<variable key="host-db-standby-image">{[host-db-master-image]}</variable>
<variable key="host-db-standby-mount">{[host-mount]}</variable>
<variable key="host-backup">backup</variable>
<variable key="host-backup-user">{[host-user]}</variable>
<variable key="host-backup-image">{[image-user]}/{[host-os]}-backup-doc-pre</variable>
<variable key="host-backup-mount">{[host-mount]}</variable>
<!-- Commands for various operations -->
<variable key="cmd-backup-last">ls -1 {[backrest-repo-path]}/backup/demo | tail -4 | head -1</variable>
<!-- Data used to demonstrate backup/restore operations -->
<variable key="test-table-data">Important Data</variable>
<!-- Database cluster commands -->
<variable key="db-cluster-wait">sleep 1</variable>
<variable key="db-cluster-create" keyword="default">pg_createcluster {[pg-version]} {[postgres-cluster-demo]}</variable>
<variable key="db-cluster-create" keyword="co6">service postgresql-{[pg-version]} initdb</variable>
<variable key="db-cluster-start" keyword="default">pg_ctlcluster {[pg-version]} {[postgres-cluster-demo]} start</variable>
<variable key="db-cluster-start" keyword="co6">service postgresql-{[pg-version]} start</variable>
<variable key="db-cluster-stop" keyword="default">pg_ctlcluster {[pg-version]} {[postgres-cluster-demo]} stop</variable>
<variable key="db-cluster-stop" keyword="co6">service postgresql-{[pg-version]} stop</variable>
<variable key="db-cluster-restart" keyword="default">pg_ctlcluster {[pg-version]} {[postgres-cluster-demo]} restart</variable>
<variable key="db-cluster-restart" keyword="co6">service postgresql-{[pg-version]} restart</variable>
<variable key="db-cluster-reload" keyword="default">pg_ctlcluster {[pg-version]} {[postgres-cluster-demo]} reload</variable>
<variable key="db-cluster-reload" keyword="co6">service postgresql-{[pg-version]} reload</variable>
</variable-list>
<!-- SECTION => INTRODUCTION -->
<section id="introduction">
<title>Introduction</title>
<p>This user guide is intended to be followed sequentially from beginning to end &amp;mdash; each section depends on the last. For example, the <link section="/backup">Backup</link> section relies on setup that is performed in the <link section="/quickstart">Quick Start</link> section. Once <backrest/> is up and running then skipping around is possible but following the user guide in order is recommended the first time through.</p>
<p>Although the examples are targeted at {[user-guide-os]} and <postgres/> {[pg-version]}, it should be fairly easy to apply this guide to any Unix distribution and <postgres/> version. Note that only 64-bit distributions are currently supported due to 64-bit operations in the Perl code. The only OS-specific commands are those to create, start, stop, and drop <postgres/> clusters. The <backrest/> commands will be the same on any Unix system though the locations to install Perl libraries and executables may vary.
Configuration information and documentation for <postgres/> can be found in the <postgres/> <link url='http://www.postgresql.org/docs/{[pg-version]}/static/index.html'>Manual</link>.</p>
<p>A somewhat novel approach is taken to documentation in this user guide. Each command is run on a virtual machine when the documentation is built from the XML source. This means you can have a high confidence that the commands work correctly in the order presented. Output is captured and displayed below the command when appropriate. If the output is not included it is because it was deemed not relevant or was considered a distraction from the narrative.</p>
<p>All commands are intended to be run as an unprivileged user that has sudo privileges for both the <user>root</user> and <user>postgres</user> users. It's also possible to run the commands directly as their respective users without modification and in that case the <cmd>sudo</cmd> commands can be stripped off.</p>
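<p>For example (a sketch; the <cmd>info</cmd> command is used purely as an illustration), a command presented in this guide for the <user>postgres</user> user can be run either way:</p>
<code-block title="Running commands via sudo or directly">
# run from the unprivileged user via sudo
sudo -u postgres pgbackrest --stanza=demo info

# equivalent when logged in directly as the postgres user
pgbackrest --stanza=demo info
</code-block>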
</section>
<!-- SECTION => CONCEPTS -->
<section id="concept">
<title>Concepts</title>
<p>The following concepts are defined as they are relevant to <backrest/>, <postgres/>, and this user guide.</p>
<!-- SECTION => CONCEPTS - BACKUP -->
<section id="backup">
<title>Backup</title>
<p>A backup is a consistent copy of a database cluster that can be restored to recover from a hardware failure, to perform Point-In-Time Recovery, or to bring up a new standby.</p>
<p><b>Full Backup</b>: <backrest/> copies the entire contents of the database cluster to the backup server. The first backup of the database cluster is always a Full Backup. <backrest/> is always able to restore a full backup directly. The full backup does not depend on any files outside of the full backup for consistency.</p>
<p><b>Differential Backup</b>: <backrest/> copies only those database cluster files that have changed since the last full backup. <backrest/> restores a differential backup by copying all of the files in the chosen differential backup and the appropriate unchanged files from the previous full backup. The advantage of a differential backup is that it requires less disk space than a full backup, however, the differential backup and the full backup must both be valid to restore the differential backup.</p>
<p><b>Incremental Backup</b>: <backrest/> copies only those database cluster files that have changed since the last backup (which can be another incremental backup, a differential backup, or a full backup). Since an incremental backup includes only the files changed since the prior backup, it is generally much smaller than a full or differential backup. As with a differential backup, an incremental backup depends on other backups being valid in order to be restored. If a prior differential backup exists, then all incremental backups back to that differential backup, the differential backup itself, and the prior full backup must all be valid to restore the incremental backup. If no differential backup exists, then all prior incremental backups back to the prior full backup, and the full backup itself, must be valid.</p>
</section>
<!-- SECTION => CONCEPTS - RESTORE -->
<section id="restore">
<title>Restore</title>
<p>A restore is the act of copying a backup to a system where it will be started as a live database cluster. A restore requires the backup files and one or more WAL segments in order to work correctly.</p>
</section>
<!-- SECTION => CONCEPTS - WAL -->
<section id="wal">
<title>Write Ahead Log (WAL)</title>
<p>WAL is the mechanism that <postgres/> uses to ensure that no committed changes are lost. Transactions are written sequentially to the WAL and a transaction is considered to be committed when those writes are flushed to disk. Afterwards, a background process writes the changes into the main database cluster files (also known as the heap). In the event of a crash, the WAL is replayed to make the database consistent.</p>
<p>WAL is conceptually infinite but in practice is broken up into individual 16MB files called segments. WAL segments follow the naming convention <id>0000000100000A1E000000FE</id> where the first 8 hexadecimal digits represent the timeline and the next 16 digits are the logical sequence number (LSN).</p>
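<p>For illustration, the example segment name above breaks down as follows:</p>
<code-block title="WAL segment name breakdown">
0000000100000A1E000000FE
|--1---||------2-------|

1 - timeline (8 hexadecimal digits)
2 - logical sequence number (16 hexadecimal digits)
</code-block>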
</section>
</section>
<!-- SECTION => INSTALLATION -->
<section id="installation">
<title>Installation</title>
<host-add name="{[host-db-master]}" user="{[host-db-master-user]}" image="{[host-db-master-image]}" os="{[host-os]}" mount="{[host-db-master-mount]}">
<execute user="{[host-user]}">
<exe-cmd>mkdir /home/{[host-user]}/pgbackrest-release-{[version]}</exe-cmd>
</execute>
<execute user="{[host-user]}">
<exe-cmd>cp -r /backrest/bin /home/{[host-user]}/pgbackrest-release-{[version]}</exe-cmd>
</execute>
<execute user="{[host-user]}">
<exe-cmd>cp -r /backrest/lib /home/{[host-user]}/pgbackrest-release-{[version]}</exe-cmd>
</execute>
</host-add>
<p keyword="default"><backrest/> is written in Perl which is included with {[user-guide-os]} by default. The <id>DBD::Pg</id> module must also be installed.</p>
<execute-list host="{[host-db-master]}" keyword="default">
<title>Install the <id>DBD::Pg</id> module</title>
<execute user="root">
<exe-cmd>apt-get install libdbd-pg-perl</exe-cmd>
<exe-cmd-extra>-y</exe-cmd-extra>
</execute>
</execute-list>
<p keyword="co6"><backrest/> is written in Perl which is not included with {[user-guide-os]} by default, however all required modules are available as standard packages.</p>
<execute-list host="{[host-db-master]}" keyword="co6">
<title>Install required Perl packages</title>
<execute user="root">
<exe-cmd>yum install perl perl-Time-HiRes perl-parent perl-JSON
perl-Digest-SHA perl-DBD-Pg</exe-cmd>
<exe-cmd-extra>-y</exe-cmd-extra>
</execute>
</execute-list>
<p keyword="default">{[user-guide-os]} packages for <backrest/> are available, but if they are not provided on your distribution/version it is easy to download the source and install manually.</p>
<p keyword="co6">{[user-guide-os]} packages for <backrest/> are available from <link url="{[crunchy-url-base]}">Crunchy Data</link> or <link url="http://yum.postgresql.org">yum.postgresql.org</link>, but it is also easy to download the source and install manually.</p>
<execute-list host="{[host-db-master]}">
<title>Download version <id>{[version]}</id> of <backrest/></title>
<execute user="{[host-user]}" skip="y">
<exe-cmd>wget -q -O -
{[github-url-release]}/{[version]}.tar.gz |
tar zx -C ~</exe-cmd>
</execute>
</execute-list>
<p>If <backrest/> has been installed before, make sure that no prior copies of it remain. Depending on its age, a prior version may have been installed in a few different locations. The following commands will remove all prior versions of <backrest/>.</p>
<execute-list host="{[host-db-master]}">
<title>Remove prior <backrest/> installations</title>
<execute user="root">
<exe-cmd>rm -f /usr/bin/pgbackrest</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>rm -f /usr/bin/pg_backrest</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>rm -rf /usr/lib/perl5/BackRest</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>rm -rf {[perl-lib-path]}/BackRest</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>rm -rf /usr/lib/perl5/pgBackRest</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>rm -rf {[perl-lib-path]}/pgBackRest</exe-cmd>
</execute>
</execute-list>
<p>The new version can now be installed.</p>
<execute-list host="{[host-db-master]}">
<title>Install <backrest/></title>
<execute user="root">
<exe-cmd>cp -r ~/pgbackrest-release-{[version]}/lib/pgBackRest
{[perl-lib-path]}</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>find {[perl-lib-path]}/pgBackRest -type f -exec chmod 644 {} +</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>find {[perl-lib-path]}/pgBackRest -type d -exec chmod 755 {} +</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>cp ~/pgbackrest-release-{[version]}/bin/{[project-exe]} {[perl-bin-path]}/{[project-exe]}</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>chmod 755 {[perl-bin-path]}/{[project-exe]}</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>mkdir -m 770 /var/log/pgbackrest</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>chown postgres:postgres /var/log/pgbackrest</exe-cmd>
</execute>
</execute-list>
<p><backrest/> should now be properly installed but it is best to check. If any dependencies were missed then you will get an error when running <backrest/> from the command line.</p>
<execute-list host="{[host-db-master]}">
<title>Make sure the installation worked</title>
<execute output="y" filter="n">
<exe-cmd>{[project-exe]}</exe-cmd>
</execute>
</execute-list>
</section>
<!-- SECTION => QUICKSTART -->
<section id="quickstart" depend="installation">
<title>Quick Start</title>
<p>The Quick Start section will cover basic configuration of <backrest/> and <postgres/> and introduce the <cmd>backup</cmd>, <cmd>restore</cmd>, and <cmd>info</cmd> commands.</p>
<!-- SECTION => QUICKSTART - SETUP DEMO CLUSTER -->
<section id="setup-demo-cluster">
<title>Setup Demo Cluster</title>
<p>Creating the demo cluster is optional but is strongly recommended, especially for new users, since the example commands in the user guide reference the demo cluster; the examples assume the demo cluster is running on the default port (i.e. 5432). The cluster will not be started until a later section because there is still some configuration to do.</p>
<execute-list host="{[host-db-master]}">
<title>Create the demo cluster</title>
<execute user="root" output="y" filter="n">
<exe-cmd>{[db-cluster-create]}</exe-cmd>
</execute>
</execute-list>
<p>By default <postgres/> will only accept local connections. The examples in this guide will require connections from other servers so <pg-option>listen_addresses</pg-option> is configured to listen on all interfaces. This may not be appropriate for secure installations.</p>
<postgres-config host="{[host-db-master]}" file="{[postgres-config-demo]}">
<title>Set <pg-option>listen_addresses</pg-option></title>
<postgres-config-option key="listen_addresses">'*'</postgres-config-option>
</postgres-config>
<p>For demonstration purposes the <pg-option>log_line_prefix</pg-option> setting will be minimally configured. This keeps the log output as brief as possible to better illustrate important information.</p>
<postgres-config host="{[host-db-master]}" file="{[postgres-config-demo]}">
<title>Set <pg-option>log_line_prefix</pg-option></title>
<postgres-config-option key="log_line_prefix">''</postgres-config-option>
</postgres-config>
<p keyword="co6">By default {[user-guide-os]} includes the day of the week in the log filename. This makes automating the user guide a bit more complicated so the <pg-option>log_filename</pg-option> is set to a constant.</p>
<postgres-config host="{[host-db-master]}" keyword="co6" file="{[postgres-config-demo]}">
<title>Set <pg-option>log_filename</pg-option></title>
<postgres-config-option key="log_filename">'postgresql.log'</postgres-config-option>
</postgres-config>
</section>
<!-- SECTION => QUICKSTART - CONFIGURE STANZA -->
<section id="configure-stanza" depend="setup-demo-cluster">
<title>Configure Cluster Stanza</title>
<option-description key="stanza"/>
<p>The name 'demo' describes the purpose of this cluster accurately so that will also make a good stanza name.</p>
<p><backrest/> needs to know where the base data directory for the <postgres/> cluster is located. The path can be requested from <postgres/> directly but in a recovery scenario the <postgres/> process will not be available. During backups the value supplied to <backrest/> will be compared against the path that <postgres/> is running on and they must be equal or the backup will return an error. Make sure that <br-option>db-path</br-option> is exactly equal to <pg-option>data_directory</pg-option> in <file>postgresql.conf</file>.</p>
<p>By default {[user-guide-os]} stores clusters in <path>{[db-path-default]}</path> so it is easy to determine the correct path for the data directory.</p>
<p>When creating the <file>{[backrest-config-demo]}</file> file, the database owner (usually <id>postgres</id>) must be granted read privileges.</p>
<backrest-config host="{[host-db-master]}" file="{[backrest-config-demo]}">
<title>Configure the <postgres/> cluster data directory</title>
<backrest-config-option section="demo" key="db-path">{[db-path]}</backrest-config-option>
<backrest-config-option section="global" key="log-level-stderr">off</backrest-config-option>
</backrest-config>
<p><backrest/> configuration files follow the Windows INI convention. Sections are denoted by text in brackets and key/value pairs are contained in each section. Lines beginning with <id>#</id> are ignored and can be used as comments.</p>
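<p>As an illustration of the format (the paths shown are examples only and do not necessarily match this guide), a configuration file might look like this:</p>
<code-block title="Example configuration file structure">
# comment lines begin with # and are ignored
[global]
repo-path=/var/lib/pgbackrest

[demo]
db-path=/var/lib/postgresql/9.4/demo
</code-block>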
</section>
<!-- SECTION => QUICKSTART - CREATE REPOSITORY -->
<section id="create-repository" depend="configure-stanza">
<title>Create the Repository</title>
<option-description key="repo-path"/>
<p>For this demonstration the repository will be stored on the same host as the <postgres/> server. This is the simplest configuration and is useful in cases where traditional backup software is employed to back up the database host.</p>
<execute-list host="{[host-db-master]}">
<title>Create the <backrest/> repository</title>
<execute user="root">
<exe-cmd>mkdir {[backrest-repo-path]}</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>chmod 750 {[backrest-repo-path]}</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>chown postgres:postgres {[backrest-repo-path]}</exe-cmd>
</execute>
</execute-list>
<p>The repository path must be configured so <backrest/> knows where to find it.</p>
<backrest-config host="{[host-db-master]}" file="{[backrest-config-demo]}">
<title>Configure the <backrest/> repository path</title>
<backrest-config-option section="global" key="repo-path">{[backrest-repo-path]}</backrest-config-option>
</backrest-config>
</section>
<!-- SECTION => QUICKSTART - CONFIGURE ARCHIVING -->
<section id="configure-archiving" depend="create-repository">
<title>Configure Archiving</title>
<p>Backing up a running <postgres/> cluster requires WAL archiving to be enabled. Note that <i>at least</i> one WAL segment will be created during the backup process even if no explicit writes are made to the cluster.</p>
<postgres-config host="{[host-db-master]}" file="{[postgres-config-demo]}">
<title>Configure archive settings</title>
<postgres-config-option key="archive_command">'{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} archive-push %p'</postgres-config-option>
<postgres-config-option key="archive_mode">on</postgres-config-option>
<postgres-config-option key="wal_level">hot_standby</postgres-config-option>
<postgres-config-option key="max_wal_senders">3</postgres-config-option>
</postgres-config>
<p>The <pg-option>wal_level</pg-option> setting must be set to <pg-setting>archive</pg-setting> at a minimum but <pg-setting>hot_standby</pg-setting> and <pg-setting>logical</pg-setting> also work fine for backups. Setting <pg-option>wal_level</pg-option> to <pg-setting>hot_standby</pg-setting> and increasing <pg-option>max_wal_senders</pg-option> is a good idea even if you do not currently run a hot standby, as this will allow standbys to be added later without restarting the master cluster.</p>
<p>The <postgres/> cluster must be restarted after making these changes and before performing a backup.</p>
<execute-list host="{[host-db-master]}">
<title>Restart the {[postgres-cluster-demo]} cluster</title>
<execute user="root">
<exe-cmd>{[db-cluster-restart]}</exe-cmd>
</execute>
<execute show="n">
<exe-cmd>{[db-cluster-wait]}</exe-cmd>
</execute>
</execute-list>
<p>If archiving a WAL segment is expected to take more than 60 seconds (the default timeout), then the <br-option>archive-timeout</br-option> option should be increased.</p>
</section>
<!-- SECTION => QUICKSTART - STANZA CREATE -->
<section id="create-stanza" depend="configure-archiving">
<title>Create the Stanza</title>
<p>To create the required stanza data, <cmd>stanza-create</cmd> must be run on the host where the repository is located. The <cmd>check</cmd> command is invoked for <postgres/> versions &gt;= 9.1 to ensure archiving and backups are also properly configured. For older versions of <postgres/>, if there have been no writes since the last xlog switch it is recommended to generate some write activity and then run the <cmd>check</cmd> command manually.</p>
<execute-list host="{[host-db-master]}">
<title>Create the Stanza and Check the Configuration</title>
<execute output="y">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-log-level-console=info stanza-create</exe-cmd>
<exe-highlight>successfully created</exe-highlight>
</execute>
</execute-list>
</section>
<!-- SECTION => QUICKSTART - CHECK CONFIGURATION -->
<section id="check-configuration" depend="create-stanza">
<title>Check the Configuration</title>
<cmd-description key="check"/>
<execute-list host="{[host-db-master]}">
<title>Check the configuration</title>
<execute output="y">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-log-level-console=info check</exe-cmd>
<exe-highlight> successfully stored in the archive at </exe-highlight>
</execute>
</execute-list>
<!-- Decided not to show the error in this part of the user guide but added as a debug statement for reference. -->
<execute-list keyword="debug" host="{[host-db-master]}">
<title>Example of an invalid configuration</title>
<execute output="y" err-expect="157">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --archive-timeout=.1 check</exe-cmd>
<exe-highlight>could not find WAL segment|did not reach the archive</exe-highlight>
</execute>
</execute-list>
</section>
<!-- SECTION => QUICKSTART - PERFORM BACKUP -->
<section id="perform-backup" depend="configure-archiving">
<title>Perform a Backup</title>
<p>To perform a backup of the <postgres/> cluster run <backrest/> with the <cmd>backup</cmd> command.</p>
<execute-list host="{[host-db-master]}">
<title>Backup the {[postgres-cluster-demo]} cluster</title>
<execute output="y">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]}
--log-level-console=info backup</exe-cmd>
<exe-highlight>no prior backup exists|full backup size</exe-highlight>
</execute>
<execute show="n" variable-key="backup-full-first">
<exe-cmd>{[cmd-backup-last]}</exe-cmd>
</execute>
</execute-list>
<p>By default <backrest/> will attempt to perform an incremental backup. However, an incremental backup must be based on a full backup and since no full backup existed <backrest/> ran a full backup instead.</p>
<p>The <br-option>type</br-option> option can be used to specify a full or differential backup.</p>
<execute-list host="{[host-db-master]}">
<title>Differential backup of the {[postgres-cluster-demo]} cluster</title>
<execute output="y">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-type=diff
--log-level-console=info backup</exe-cmd>
<exe-highlight>diff backup size</exe-highlight>
</execute>
</execute-list>
<p>This time there was no warning because a full backup already existed. While incremental backups can be based on a full <i>or</i> differential backup, differential backups must be based on a full backup. A full backup can be performed by running the <cmd>backup</cmd> command with <br-setting>{[dash]}-type=full</br-setting>.</p>
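<p>For example, a full backup could be forced at any time like this (a sketch using the demo stanza from this guide):</p>
<code-block title="Force a full backup">
pgbackrest --stanza=demo --type=full backup
</code-block>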
<p>More information about the <cmd>backup</cmd> command can be found in the <link section="/backup">Backup</link> section.</p>
</section>
<!-- SECTION => QUICKSTART - SCHEDULE BACKUP -->
<section id="schedule-backup">
<title>Schedule a Backup</title>
<p>Backups can be scheduled with utilities such as cron.</p>
<p>In the following example, two cron jobs are configured; full backups are scheduled for 6:30 AM every Sunday and differential backups are scheduled for 6:30 AM Monday through Saturday. If this crontab is installed for the first time mid-week, then <backrest/> will run a full backup the first time the differential job is executed, followed the next day by a differential backup.</p>
<code-block title="crontab">
#m h dom mon dow command
30 06 * * 0 pgbackrest --type=full --stanza=demo backup
30 06 * * 1-6 pgbackrest --type=diff --stanza=demo backup
</code-block>
<p>Once backups are scheduled it's important to configure retention so backups are expired on a regular schedule; see <link section="/retention">Retention</link>.</p>
</section>
<!-- SECTION => QUICKSTART - BACKUP INFO -->
<section id="backup-info" depend="perform-backup">
<title>Backup Information</title>
<p>Use the <cmd>info</cmd> command to get information about backups.</p>
<execute-list host="{[host-db-master]}">
<title>Get info for the {[postgres-cluster-demo]} cluster</title>
<execute filter="n" output="y">
<exe-cmd>{[project-exe]} info</exe-cmd>
<exe-highlight>(full|incr|diff) backup</exe-highlight>
</execute>
</execute-list>
<p>The backups are displayed oldest to newest. The oldest backup will <i>always</i> be a full backup (indicated by an <id>F</id> at the end of the label) but the newest backup can be full, differential (ends with <id>D</id>), or incremental (ends with <id>I</id>).</p>
<p>The '<id>start / stop timestamp</id>' defines the time period when the backup ran. The '<id>stop timestamp</id>' can be used to determine the backup to use when performing Point-In-Time Recovery. More information about Point-In-Time Recovery can be found in the <link section="/pitr">Point-In-Time Recovery</link> section.</p>
<p>The '<id>database size</id>' is the full uncompressed size of the database while '<id>backup size</id>' is the amount of data actually backed up (these will be the same for full backups). The '<id>repository size</id>' includes all the files from this backup and any referenced backups that are required to restore the database while '<id>repository backup size</id>' includes only the files in this backup (these will also be the same for full backups). Repository sizes reflect compressed file sizes if compression is enabled in <backrest/> or the filesystem.</p>
<p>The '<id>backup reference list</id>' contains the additional backups that are required to restore this backup.</p>
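<p>For reference, backup labels follow the pattern shown below (the timestamps are illustrative); dependent backups append a second timestamp to the label of the full backup they are based on:</p>
<code-block title="Backup label format">
20161117-155728F                     full backup
20161117-155728F_20161117-175728D    differential backup
20161117-155728F_20161117-185728I    incremental backup
</code-block>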
</section>
<!-- SECTION => QUICKSTART - PERFORM RESTORE -->
<section id="perform-restore" depend="perform-backup">
<title>Restore a Backup</title>
<p>Backups can protect you from a number of disaster scenarios, the most common of which are hardware failure and data corruption. The easiest way to simulate data corruption is to remove an important <postgres/> cluster file.</p>
<execute-list host="{[host-db-master]}">
<title>Stop the {[postgres-cluster-demo]} cluster and delete the <file>pg_control</file> file</title>
<execute user="root">
<exe-cmd>{[db-cluster-stop]}</exe-cmd>
</execute>
<execute>
<exe-cmd>rm {[db-path]}/global/pg_control</exe-cmd>
</execute>
</execute-list>
<p>Starting the cluster without this important file will result in an error.</p>
<execute-list host="{[host-db-master]}">
<title>Attempt to start the corrupted {[postgres-cluster-demo]} cluster</title>
<execute keyword="default" user="root" output="y" err-expect="1">
<exe-cmd>{[db-cluster-start]}</exe-cmd>
<exe-highlight>could not find the database system</exe-highlight>
</execute>
<execute show="n">
<exe-cmd>{[db-cluster-wait]}</exe-cmd>
</execute>
<execute keyword="co6" user="root" show="n">
<exe-cmd>rm -f {[postgres-log-pgstartup-demo]}</exe-cmd>
</execute>
<execute keyword="co6" user="root" err-expect="1">
<exe-cmd>{[db-cluster-start]}</exe-cmd>
</execute>
<execute keyword="co6" user="root" output="y">
<exe-cmd>cat {[postgres-log-pgstartup-demo]}</exe-cmd>
<exe-highlight-type>error</exe-highlight-type>
<exe-highlight>could not find the database system</exe-highlight>
</execute>
</execute-list>
<p>To restore a backup of the <postgres/> cluster run <backrest/> with the <cmd>restore</cmd> command. The cluster needs to be stopped (in this case it is already stopped) and all files must be removed from the <postgres/> data directory.</p>
<execute-list host="{[host-db-master]}">
<title>Remove old files from {[postgres-cluster-demo]} cluster</title>
<execute>
<exe-cmd>find {[db-path]} -mindepth 1 -delete</exe-cmd>
</execute>
</execute-list>
<execute-list host="{[host-db-master]}">
<title>Restore the {[postgres-cluster-demo]} cluster and start <postgres/></title>
<execute>
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} restore</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>{[db-cluster-start]}</exe-cmd>
</execute>
<execute show="n">
<exe-cmd>{[db-cluster-wait]}</exe-cmd>
</execute>
</execute-list>
<p>This time the cluster started successfully since the restore replaced the missing <file>pg_control</file> file.</p>
<p>More information about the <cmd>restore</cmd> command can be found in the <link section="/restore">Restore</link> section.</p>
</section>
</section>
<!-- SECTION => BACKUP -->
<section id="backup" depend="/quickstart/configure-archiving">
<title>Backup</title>
<p>The Backup section introduces additional <cmd>backup</cmd> command features.</p>
<!-- SECTION => BACKUP - START-FAST -->
<section id="option-start-fast">
<title>Fast Start Option</title>
<p>By default <backrest/> will wait for the next regularly scheduled checkpoint before starting a backup. Depending on the <pg-option>checkpoint_timeout</pg-option> and <pg-option>checkpoint_segments</pg-option> settings in <postgres/> it may be quite some time before a checkpoint completes and the backup can begin.</p>
<execute-list host="{[host-db-master]}">
<title>Incremental backup of the {[postgres-cluster-demo]} cluster with the regularly scheduled checkpoint</title>
<execute output="y">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-type=incr
--log-level-console=info backup</exe-cmd>
<exe-highlight>backup begins after the next regular checkpoint completes</exe-highlight>
</execute>
</execute-list>
<p>When <br-setting>{[dash]}-start-fast</br-setting> is passed on the command-line or <br-setting>start-fast=y</br-setting> is set in <file>{[backrest-config-demo]}</file> an immediate checkpoint is requested and the backup will start more quickly. This is convenient for testing and for ad-hoc backups. For instance, if a backup is being taken at the beginning of a release window it makes no sense to wait for a checkpoint. Since regularly scheduled backups generally only happen once per day it is unlikely that enabling the <br-option>start-fast</br-option> option in <file>{[backrest-config-demo]}</file> will negatively affect performance; however, for high-volume transactional systems you may want to pass <br-setting>{[dash]}-start-fast</br-setting> on the command-line instead. Alternately, it is possible to override the setting in the configuration file by passing <br-setting>{[dash]}-no-start-fast</br-setting> on the command-line.</p>
<backrest-config host="{[host-db-master]}" file="{[backrest-config-demo]}">
<title>Enable the <br-option>start-fast</br-option> option</title>
<backrest-config-option section="global" key="start-fast">y</backrest-config-option>
</backrest-config>
<execute-list host="{[host-db-master]}">
<title>Incremental backup of the {[postgres-cluster-demo]} cluster with an immediate checkpoint</title>
<execute output="y">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-type=incr
--log-level-console=info backup</exe-cmd>
<exe-highlight>backup begins after the requested immediate checkpoint completes</exe-highlight>
</execute>
</execute-list>
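<p>As described above, the same behavior can also be controlled per invocation on the command-line (a sketch; the option may be negated to override the configuration file):</p>
<code-block title="Control start-fast on the command-line">
# request an immediate checkpoint for this backup only
pgbackrest --stanza=demo --type=incr --start-fast backup

# override start-fast=y in the configuration file for this backup only
pgbackrest --stanza=demo --type=incr --no-start-fast backup
</code-block>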
</section>
<!-- SECTION => BACKUP - STOP-AUTO -->
<section id="option-stop-auto">
<title>Automatic Stop Option</title>
<p>Sometimes <backrest/> will exit unexpectedly and the backup in progress on the <postgres/> cluster will not be properly stopped. <backrest/> exits as quickly as possible when an error occurs so that the cause can be reported accurately and is not masked by another problem that might happen during a more extensive cleanup.</p>
<p>Here an error is intentionally caused by removing repository permissions.</p>
<execute-list host="{[host-db-master]}">
<title>Revoke write privileges in the <backrest/> repository and attempt a backup</title>
<execute user="root">
<exe-cmd>chmod 550 {[backrest-repo-path]}/temp</exe-cmd>
</execute>
<execute output="y" err-expect="122">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-type=incr
--log-level-console=info backup</exe-cmd>
<exe-highlight>ERROR:</exe-highlight>
</execute>
</execute-list>
<p>Even when the permissions are fixed <backrest/> will still be unable to perform a backup because the <postgres/> cluster is stuck in backup mode.</p>
<execute-list host="{[host-db-master]}">
<title>Restore write privileges in the <backrest/> repository and attempt a backup</title>
<execute user="root">
<exe-cmd>chmod 750 {[backrest-repo-path]}/temp</exe-cmd>
</execute>
<execute output="y" err-expect="132">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-type=incr
--log-level-console=info backup</exe-cmd>
<exe-highlight>ERROR:</exe-highlight>
</execute>
</execute-list>
<p>Enabling the <br-option>stop-auto</br-option> option allows <backrest/> to stop the current backup if it detects that no other <backrest/> backup process is running.</p>
<backrest-config host="{[host-db-master]}" file="{[backrest-config-demo]}">
<title>Enable the <br-option>stop-auto</br-option> option</title>
<backrest-config-option section="global" key="stop-auto">y</backrest-config-option>
</backrest-config>
<p>Now <backrest/> will stop the old backup and start a new one so the process completes successfully.</p>
<execute-list host="{[host-db-master]}">
<title>Perform an incremental backup</title>
<execute output="y">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-type=incr
--log-level-console=info backup</exe-cmd>
<exe-highlight>cluster is already in backup mode|backup begins after the requested immediate checkpoint completes</exe-highlight>
</execute>
</execute-list>
<p>Although useful, this feature may not be appropriate when another third-party backup solution is being used to take online backups as <backrest/> will not recognize that the other software is running and may terminate a backup started by that software. However, it would be unusual to run more than one third-party backup solution at the same time so this is not likely to be a problem.</p>
<p>Note that <id>pg_dump</id> and <id>pg_basebackup</id> do not take online backups so they are not affected. It is safe to run them in conjunction with <backrest/>.</p>
</section>
<!-- SECTION => BACKUP - ARCHIVE-TIMEOUT -->
<section id="option-archive-timeout">
<title>Archive Timeout</title>
<p>During an online backup, <backrest/> waits for WAL segments that are required to make the backup consistent to be archived. This wait time is governed by the <br-option>archive-timeout</br-option> option which defaults to 60 seconds. If archiving an individual segment is known to take longer, then this option should be increased.</p>
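<p>For example, to allow up to five minutes for each WAL segment to reach the archive, the option could be set in the configuration file (the value is illustrative; <br-option>archive-timeout</br-option> is expressed in seconds):</p>
<code-block title="Example archive-timeout setting">
[global]
archive-timeout=300
</code-block>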
</section>
</section>
<!-- SECTION => RETENTION -->
<section id="retention" depend="quickstart/perform-backup">
<title>Retention</title>
<p>Generally it is best to retain as many backups as possible to provide a greater window for <link section="/pitr">Point-in-Time Recovery</link>, but practical concerns such as disk space must also be considered. Retention options remove older backups once they are no longer needed.</p>
<!-- SECTION => RETENTION - FULL -->
<section id="full">
<title>Full Backup Retention</title>
<p>Set <br-option>retention-full</br-option> to the number of full backups required. New backups must be completed before expiration will occur &amp;mdash; that means if <br-setting>retention-full=2</br-setting> then there will be three full backups stored before the oldest one is expired.</p>
<backrest-config host="{[host-db-master]}" file="{[backrest-config-demo]}">
<title>Configure <br-option>retention-full</br-option></title>
<backrest-config-option section="global" key="retention-full">2</backrest-config-option>
</backrest-config>
<p><br-setting>retention-full=2</br-setting> is now configured but currently there is only one full backup, so the next full backup to run will not expire any full backups.</p>
<execute-list host="{[host-db-master]}">
<title>Perform a full backup</title>
<execute output="y">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --type=full
--log-level-console=detail backup</exe-cmd>
<exe-highlight>archive retention on backup {[backup-full-first]}|remove archive</exe-highlight>
</execute>
<execute show="n" variable-key="backup-full-second">
<exe-cmd>{[cmd-backup-last]}</exe-cmd>
</execute>
</execute-list>
<p>Archive <i>is</i> expired because WAL segments were generated before the oldest backup. These are not useful for recovery &amp;mdash; only WAL segments generated after a backup can be used to recover that backup.</p>
<execute-list host="{[host-db-master]}">
<title>Perform a full backup</title>
<execute output="y">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --type=full
--log-level-console=info backup</exe-cmd>
<exe-highlight>expire full backup set\: {[backup-full-first]}|archive retention on backup {[backup-full-second]}|remove archive</exe-highlight>
</execute>
</execute-list>
<p>The <id>{[backup-full-first]}</id> full backup is expired and archive retention is now based on <id>{[backup-full-second]}</id>, which is the oldest remaining full backup.</p>
</section>
<!-- SECTION => RETENTION - DIFF -->
<section id="diff" depend="full">
<title>Differential Backup Retention</title>
<p>Set <br-option>retention-diff</br-option> to the number of differential backups required. Differentials only rely on the prior full backup so it is possible to create a <quote>rolling</quote> set of differentials for the last day or more. This allows quick restores to recent points-in-time but reduces overall space consumption.</p>
<backrest-config host="{[host-db-master]}" file="{[backrest-config-demo]}">
<title>Configure <br-option>retention-diff</br-option></title>
<backrest-config-option section="global" key="retention-diff">1</backrest-config-option>
</backrest-config>
<p><br-setting>retention-diff=1</br-setting> is configured, so two differential backups will need to be performed before one is expired. An incremental backup is added to demonstrate incremental expiration. Incremental backups cannot be expired independently &amp;mdash; they are always expired with their related full or differential backup.</p>
<execute-list host="{[host-db-master]}">
<title>Perform differential and incremental backups</title>
<execute>
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --type=diff backup</exe-cmd>
</execute>
<execute show="n" variable-key="backup-diff-second">
<exe-cmd>{[cmd-backup-last]}</exe-cmd>
</execute>
<execute>
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --type=incr backup</exe-cmd>
</execute>
</execute-list>
<p>Now performing a differential backup will expire the previous differential and incremental backups leaving only one differential backup.</p>
<execute-list host="{[host-db-master]}">
<title>Perform a differential backup</title>
<execute output="y">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --type=diff
--log-level-console=info backup</exe-cmd>
<exe-highlight>expire diff backup set: {[backup-diff-second]}</exe-highlight>
</execute>
</execute-list>
</section>
<!-- SECTION => RETENTION - ARCHIVE -->
<section id="archive" depend="diff">
<title>Archive Retention</title>
<p>Although <backrest/> automatically removes archived WAL segments when expiring backups (the default expires WAL for full backups based on the <br-option>retention-full</br-option> option), it may be useful to expire archive more aggressively to save disk space. Note that full backups are treated as differential backups for the purpose of differential archive retention.</p>
<p>Expiring archive will never remove WAL segments that are required to make a backup consistent. However, since Point-in-Time-Recovery (PITR) only works on a continuous WAL stream, care should be taken when aggressively expiring archive outside of the normal backup expiration process.</p>
<backrest-config host="{[host-db-master]}" file="{[backrest-config-demo]}">
<title>Configure <br-option>retention-diff</br-option></title>
<backrest-config-option section="global" key="retention-diff">2</backrest-config-option>
</backrest-config>
<execute-list host="{[host-db-master]}">
<title>Perform differential backup</title>
<execute show="n" variable-key="backup-diff-first">
<exe-cmd>{[cmd-backup-last]}</exe-cmd>
</execute>
<!-- Push a few xlogs to make the example below more interesting -->
<execute show="n">
<exe-cmd>psql -c "
select pg_create_restore_point('generate WAL'); select pg_switch_xlog();
select pg_create_restore_point('generate WAL'); select pg_switch_xlog();"</exe-cmd>
</execute>
<execute output="y">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --type=diff
--log-level-console=info backup</exe-cmd>
<exe-highlight>new backup label</exe-highlight>
</execute>
<execute show="n" variable-key="backup-diff-second">
<exe-cmd>{[cmd-backup-last]}</exe-cmd>
</execute>
</execute-list>
<execute-list host="{[host-db-master]}">
<title>Expire archive</title>
<execute output="y">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --log-level-console=detail
--retention-archive-type=diff --retention-archive=1 expire</exe-cmd>
<exe-highlight>archive retention on backup {[backup-diff-first]}|remove archive</exe-highlight>
</execute>
</execute-list>
<p>The <id>{[backup-diff-first]}</id> differential backup has archived WAL segments that must be retained to make the older backups consistent even though they cannot be played any further forward with PITR. WAL segments generated after <id>{[backup-diff-first]}</id> but before <id>{[backup-diff-second]}</id> are removed. WAL segments generated after the new backup <id>{[backup-diff-second]}</id> remain and can be used for PITR.</p>
<p>Since full backups are considered differential backups for the purpose of differential archive retention, if a full backup is now performed with the same settings, only the archive for that full backup is retained for PITR.</p>
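<p>To apply a similar archive retention policy automatically at expiration time rather than running <cmd>expire</cmd> manually, the same options can be set in the configuration file (values shown for illustration):</p>
<code-block title="Example archive retention settings">
[global]
retention-archive-type=diff
retention-archive=1
</code-block>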
</section>
</section>
<!-- SECTION => RESTORE -->
<section id="restore" depend="quickstart/perform-backup">
<title>Restore</title>
<p>The Restore section introduces additional <cmd>restore</cmd> command features.</p>
<!-- SECTION => RESTORE - DELTA -->
<section id="option-delta">
<title>Delta Option</title>
<p><link section="/quickstart/perform-restore">Restore a Backup</link> in <link section="/quickstart">Quick Start</link> required the database cluster directory to be cleaned before the <cmd>restore</cmd> could be performed. The <br-option>delta</br-option> option allows <backrest/> to automatically determine which files in the database cluster directory can be preserved and which ones need to be restored from the backup &amp;mdash; it also <i>removes</i> files not present in the backup manifest so it will dispose of divergent changes. This is accomplished by calculating a <link url="https://en.wikipedia.org/wiki/SHA-1">SHA-1</link> cryptographic hash for each file in the database cluster directory. If the <id>SHA-1</id> hash does not match the hash stored in the backup then that file will be restored. This operation is very efficient when combined with the <br-option>process-max</br-option> option. Since the <postgres/> server is shut down during the restore, a larger number of processes can be used than might be desirable during a backup when the <postgres/> server is running.</p>
<execute-list host="{[host-db-master]}">
<title>Stop the {[postgres-cluster-demo]} cluster, perform delta restore</title>
<execute user="root">
<exe-cmd>{[db-cluster-stop]}</exe-cmd>
</execute>
<execute output="y" filter="y">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-delta
--log-level-console=detail restore</exe-cmd>
<exe-highlight>demo\/PG_VERSION - exists and matches backup|check\/clean db path|restore global\/pg_control</exe-highlight>
</execute>
</execute-list>
<execute-list host="{[host-db-master]}">
<title>Restart <postgres/></title>
<execute user="root">
<exe-cmd>{[db-cluster-start]}</exe-cmd>
</execute>
<execute show="n">
<exe-cmd>{[db-cluster-wait]}</exe-cmd>
</execute>
</execute-list>
</section>
<!-- SECTION => RESTORE - DELTA -->
<section id="option-db-include">
<title>Restore Selected Databases</title>
<p>There may be cases where it is desirable to selectively restore specific databases from a cluster backup. This could be done for performance reasons or to move selected databases to a machine that does not have enough space to restore the entire cluster backup.</p>
<p>To demonstrate this feature two databases are created: test1 and test2. A fresh backup is run so <backrest/> is aware of the new databases.</p>
<execute-list host="{[host-db-master]}">
<title>Create two test databases and perform a backup</title>
<execute output="y" filter="n">
<exe-cmd>
psql -c "create database test1;"
</exe-cmd>
</execute>
<execute output="y" filter="n">
<exe-cmd>
psql -c "create database test2;"
</exe-cmd>
</execute>
<execute>
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --type=incr backup</exe-cmd>
</execute>
</execute-list>
<p>Each test database will be seeded with tables and data to demonstrate that recovery works with selective restore.</p>
<execute-list host="{[host-db-master]}">
<title>Create a test table in each database</title>
<execute output="y" filter="n">
<exe-cmd>
psql -c "create table test1_table (id int);
insert into test1_table (id) values (1);" test1
</exe-cmd>
</execute>
<execute output="y" filter="n">
<exe-cmd>
psql -c "create table test2_table (id int);
insert into test2_table (id) values (2);" test2
</exe-cmd>
</execute>
</execute-list>
<p>One of the main reasons to use selective restore is to save space. The size of the test1 database is shown here so it can be compared with the disk utilization after a selective restore.</p>
<execute-list host="{[host-db-master]}">
<title>Show space used by test1 database</title>
<execute output="y" filter="n">
<exe-cmd>
du -sh {[db-path]}/base/16384
</exe-cmd>
</execute>
</execute-list>
<p>Stop the cluster and restore only the test2 database. Built-in databases (<id>template0</id>, <id>template1</id>, and <id>postgres</id>) are always restored.</p>
<execute-list host="{[host-db-master]}">
<title>Restore from last backup including only the test2 database</title>
<execute user="root">
<exe-cmd>{[db-cluster-stop]}</exe-cmd>
</execute>
<execute>
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-delta
{[dash]}-db-include=test2 restore</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>{[db-cluster-start]}</exe-cmd>
</execute>
<execute show="n">
<exe-cmd>{[db-cluster-wait]}</exe-cmd>
</execute>
</execute-list>
<p>Once recovery is complete the test2 database will contain all previously created tables and data.</p>
<execute-list host="{[host-db-master]}">
<title>Demonstrate that the test2 database was recovered</title>
<execute output="y" filter="n">
<exe-cmd>
psql -c "select * from test2_table;" test2
</exe-cmd>
</execute>
</execute-list>
<p>The test1 database, despite successful recovery, is not accessible. This is because the entire database was restored as sparse, zeroed files. <postgres/> can successfully apply WAL on the zeroed files but the database as a whole will not be valid because key files contain no data. This is purposeful to prevent the database from being accidentally used when it might contain partial data that was applied during WAL replay.</p>
<execute-list host="{[host-db-master]}">
<title>Attempting to connect to the test1 database will produce an error</title>
<execute output="y" filter="n" err-expect="2">
<exe-cmd>
psql -c "select * from test1_table;" test1
</exe-cmd>
<exe-highlight>relation mapping file.*contains invalid data</exe-highlight>
</execute>
</execute-list>
<p>Since the test1 database is restored with sparse, zeroed files it will only require as much space as the amount of WAL that is written during recovery. While the amount of WAL generated during a backup and applied during recovery can be significant it will generally be a small fraction of the total database size, especially for large databases where this feature is most likely to be useful.</p>
<p>It is clear that the test1 database uses far less disk space during the selective restore than it would have if the entire database had been restored.</p>
<execute-list host="{[host-db-master]}">
<title>Show space used by test1 database after recovery</title>
<execute output="y" filter="n">
<exe-cmd>
du -sh {[db-path]}/base/16384
</exe-cmd>
</execute>
</execute-list>
<p>At this point the only action that can be taken on the invalid test1 database is <id>drop database</id>. <backrest/> does not automatically drop the database since this cannot be done until recovery is complete and the cluster is accessible.</p>
<execute-list host="{[host-db-master]}">
<title>Drop the test1 database</title>
<execute output="y" filter="n">
<exe-cmd>
psql -c "drop database test1;"
</exe-cmd>
</execute>
</execute-list>
<p>Now that the invalid test1 database has been dropped only the test2 and built-in databases remain.</p>
<execute-list host="{[host-db-master]}">
<title>List remaining databases</title>
<execute output="y" filter="n">
<exe-cmd>
psql -c "select oid, datname from pg_database order by oid;"
</exe-cmd>
<exe-highlight>test2</exe-highlight>
</execute>
</execute-list>
</section>
</section>
<!-- SECTION => PITR -->
<section id="pitr" depend="quickstart/perform-backup">
<title>Point-in-Time Recovery</title>
<p><link section="/quickstart/perform-restore">Restore a Backup</link> in <link section="/quickstart">Quick Start</link> performed default recovery, which is to play all the way to the end of the WAL stream. In the case of a hardware failure this is usually the best choice but for data corruption scenarios (whether machine or human in origin) Point-in-Time Recovery (PITR) is often more appropriate.</p>
<p>Point-in-Time Recovery (PITR) allows the WAL to be played from the last backup to a specified time, transaction id, or recovery point. For common recovery scenarios time-based recovery is arguably the most useful. A typical recovery scenario is to restore a table that was accidentally dropped or data that was accidentally deleted. Recovering a dropped table is more dramatic so that's the example given here but deleted data would be recovered in exactly the same way.</p>
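<p>For reference (the target values below are purely illustrative), the <cmd>restore</cmd> command also accepts targets other than a time, corresponding to a transaction id or a named restore point:</p>
<code-block title="Other recovery target types">
# recover to a specific transaction id
pgbackrest --stanza=demo --delta --type=xid --target=1000 restore

# recover to a named restore point created with pg_create_restore_point()
pgbackrest --stanza=demo --delta --type=name --target=release-point restore
</code-block>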
<execute-list host="{[host-db-master]}">
<title>Backup the {[postgres-cluster-demo]} cluster and create a table with very important data</title>
<execute>
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --type=diff backup</exe-cmd>
</execute>
<execute output="y">
<exe-cmd>
psql -c "begin;
create table important_table (message text);
insert into important_table values ('{[test-table-data]}');
commit;
select * from important_table;"
</exe-cmd>
<exe-highlight>{[test-table-data]}</exe-highlight>
</execute>
</execute-list>
<p>It is important to represent the time as reckoned by <postgres/> and to include timezone offsets. This reduces the possibility of unintended timezone conversions and an unexpected recovery result.</p>
<execute-list host="{[host-db-master]}">
<title>Get the time from <postgres/></title>
<execute output="y" filter="n" variable-key="time-recovery-timestamp">
<exe-cmd>
psql -Atc "select current_timestamp"
</exe-cmd>
</execute>
</execute-list>
<p>Now that the time has been recorded the table is dropped. In practice finding the exact time that the table was dropped is a lot harder than in this example. It may not be possible to find the exact time, but some forensic work should be able to get you close.</p>
<execute-list host="{[host-db-master]}">
<title>Drop the important table</title>
<execute output="y" err-expect="1">
<exe-cmd>psql -c "begin;
drop table important_table;
commit;
select * from important_table;"</exe-cmd>
<exe-highlight>does not exist</exe-highlight>
</execute>
</execute-list>
<p>Now the restore can be performed with time-based recovery to bring back the missing table.</p>
<execute-list host="{[host-db-master]}">
<title>Stop <postgres/>, restore the {[postgres-cluster-demo]} cluster to <id>{[time-recovery-timestamp]}</id>, and display <file>recovery.conf</file></title>
<execute user="root">
<exe-cmd>{[db-cluster-stop]}</exe-cmd>
</execute>
<execute>
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-delta
{[dash]}-type=time "{[dash]}-target={[time-recovery-timestamp]}" restore</exe-cmd>
</execute>
<execute user="root" show="n">
<exe-cmd>rm {[postgres-log-demo]}</exe-cmd>
</execute>
<execute output="y">
<exe-cmd>cat {[postgres-recovery-demo]}</exe-cmd>
<exe-highlight>recovery_target_time</exe-highlight>
</execute>
</execute-list>
<p>The <file>recovery.conf</file> file has been automatically generated by <backrest/> so <postgres/> can be started immediately. Once <postgres/> has finished recovery the table will exist again and can be queried.</p>
<execute-list host="{[host-db-master]}">
<title>Start <postgres/> and check that the important table exists</title>
<execute user="root">
<exe-cmd>{[db-cluster-start]}</exe-cmd>
</execute>
<execute show="n">
<exe-cmd>{[db-cluster-wait]}</exe-cmd>
</execute>
<execute output="y">
<exe-cmd>psql -c "select * from important_table"</exe-cmd>
<exe-highlight>{[test-table-data]}</exe-highlight>
</execute>
</execute-list>
<p>The <postgres/> log also contains valuable information. It will indicate the time and transaction where the recovery stopped and also give the time of the last transaction to be applied.</p>
<execute-list host="{[host-db-master]}">
<title>Examine the <postgres/> log output</title>
<execute output="y">
<exe-cmd>cat {[postgres-log-demo]}</exe-cmd>
<exe-highlight>recovery stopping before|last completed transaction|starting point-in-time recovery</exe-highlight>
</execute>
</execute-list>
<p>This example was rigged to give the correct result. If a backup after the required time is chosen then <postgres/> will not be able to recover the lost table. <postgres/> can only play forward, not backward. To demonstrate this the important table must be dropped (again).</p>
<execute-list host="{[host-db-master]}">
<title>Drop the important table (again)</title>
<execute output="y" err-expect="1">
<exe-cmd>psql -c "begin;
drop table important_table;
commit;
select * from important_table;"</exe-cmd>
<exe-highlight>does not exist</exe-highlight>
</execute>
</execute-list>
<p>Now take a new backup and attempt recovery from the new backup.</p>
<execute-list host="{[host-db-master]}">
<title>Perform a backup then attempt recovery from that backup</title>
<execute show="n" variable-key="backup-last">
<exe-cmd>{[cmd-backup-last]}</exe-cmd>
</execute>
<execute>
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-type=incr backup</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>{[db-cluster-stop]}</exe-cmd>
</execute>
<execute>
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-delta
{[dash]}-type=time "{[dash]}-target={[time-recovery-timestamp]}" restore</exe-cmd>
</execute>
<execute user="root" show="n">
<exe-cmd>rm {[postgres-log-demo]}</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>{[db-cluster-start]}</exe-cmd>
</execute>
<execute show="n">
<exe-cmd>{[db-cluster-wait]}</exe-cmd>
</execute>
<execute output="y" err-expect="1">
<exe-cmd>psql -c "select * from important_table"</exe-cmd>
<exe-highlight>does not exist</exe-highlight>
</execute>
</execute-list>
<p>Looking at the log output, it's not obvious that recovery failed to restore the table. The key is to look for the presence of the <quote>recovery stopping before...</quote> and <quote>last completed transaction...</quote> log messages. If they are not present then the recovery to the specified point-in-time was not successful.</p>
<execute-list host="{[host-db-master]}">
<title>Examine the <postgres/> log output to discover the recovery was not successful</title>
<execute output="y">
<exe-cmd>cat {[postgres-log-demo]}</exe-cmd>
<exe-highlight>starting point-in-time recovery|consistent recovery state reached</exe-highlight>
</execute>
</execute-list>
<p>Using an earlier backup will allow <postgres/> to play forward to the correct time. The <cmd>info</cmd> command can be used to find the next to last backup.</p>
<execute-list host="{[host-db-master]}">
<title>Get backup info for the {[postgres-cluster-demo]} cluster</title>
<execute filter="n" output="y">
<exe-cmd>{[project-exe]} info</exe-cmd>
<exe-highlight>{[backup-last]}</exe-highlight>
</execute>
</execute-list>
<p>The default behavior for restore is to use the last backup but an earlier backup can be specified with the <br-option>{[dash]}-set</br-option> option.</p>
<execute-list host="{[host-db-master]}">
<title>Stop <postgres/>, restore from the selected backup, and start <postgres/></title>
<execute user="root">
<exe-cmd>{[db-cluster-stop]}</exe-cmd>
</execute>
<execute>
<exe-cmd>
{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-delta
{[dash]}-type=time "{[dash]}-target={[time-recovery-timestamp]}"
{[dash]}-set={[backup-last]} restore
</exe-cmd>
</execute>
<execute user="root" show="n">
<exe-cmd>rm {[postgres-log-demo]}</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>{[db-cluster-start]}</exe-cmd>
</execute>
<execute show="n">
<exe-cmd>{[db-cluster-wait]}</exe-cmd>
</execute>
<execute output="y">
<exe-cmd>psql -c "select * from important_table"</exe-cmd>
<exe-highlight>{[test-table-data]}</exe-highlight>
</execute>
</execute-list>
<p>Now the log output will contain the expected <quote>recovery stopping before...</quote> and <quote>last completed transaction...</quote> messages showing that the recovery was successful.</p>
<execute-list host="{[host-db-master]}">
<title>Examine the <postgres/> log output for log messages indicating success</title>
<execute output="y">
<exe-cmd>cat {[postgres-log-demo]}</exe-cmd>
<exe-highlight>recovery stopping before|last completed transaction|starting point-in-time recovery</exe-highlight>
</execute>
</execute-list>
</section>
<!-- SECTION => BACKUP HOST -->
<section id="backup-host" depend="/quickstart/configure-archiving">
<title>Dedicated Backup Host</title>
<p>The configuration described in <link section="/quickstart">Quickstart</link> is suitable for simple installations but for enterprise configurations it is more typical to have a dedicated <host>backup</host> host. This separates the backups and WAL archive from the database server so <host>database</host> host failures have less impact. It is still a good idea to employ traditional backup software to back up the <host>backup</host> host.</p>
<!-- SECTION => BACKUP HOST - INSTALL/CONFIGURE -->
<section id="install-config">
<title>Installation and Configuration</title>
<host-add name="{[host-backup]}" user="{[host-backup-user]}" image="{[host-backup-image]}" os="{[host-os]}" mount="{[host-backup-mount]}">
<execute user="root">
<exe-cmd>cp -r /backrest/lib/pgBackRest {[perl-lib-path]}</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>find {[perl-lib-path]}/pgBackRest -type f -exec chmod 644 {} +</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>find {[perl-lib-path]}/pgBackRest -type d -exec chmod 755 {} +</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>cp /backrest/bin/{[project-exe]} {[perl-bin-path]}/{[project-exe]}</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>chmod 755 {[perl-bin-path]}/{[project-exe]}</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>mkdir -m 770 /var/log/pgbackrest</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>chown backrest:postgres /var/log/pgbackrest</exe-cmd>
</execute>
</host-add>
<backrest-config host="{[host-backup]}" show="n" owner="backrest:postgres" file="{[backrest-config-demo]}">
<title>Configure the <backrest/> repository path</title>
<backrest-config-option section="global" key="repo-path">{[backrest-repo-path]}</backrest-config-option>
</backrest-config>
<p>For this example a new host named <host>backup</host> has been created to store the cluster backups. Follow the instructions in <link section="/installation">Installation</link> to install <backrest/> and <link section="/quickstart/create-repository">Create the Repository</link> to create the <backrest/> repository. The <host>backup</host> host must also be configured with the <host>db-master</host> host/user and database path. The master database will be configured as <id>db1</id> to allow a standby to be added later.</p>
<backrest-config host="{[host-backup]}" owner="backrest:postgres" file="{[backrest-config-demo]}">
<title>Configure <br-option>db1-host</br-option>/<br-option>db1-user</br-option> and <br-option>db1-path</br-option></title>
<backrest-config-option section="demo" key="db1-path">{[db-path]}</backrest-config-option>
<backrest-config-option section="demo" key="db1-host">{[host-db-master]}</backrest-config-option>
<backrest-config-option section="demo" key="db1-user">postgres</backrest-config-option>
<backrest-config-option section="global" key="start-fast">y</backrest-config-option>
<backrest-config-option section="global" key="log-level-stderr">off</backrest-config-option>
</backrest-config>
<p>The database host must be configured with the backup host/user. The default for the <br-option>backup-user</br-option> option is <id>backrest</id>. If the <id>postgres</id> user performs restores on the <host>backup</host> host, it is best not to also allow the <id>postgres</id> user to perform backups. However, the <id>postgres</id> user can still read the repository directly if it is in the same group as the <id>backrest</id> user.</p>
<backrest-config host="{[host-db-master]}" file="{[backrest-config-demo]}" reset="y">
<title>Configure <br-option>backup-host</br-option>/<br-option>backup-user</br-option></title>
<backrest-config-option section="demo" key="db-path">{[db-path]}</backrest-config-option>
<backrest-config-option section="global" key="repo-path">{[backrest-repo-path]}</backrest-config-option>
<backrest-config-option section="global" key="backup-host">{[host-backup]}</backrest-config-option>
<backrest-config-option section="global" key="backup-user">backrest</backrest-config-option>
<backrest-config-option section="global" key="log-level-stderr">off</backrest-config-option>
</backrest-config>
<p>The repository directory will also be removed from the database host. It will not be used anymore so leaving it around may be confusing later on.</p>
<execute-list host="{[host-db-master]}">
<title>Remove repository now that it will be located on the backup server</title>
<execute user="root">
<exe-cmd>find {[backrest-repo-path]} -delete</exe-cmd>
</execute>
</execute-list>
<p>Commands are run the same as in a single host configuration except that the <cmd>backup</cmd> and <cmd>expire</cmd> commands are run from the <host>backup</host> host and all other commands are run from the <host>database</host> host.</p>
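<p>For example, backups could be scheduled with <id>cron</id> in the <id>backrest</id> user's crontab on the <host>backup</host> host. The schedule and backup types below are only an illustration and should be adjusted to local requirements:</p>
<code-block>
# m h  dom mon dow  command
30 06  *   *   0    pgbackrest --type=full --stanza=demo backup
30 06  *   *   1-6  pgbackrest --type=diff --stanza=demo backup
</code-block>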
<p>Check that the configuration is correct on both the <host>database</host> and <host>backup</host> hosts. More information about the <cmd>check</cmd> command can be found in <link section="/quickstart/check-configuration">Check the Configuration</link>.</p>
<execute-list host="{[host-db-master]}">
<title>Check the configuration</title>
<execute output="y" filter="n" >
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} check</exe-cmd>
</execute>
</execute-list>
<execute-list host="{[host-backup]}">
<title>Check the configuration</title>
<execute user="backrest" output="y" filter="n" >
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} check</exe-cmd>
</execute>
</execute-list>
</section>
<!-- SECTION => BACKUP HOST - PERFORM BACKUP -->
<section id="perform-backup" depend="install-config">
<title>Perform a Backup</title>
<p>To perform a backup of the <postgres/> cluster run <backrest/> with the <cmd>backup</cmd> command on the <host>backup</host> host.</p>
<execute-list host="{[host-backup]}">
<title>Backup the {[postgres-cluster-demo]} cluster</title>
<execute user="backrest" output="y" filter="n">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} backup</exe-cmd>
</execute>
</execute-list>
<p>Since a new repository was created on the <host>backup</host> host, a warning was emitted indicating that the incremental backup has been changed to a full backup.</p>
</section>
<!-- SECTION => BACKUP HOST - PERFORM RESTORE -->
<section id="perform-restore" depend="perform-backup">
<title>Restore a Backup</title>
<p>To perform a restore of the <postgres/> cluster run <backrest/> with the <cmd>restore</cmd> command on the <host>database</host> host.</p>
<execute-list host="{[host-db-master]}">
<title>Stop the {[postgres-cluster-demo]} cluster, restore, and restart <postgres/></title>
<execute user="root">
<exe-cmd>{[db-cluster-stop]}</exe-cmd>
</execute>
<execute>
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-delta restore</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>{[db-cluster-start]}</exe-cmd>
</execute>
<execute show="n">
<exe-cmd>{[db-cluster-wait]}</exe-cmd>
</execute>
</execute-list>
<p>A new backup must be performed due to the timeline switch.</p>
<execute-list host="{[host-backup]}">
<title>Backup the {[postgres-cluster-demo]} cluster</title>
<execute user="backrest">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} backup</exe-cmd>
</execute>
</execute-list>
</section>
<!-- SECTION => BACKUP HOST - ASYNCHRONOUS ARCHIVING -->
<section id="async-archiving" depend="install-config">
<title>Asynchronous Archiving</title>
<p>The <br-option>archive-async</br-option> option offloads WAL archiving to a separate process to improve throughput. WAL segments are temporarily stored in a local queue on the database server, specified by the <br-option>spool-path</br-option> option, before being transferred to the repository on the backup server.</p>
<p>The spool directory is created to hold the WAL segments while they are waiting to be (optionally) compressed and transferred.</p>
<execute-list host="{[host-db-master]}">
<title>Create the spool directory</title>
<execute user="root">
<exe-cmd>mkdir -m 750 {[spool-path]}</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>chown postgres:postgres {[spool-path]}</exe-cmd>
</execute>
</execute-list>
<p>The spool path must be configured and asynchronous archiving enabled.</p>
<backrest-config host="{[host-db-master]}" file="{[backrest-config-demo]}">
<title>Configure the spool path and asynchronous archiving</title>
<backrest-config-option section="global" key="spool-path">{[spool-path]}</backrest-config-option>
<backrest-config-option section="global" key="archive-async">y</backrest-config-option>
</backrest-config>
<p>The <cmd>check</cmd> command ensures that asynchronous archiving is working.</p>
<execute-list host="{[host-db-master]}">
<title>Check asynchronous archiving</title>
<execute>
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-log-level-console=info check</exe-cmd>
<exe-highlight>WAL segment</exe-highlight>
</execute>
</execute-list>
<!-- DEBUG test to ensure async archiving is working in batch until better tests can be written -->
<execute-list host="{[host-db-master]}" keyword="debug">
<title>DEBUG asynchronous archiving</title>
<execute output="n">
<exe-cmd>rm /var/log/pgbackrest/demo-archive-async.log</exe-cmd>
</execute>
<execute output="n">
<exe-cmd>
psql -c "
select pg_create_restore_point('test asynchronous archiving');
select pg_switch_xlog();
select pg_create_restore_point('test asynchronous archiving');
select pg_switch_xlog();
select pg_create_restore_point('test asynchronous archiving');
select pg_switch_xlog();
select pg_create_restore_point('test asynchronous archiving');
select pg_switch_xlog();
select pg_create_restore_point('test asynchronous archiving');
select pg_switch_xlog();"
</exe-cmd>
</execute>
<execute output="n">
<exe-cmd>sleep 5</exe-cmd>
</execute>
<execute output="y">
<exe-cmd>cat /var/log/pgbackrest/demo-archive-async.log</exe-cmd>
<exe-highlight>WAL segments to archive</exe-highlight>
</execute>
</execute-list>
</section>
</section>
<!-- SECTION => START/STOP -->
<section id="start-stop" depend="/quickstart/configure-archiving">
<title>Starting and Stopping</title>
<p>Sometimes it is useful to prevent <backrest/> from running on a system. For example, when failing over from a master to a standby it's best to prevent <backrest/> from running on the old master in case <postgres/> gets restarted or can't be completely killed. This will also prevent <backrest/> from being run by <id>cron</id>.</p>
<execute-list host="{[host-db-master]}">
<title>Stop the <backrest/> services</title>
<execute>
<exe-cmd>{[project-exe]} stop</exe-cmd>
</execute>
</execute-list>
<p>New <backrest/> processes will no longer run.</p>
<execute-list host="{[host-db-master]}">
<title>Attempt a backup</title>
<execute err-expect="137" output="y">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} backup</exe-cmd>
<exe-highlight>stop file exists for all stanzas</exe-highlight>
</execute>
</execute-list>
<p>Specify the <br-option>--force</br-option> option to terminate any <backrest/> processes that are currently running. If <backrest/> is already stopped then stopping again will generate a warning.</p>
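<p>For illustration, a forced stop would be run as <code>pgbackrest --force stop</code>. The example below stops without <br-option>--force</br-option> so the warning can be seen.</p>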
<execute-list host="{[host-db-master]}">
<title>Stop the <backrest/> services again</title>
<execute output="y" filter="n">
<exe-cmd>{[project-exe]} stop</exe-cmd>
</execute>
</execute-list>
<p>Start <backrest/> processes again with the <cmd>start</cmd> command.</p>
<execute-list host="{[host-db-master]}">
<title>Start the <backrest/> services</title>
<execute>
<exe-cmd>{[project-exe]} start</exe-cmd>
</execute>
</execute-list>
<p>It is also possible to stop <backrest/> for a single stanza.</p>
<execute-list host="{[host-db-master]}">
<title>Stop <backrest/> services for the <id>demo</id> stanza</title>
<execute>
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} stop</exe-cmd>
</execute>
<execute err-expect="137" output="y">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} backup</exe-cmd>
<exe-highlight>stop file exists for stanza demo</exe-highlight>
</execute>
</execute-list>
<p>The stanza must also be specified when starting the <backrest/> processes for a single stanza.</p>
<execute-list host="{[host-db-master]}">
<title>Start the <backrest/> services for the <id>demo</id> stanza</title>
<execute>
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} start</exe-cmd>
</execute>
</execute-list>
</section>
<!-- SECTION => REPLICATION -->
<section id="replication" depend="/backup-host/perform-backup">
<title>Replication</title>
<p>Replication allows multiple copies of a <postgres/> cluster (called standbys) to be created from a single master. The standbys are useful for balancing reads and for providing redundancy in case the master host fails.</p>
<section id="hot-standby">
<title>Hot Standby</title>
<p>A hot standby performs replication using the WAL archive and allows read-only queries.</p>
<p>A new host named <host>db-standby</host> will be created to run the standby. Follow the instructions in <link section="/installation">Installation</link> to install <backrest/>, <link section="/quickstart/setup-demo-cluster">Setup Demo Cluster</link> to setup the demo cluster, and <link section="/quickstart/create-repository">Create the Repository</link> to create the <backrest/> repository on the <host>db-standby</host> host.</p>
<host-add name="{[host-db-standby]}" user="{[host-db-standby-user]}" image="{[host-db-standby-image]}" os="{[host-os]}" mount="{[host-db-standby-mount]}">
<!-- Install packages -->
<execute user="root" keyword="default">
<exe-cmd>apt-get -y install libdbd-pg-perl</exe-cmd>
</execute>
<execute user="root" keyword="co6">
<exe-cmd>yum -y install perl perl-Time-HiRes perl-parent
perl-JSON perl-Digest-SHA perl-DBD-Pg</exe-cmd>
</execute>
<!-- Install backrest -->
<execute user="root">
<exe-cmd>cp -r /backrest/lib/pgBackRest {[perl-lib-path]}</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>find {[perl-lib-path]}/pgBackRest -type f -exec chmod 644 {} +</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>find {[perl-lib-path]}/pgBackRest -type d -exec chmod 755 {} +</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>cp /backrest/bin/{[project-exe]}
{[perl-bin-path]}/{[project-exe]}</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>chmod 755 {[perl-bin-path]}/{[project-exe]}</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>mkdir -m 770 /var/log/pgbackrest</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>chown postgres:postgres /var/log/pgbackrest</exe-cmd>
</execute>
<!-- Create repository -->
<execute user="root">
<exe-cmd>mkdir {[backrest-repo-path]}</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>chmod 750 {[backrest-repo-path]}</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>chown postgres:postgres {[backrest-repo-path]}</exe-cmd>
</execute>
<!-- Start the cluster -->
<execute user="root" output="y" filter="n">
<exe-cmd>{[db-cluster-create]}</exe-cmd>
</execute>
</host-add>
<postgres-config host="{[host-db-standby]}" keyword="default" file="{[postgres-config-demo]}" show="n">
<title>Set options</title>
<postgres-config-option key="log_line_prefix">''</postgres-config-option>
<postgres-config-option key="log_filename">'postgresql.log'</postgres-config-option>
</postgres-config>
<p><backrest/> configuration is very similar to <host>db-master</host> except that the <pg-option>standby_mode</pg-option> setting will be enabled to keep the cluster in recovery mode when the end of the WAL stream has been reached.</p>
<backrest-config host="{[host-db-standby]}" file="{[backrest-config-demo]}">
<title>Configure <backrest/> on the standby</title>
<backrest-config-option section="demo" key="db-path">{[db-path]}</backrest-config-option>
<backrest-config-option section="global" key="repo-path">{[backrest-repo-path]}</backrest-config-option>
<backrest-config-option section="global" key="backup-host">{[host-backup]}</backrest-config-option>
<backrest-config-option section="global" key="log-level-stderr">off</backrest-config-option>
<backrest-config-option section="demo" key="recovery-option">standby_mode=on</backrest-config-option>
</backrest-config>
<p>Now the standby can be created with the <cmd>restore</cmd> command.</p>
<execute-list host="{[host-db-standby]}">
<title>Restore the {[postgres-cluster-demo]} standby cluster</title>
<execute>
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-delta restore</exe-cmd>
</execute>
<execute output="y" filter="n">
<exe-cmd>cat {[postgres-recovery-demo]}</exe-cmd>
</execute>
</execute-list>
<p>Note that the <pg-setting>standby_mode</pg-setting> setting has been written into the <file>recovery.conf</file> file. Configuring recovery settings in <backrest/> means that the <file>recovery.conf</file> file does not need to be stored elsewhere since it will be properly recreated with each restore. The <br-setting>--type=preserve</br-setting> option can be used with the <cmd>restore</cmd> command to leave the existing <file>recovery.conf</file> file in place if that behavior is preferred.</p>
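<p>For illustration, a delta restore that keeps the existing <file>recovery.conf</file> might look like this:</p>
<code-block>
pgbackrest --stanza=demo --delta --type=preserve restore
</code-block>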
<p>The <pg-setting>hot_standby</pg-setting> setting must be enabled before starting <postgres/> to allow read-only connections on <host>db-standby</host>. Otherwise, connection attempts will be refused.</p>
<postgres-config host="{[host-db-standby]}" file="{[postgres-config-demo]}">
<title>Enable <pg-option>hot_standby</pg-option></title>
<postgres-config-option key="hot_standby">on</postgres-config-option>
</postgres-config>
<execute-list host="{[host-db-standby]}">
<title>Start <postgres/></title>
<execute user="root" show="n">
<exe-cmd>rm {[postgres-log-demo]}</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>{[db-cluster-start]}</exe-cmd>
</execute>
<execute show="n">
<exe-cmd>{[db-cluster-wait]}</exe-cmd>
</execute>
</execute-list>
<p>The <postgres/> log gives valuable information about the recovery. Note especially that the cluster has entered standby mode and is ready to accept read-only connections.</p>
<execute-list host="{[host-db-standby]}">
<title>Examine the <postgres/> log output for log messages indicating success</title>
<execute output="y">
<exe-cmd>cat {[postgres-log-demo]}</exe-cmd>
<exe-highlight>entering standby mode|database system is ready to accept read only connections</exe-highlight>
</execute>
</execute-list>
<p>An easy way to test that replication is properly configured is to create a table on <host>db-master</host>.</p>
<execute-list host="{[host-db-master]}">
<title>Create a new table on the master</title>
<execute output="y">
<exe-cmd>
psql -c "
begin;
create table replicated_table (message text);
insert into replicated_table values ('{[test-table-data]}');
commit;
select * from replicated_table";
</exe-cmd>
<exe-highlight>{[test-table-data]}</exe-highlight>
</execute>
</execute-list>
<p>And then query the same table on <host>db-standby</host>.</p>
<execute-list host="{[host-db-standby]}">
<title>Query new table on the standby</title>
<execute output="y" err-expect="1">
<exe-cmd>psql -c "select * from replicated_table;"</exe-cmd>
<exe-highlight>does not exist</exe-highlight>
</execute>
</execute-list>
<p>So, what went wrong? Since <postgres/> is pulling WAL segments from the archive to perform replication, changes won't be seen on the standby until the WAL segment that contains those changes is pushed from <host>db-master</host>.</p>
<p>This can be done manually by calling <code>pg_switch_xlog()</code>, which pushes the current WAL segment to the archive (a new WAL segment is created to contain further changes).</p>
<execute-list host="{[host-db-master]}">
<title>Call <code>pg_switch_xlog()</code></title>
<execute output="y" filter="n">
<exe-cmd>
psql -c "select *, current_timestamp from pg_switch_xlog()";
</exe-cmd>
</execute>
</execute-list>
<p>Now after a short delay the table will appear on <host>db-standby</host>.</p>
<execute-list host="{[host-db-standby]}">
<title>Now the new table exists on the standby (may require a few retries)</title>
<execute output="y" retry="15" filter="n">
<exe-cmd>psql -c "
select *, current_timestamp from replicated_table"</exe-cmd>
<exe-highlight>{[test-table-data]}</exe-highlight>
</execute>
</execute-list>
</section>
<section id="streaming" depend="hot-standby">
<title>Streaming Replication</title>
<p>Instead of relying solely on the WAL archive, streaming replication makes a direct connection to the master and applies changes as soon as they are made on the master. This results in much less lag between the master and standby.</p>
<p>Streaming replication requires a user with the replication privilege.</p>
<execute-list host="{[host-db-master]}">
<title>Create replication user</title>
<execute output="y" filter="n">
<exe-cmd>
psql -c "
create user replicator password 'jw8s0F4' replication";
</exe-cmd>
</execute>
</execute-list>
<p>The <file>pg_hba.conf</file> file must be updated to allow the standby to connect as the replication user. Be sure to replace the IP address below with the actual IP address of your <host>db-standby</host>. A reload will be required after modifying the <file>pg_hba.conf</file> file.</p>
<execute-list host="{[host-db-master]}">
<title>Create <file>pg_hba.conf</file> entry for replication user</title>
<execute>
<exe-cmd>
sh -c 'echo
"host replication replicator {[host-db-standby-ip]}/32 md5"
>> {[postgres-hba-demo]}'
</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>{[db-cluster-reload]}</exe-cmd>
</execute>
</execute-list>
<!-- <p>The <pg-option>max_wal_senders</pg-option> setting must be increased (the default is 0) to allow standbys to connect to the master. It will be set to 3 to allow more standbys to be created later. <postgres/> must restarted for this setting to take effect.</p>
<postgres-config host="{[host-db-master]}" file="{[postgres-config-demo]}">
<title>Increase <pg-option>max_wal_senders</pg-option></title>
<postgres-config-option key="max_wal_senders">3</postgres-config-option>
</postgres-config>
<execute-list host="{[host-db-master]}">
<title>Restart <postgres/></title>
<execute user="root">
<exe-cmd>{[db-cluster-restart]}</exe-cmd>
</execute>
</execute-list> -->
<p>The standby needs to know how to contact the master so the <pg-option>primary_conninfo</pg-option> setting will be configured in <backrest/>.</p>
<backrest-config host="{[host-db-standby]}" file="{[backrest-config-demo]}">
<title>Set <pg-option>primary_conninfo</pg-option></title>
<backrest-config-option section="demo" key="recovery-option">primary_conninfo=host={[host-db-master-ip]} port=5432 user=replicator</backrest-config-option>
</backrest-config>
<p>It is possible to configure a password in the <pg-option>primary_conninfo</pg-option> setting but using a <file>.pgpass</file> file is more flexible and secure.</p>
<execute-list host="{[host-db-standby]}">
<title>Configure the replication password in the <file>.pgpass</file> file.</title>
<execute>
<exe-cmd>
sh -c 'echo
"{[host-db-master-ip]}:*:replication:replicator:jw8s0F4"
>> {[postgres-pgpass]}'
</exe-cmd>
</execute>
<execute>
<exe-cmd>chmod 600 {[postgres-pgpass]}</exe-cmd>
</execute>
</execute-list>
<p>Now the standby can be created with the <cmd>restore</cmd> command.</p>
<execute-list host="{[host-db-standby]}">
<title>Stop <postgres/> and restore the {[postgres-cluster-demo]} standby cluster</title>
<execute user="root" err-suppress="y">
<exe-cmd>{[db-cluster-stop]}</exe-cmd>
</execute>
<execute>
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-delta restore</exe-cmd>
</execute>
<execute output="y" filter="n">
<exe-cmd>cat {[postgres-recovery-demo]}</exe-cmd>
</execute>
</execute-list>
<p keyword="co6">By default {[user-guide-os]} stores the <file>postgresql.conf</file> file in the <postgres/> data directory. That means the change made to <file>postgresql.conf</file> was overwritten by the last restore and the <pg-option>hot_standby</pg-option> setting must be enabled again. Other solutions to this problem are to store the <file>postgresql.conf</file> file elsewhere or to enable the <pg-option>hot_standby</pg-option> setting on the <host>db-master</host> host where it will be ignored.</p>
<postgres-config host="{[host-db-standby]}" keyword="co6" file="{[postgres-config-demo]}">
<title>Enable <pg-option>hot_standby</pg-option></title>
<postgres-config-option key="hot_standby">on</postgres-config-option>
</postgres-config>
<execute-list host="{[host-db-standby]}">
<title>Start <postgres/></title>
<execute user="root" show="n">
<exe-cmd>rm {[postgres-log-demo]}</exe-cmd>
</execute>
<execute user="root">
<exe-cmd>{[db-cluster-start]}</exe-cmd>
</execute>
<execute show="n">
<exe-cmd>{[db-cluster-wait]}</exe-cmd>
</execute>
</execute-list>
<p>The <postgres/> log will confirm that streaming replication has started.</p>
<execute-list host="{[host-db-standby]}">
<title>Examine the <postgres/> log output for log messages indicating success</title>
<execute output="y">
<exe-cmd>cat {[postgres-log-demo]}</exe-cmd>
<exe-highlight>started streaming WAL from primary</exe-highlight>
</execute>
</execute-list>
<p>Now when a table is created on <host>db-master</host> it will appear on <host>db-standby</host> quickly and without the need to call <code>pg_switch_xlog()</code>.</p>
<execute-list host="{[host-db-master]}">
<title>Create a new table on the master</title>
<execute output="y">
<exe-cmd>
psql -c "
begin;
create table stream_table (message text);
insert into stream_table values ('{[test-table-data]}');
commit;
select *, current_timestamp from stream_table";
</exe-cmd>
<exe-highlight>{[test-table-data]}</exe-highlight>
</execute>
</execute-list>
<execute-list host="{[host-db-standby]}">
<title>Query table on the standby</title>
<execute output="y" retry="2" filter="n">
<exe-cmd>psql -c "
select *, current_timestamp from stream_table"</exe-cmd>
<exe-highlight>{[test-table-data]}</exe-highlight>
</execute>
</execute-list>
</section>
</section>
<!-- SECTION => STANDBY-BACKUP -->
<section id="standby-backup" depend="/replication/streaming">
<title>Backup from a Standby</title>
<p><backrest/> can perform backups on a standby instead of the master. Standby backups require the <host>db-standby</host> host to be configured and the <br-option>backup-standby</br-option> option enabled.</p>
<backrest-config host="{[host-backup]}" owner="backrest:postgres" file="{[backrest-config-demo]}">
<title>Configure <br-option>db2-host</br-option>/<br-option>db2-user</br-option> and <br-option>db2-path</br-option></title>
<backrest-config-option section="demo" key="db2-path">{[db-path]}</backrest-config-option>
<backrest-config-option section="demo" key="db2-host">{[host-db-standby]}</backrest-config-option>
<backrest-config-option section="demo" key="db2-user">postgres</backrest-config-option>
<backrest-config-option section="global" key="backup-standby">y</backrest-config-option>
</backrest-config>
<p>Both the master and standby databases are required to perform the backup, though the vast majority of the files will be copied from the standby to reduce load on the master. The database hosts can be configured in any order. <backrest/> will automatically determine which is the master and which is the standby.</p>
<execute-list host="{[host-backup]}">
<title>Backup the {[postgres-cluster-demo]} cluster from <host>db-standby</host></title>
<execute user="backrest" output="y" filter="y">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --log-level-console=detail backup</exe-cmd>
<exe-highlight>backup file db-master|replay on the standby</exe-highlight>
</execute>
</execute-list>
<p>This incremental backup shows that most of the files are copied from the <host>db-standby</host> host and only a few are copied from the <host>db-master</host> host.</p>
<p><backrest/> creates a standby backup that is identical to a backup performed on the master. It does this by starting/stopping the backup on the <host>db-master</host> host, copying only files that are replicated from the <host>db-standby</host> host, then copying the remaining few files from the <host>db-master</host> host. This means that logs and statistics from the master database will be included in the backup.</p>
</section>
</doc>