<?xml version="1.0" encoding="UTF-8"?>
|
|
<!DOCTYPE doc SYSTEM "doc.dtd">
|
|
<doc title="{[project]} User Guide" subtitle="{[user-guide-subtitle]} / {[postgres]} {[pg-version-min]}-{[pg-version-max]}" cmd-line-len="85">
|
|
<description>The {[project]} User Guide demonstrates how to quickly and easily set up {[project]} for your {[postgres]} database. Step-by-step instructions lead the user through all the important features of the fastest, most reliable {[postgres]} backup and restore solution.</description>
|
|
|
|
<!-- Variables used by the rest of the script -->
|
|
<variable-list>
|
|
<!-- Defined OS types -->
|
|
<variable key="os-debian">debian</variable>
|
|
<variable key="os-rhel">rhel</variable>
|
|
|
|
<!-- OS titles -->
|
|
<variable key="os-debian-title">Debian & Ubuntu</variable>
|
|
<variable key="os-rhel-title">RHEL & CentOS 7-8</variable>
|
|
|
|
<!-- Base PostgreSQL versions -->
|
|
<variable key="os-debian-pg-version">12</variable>
|
|
<variable key="os-debian-pg-version-min">12</variable>
|
|
<variable key="os-debian-pg-version-max">13</variable>
|
|
|
|
<variable key="os-rhel-pg-version">10</variable>
|
|
<variable key="os-rhel-pg-version-min">9.6</variable>
|
|
<variable key="os-rhel-pg-version-max">11</variable>
|
|
|
|
<!-- User-defined package to use in documentation (use "apt" to install the current PGDG apt package) -->
|
|
<variable key="package">none</variable>
|
|
|
|
<!-- Defines the general OS type that will be used to generate commands. Also supported: rhel -->
|
|
<variable key="os-type">debian</variable>
|
|
|
|
<!-- Will encryption be used in the documentation? It can be useful for testing to omit encryption. -->
|
|
<variable key="encrypt">y</variable>
|
|
|
|
<!-- Common if expressions for testing os-type -->
|
|
<variable key="os-type-is-debian">'{[os-type]}' eq '{[os-debian]}'</variable>
|
|
<variable key="os-type-is-rhel">'{[os-type]}' eq '{[os-rhel]}'</variable>
|
|
|
|
<!-- Defines the container image that will be used to build the host -->
|
|
<variable key="os-image" if="{[os-type-is-debian]}">ubuntu:16.04</variable>
|
|
<variable key="os-image" if="{[os-type-is-rhel]}">centos:8</variable>
|
|
|
|
<variable key="user-guide-subtitle" if="{[os-type-is-debian]}">{[os-debian-title]}</variable>
|
|
<variable key="user-guide-subtitle" if="{[os-type-is-rhel]}">{[os-rhel-title]}</variable>
|
|
|
|
<variable key="user-guide-os" if="{[os-type-is-debian]}">Debian/Ubuntu</variable>
|
|
<variable key="user-guide-os" if="{[os-type-is-rhel]}">RHEL/CentOS 7-8</variable>
|
|
|
|
<variable key="pgbackrest-repo-path">/pgbackrest</variable>
|
|
|
|
<!-- Path where CA certificates are installed -->
|
|
<variable key="ca-cert-path" if="{[os-type-is-debian]}">/usr/local/share/ca-certificates</variable>
|
|
<variable key="ca-cert-path" if="{[os-type-is-rhel]}">/etc/pki/ca-trust/source/anchors</variable>
|
|
|
|
<!-- Path where fake certificates are located -->
|
|
<variable key="fake-cert-path-relative">resource/fake-cert</variable>
|
|
<variable key="fake-cert-path">{[host-repo-path]}/doc/{[fake-cert-path-relative]}</variable>
|
|
|
|
<!-- PostgreSQL versions to run the documentation for and min/max versions represented -->
|
|
<variable key="pg-version" if="{[os-type-is-debian]}">{[os-debian-pg-version]}</variable>
|
|
<variable key="pg-version-min" if="{[os-type-is-debian]}">{[os-debian-pg-version-min]}</variable>
|
|
<variable key="pg-version-max" if="{[os-type-is-debian]}">{[os-debian-pg-version-max]}</variable>
|
|
|
|
<variable key="pg-version" if="{[os-type-is-rhel]}">{[os-rhel-pg-version]}</variable>
|
|
<variable key="pg-version-min" if="{[os-type-is-rhel]}">{[os-rhel-pg-version-min]}</variable>
|
|
<variable key="pg-version-max" if="{[os-type-is-rhel]}">{[os-rhel-pg-version-max]}</variable>
|
|
|
|
<variable key="pg-version-nodot" eval="y">my $version = '{[pg-version]}'; $version =~ s/\.//g; return $version;</variable>
|
|
|
|
<!-- WAL level should be the minimum required for replication -->
|
|
<variable key="wal-level" if="{[pg-version]} < 9.6">hot_standby</variable>
|
|
<variable key="wal-level" if="{[pg-version]} >= 9.6">replica</variable>
|
|
|
|
<variable key="pg-version-upgrade" if="{[os-type-is-debian]}">13</variable>
|
|
<variable key="pg-version-upgrade" if="{[os-type-is-rhel]}">11</variable>
|
|
<variable key="pg-version-upgrade-nodot" eval="y">my $version = '{[pg-version-upgrade]}'; $version =~ s/\.//g; return $version;</variable>
|
|
|
|
<variable key="pg-bin-path" if="{[os-type-is-debian]}">/usr/lib/postgresql/{[pg-version]}/bin</variable>
|
|
<variable key="pg-bin-path" if="{[os-type-is-rhel]}">/usr/pgsql-{[pg-version]}/bin</variable>
|
|
|
|
<variable key="pg-bin-upgrade-path" if="{[os-type-is-debian]}">/usr/lib/postgresql/{[pg-version-upgrade]}/bin</variable>
|
|
<variable key="pg-bin-upgrade-path" if="{[os-type-is-rhel]}">/usr/pgsql-{[pg-version-upgrade]}/bin</variable>
|
|
|
|
<variable key="pg-home-path" if="{[os-type-is-debian]}">/var/lib/postgresql</variable>
|
|
<variable key="pg-home-path" if="{[os-type-is-rhel]}">/var/lib/pgsql</variable>
|
|
|
|
<variable key="pg-group">postgres</variable>
|
|
|
|
<variable key="backrest-repo-path">/var/lib/pgbackrest</variable>
|
|
<variable key="backrest-repo-cipher-type">aes-256-cbc</variable>
|
|
<variable key="backrest-repo-cipher-pass">zWaf6XtpjIVZC5444yXB+cgFDFl7MxGlgkZSaoPvTGirhPygu4jOKOXf9LO4vjfO</variable>
|
|
<variable key="br-bin">/usr/bin/pgbackrest</variable>
|
|
<variable key="br-user">pgbackrest</variable>
|
|
<variable key="br-group">{[br-user]}</variable>
|
|
<variable key="br-home-path">/home/{[br-user]}</variable>
|
|
|
|
<variable key="postgres-cluster-demo">demo</variable>
|
|
<variable key="backrest-config-path">/etc/{[project-exe]}</variable>
|
|
<variable key="backrest-config-include-path">{[backrest-config-path]}/conf.d</variable>
|
|
<variable if="'{[package]}' eq 'none'" key="backrest-config-demo">{[backrest-config-path]}/{[project-exe]}.conf</variable>
|
|
<variable if="'{[package]}' ne 'none'" key="backrest-config-demo">/etc/{[project-exe]}.conf</variable>
|
|
|
|
<variable key="pg-path-default" if="{[os-type-is-debian]}">/var/lib/postgresql/[version]/[cluster]</variable>
|
|
<variable key="pg-path-default" if="{[os-type-is-rhel]}">/var/lib/pgsql/[version]/data</variable>
|
|
|
|
<variable key="pg-path" if="{[os-type-is-debian]}">/var/lib/postgresql/{[pg-version]}/{[postgres-cluster-demo]}</variable>
|
|
<variable key="pg-path" if="{[os-type-is-rhel]}">/var/lib/pgsql/{[pg-version]}/data</variable>
|
|
|
|
<variable key="pg-path-upgrade" if="{[os-type-is-debian]}">/var/lib/postgresql/{[pg-version-upgrade]}/{[postgres-cluster-demo]}</variable>
|
|
<variable key="pg-path-upgrade" if="{[os-type-is-rhel]}">/var/lib/pgsql/{[pg-version-upgrade]}/data</variable>
|
|
|
|
<variable key="spool-path">/var/spool/pgbackrest</variable>
|
|
|
|
<variable key="postgres-config-demo" if="{[os-type-is-debian]}">/etc/postgresql/{[pg-version]}/{[postgres-cluster-demo]}/postgresql.conf</variable>
|
|
<variable key="postgres-config-demo" if="{[os-type-is-rhel]}">{[pg-path]}/postgresql.conf</variable>
|
|
|
|
<variable key="postgres-config-demo-upgrade" if="{[os-type-is-debian]}">/etc/postgresql/{[pg-version-upgrade]}/{[postgres-cluster-demo]}/postgresql.conf</variable>
|
|
<variable key="postgres-config-demo-upgrade" if="{[os-type-is-rhel]}">{[pg-path-upgrade]}/postgresql.conf</variable>
|
|
|
|
<variable key="postgres-hba-demo" if="{[os-type-is-debian]}">/etc/postgresql/{[pg-version]}/{[postgres-cluster-demo]}/pg_hba.conf</variable>
|
|
<variable key="postgres-hba-demo" if="{[os-type-is-rhel]}">{[pg-path]}/pg_hba.conf</variable>
|
|
|
|
<variable key="postgres-hba-demo-upgrade" if="{[os-type-is-debian]}">/etc/postgresql/{[pg-version-upgrade]}/{[postgres-cluster-demo]}/pg_hba.conf</variable>
|
|
<variable key="postgres-hba-demo-upgrade" if="{[os-type-is-rhel]}">{[pg-path-upgrade]}/pg_hba.conf</variable>
|
|
|
|
<variable key="postgres-pgpass">{[pg-home-path]}/.pgpass</variable>
|
|
|
|
<variable key="postgres-log-demo" if="{[os-type-is-debian]}">/var/log/postgresql/postgresql-{[pg-version]}-{[postgres-cluster-demo]}.log</variable>
|
|
<variable key="postgres-log-demo" if="{[os-type-is-rhel]} && {[pg-version]} < 10">{[pg-path]}/pg_log/postgresql.log</variable>
|
|
<variable key="postgres-log-demo" if="{[os-type-is-rhel]} && {[pg-version]} >= 10">{[pg-path]}/log/postgresql.log</variable>
|
|
|
|
<variable key="postgres-log-pgstartup-demo" if="{[os-type-is-rhel]}">/var/lib/pgsql/{[pg-version]}/pgstartup.log</variable>
|
|
|
|
<variable key="pg-recovery-file-demo" if="{[pg-version]} < 12">recovery.conf</variable>
|
|
<variable key="pg-recovery-file-demo" if="{[pg-version]} >= 12">postgresql.auto.conf</variable>
|
|
<variable key="pg-recovery-path-demo">{[pg-path]}/{[pg-recovery-file-demo]}</variable>
|
|
|
|
<!-- Select correct WAL switch function based on the version of PostgreSQL -->
|
|
<variable key="pg-switch-wal" if="{[pg-version]} < 10">pg_switch_xlog</variable>
|
|
<variable key="pg-switch-wal" if="{[pg-version]} >= 10">pg_switch_wal</variable>
|
|
|
|
<!-- Azure Settings -->
|
|
<variable key="azure-all">n</variable> <!-- Build all the documentation with Azure? -->
|
|
<variable key="azure-local">y</variable>
|
|
<variable key="azure-account">pgbackrest</variable>
|
|
<variable key="azure-container">demo-container</variable>
|
|
<variable key="azure-repo">demo-repo</variable>
|
|
<variable key="azure-key-type">shared</variable>
|
|
<variable key="azure-key">YXpLZXk=</variable>
|
|
|
|
<!-- GCS Settings -->
|
|
<variable key="gcs-all">n</variable> <!-- Build all the documentation with GCS? -->
|
|
<variable key="gcs-bucket">demo-bucket</variable>
|
|
<variable key="gcs-repo">demo-repo</variable>
|
|
<variable key="gcs-key-type">service</variable>
|
|
<variable key="gcs-key">/etc/pgbackrest/gcs-key.json</variable>
|
|
|
|
<!-- S3 Settings -->
|
|
<variable key="s3-all">n</variable> <!-- Build all the documentation with S3? -->
|
|
<variable key="s3-local">y</variable>
|
|
<variable key="s3-bucket">demo-bucket</variable>
|
|
<variable key="s3-repo">demo-repo</variable>
|
|
<variable key="s3-region">us-east-1</variable>
|
|
<variable key="s3-endpoint">s3.{[s3-region]}.amazonaws.com</variable>
|
|
<variable key="s3-key">accessKey1</variable>
|
|
<variable key="s3-key-secret">verySecretKey1</variable>
|
|
|
|
<!-- Is any object store being used to build all the documentation? -->
|
|
<variable key="object-any-all">('{[azure-all]}' eq 'y' || '{[gcs-all]}' eq 'y' || '{[s3-all]}' eq 'y')</variable>
|
|
|
|
<!-- Hosts -->
|
|
<variable key="host-image">pgbackrest/doc:{[os-type]}</variable>
|
|
|
|
<variable key="host-option">-v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /tmp/$(mktemp -d):/run</variable>
|
|
|
|
<variable key="host-user" eval="y">use English; getpwuid($UID) eq 'root' ? 'vagrant' : getpwuid($UID) . ''</variable>
|
|
<variable key="host-mount">{[host-repo-path]}:{[pgbackrest-repo-path]}</variable>
|
|
<variable key="image-repo">pgbackrest/test</variable>
|
|
|
|
<variable key="host-azure-id">azure</variable>
|
|
<variable key="host-azure">azure-server</variable>
|
|
|
|
<variable key="host-s3-id">s3</variable>
|
|
<variable key="host-s3">s3-server</variable>
|
|
|
|
<variable key="host-pg1-id">pg1</variable>
|
|
<variable key="host-pg1">pg-primary</variable>
|
|
<variable key="host-pg1-user">{[host-user]}</variable>
|
|
<variable key="host-pg1-image">{[host-image]}</variable>
|
|
<variable key="host-pg1-mount">{[host-mount]}</variable>
|
|
|
|
<variable key="host-build-id">build</variable>
|
|
<variable key="host-build">build</variable>
|
|
<variable key="host-build-user">{[host-user]}</variable>
|
|
<variable key="host-build-image">{[host-image]}</variable>
|
|
<variable key="host-build-mount">{[host-mount]}</variable>
|
|
|
|
<variable key="host-pg2-id">pg2</variable>
|
|
<variable key="host-pg2">pg-standby</variable>
|
|
<variable key="host-pg2-user">{[host-pg1-user]}</variable>
|
|
<variable key="host-pg2-image">{[host-image]}</variable>
|
|
<variable key="host-pg2-mount">{[host-mount]}</variable>
|
|
|
|
<variable key="host-repo1-id">repo1</variable>
|
|
<variable key="host-repo1">repository</variable>
|
|
<variable key="host-repo1-user">{[host-user]}</variable>
|
|
<variable key="host-repo1-image">{[host-image]}</variable>
|
|
<variable key="host-repo1-mount">{[host-mount]}</variable>
|
|
|
|
<!-- Commands for various operations -->
|
|
<variable key="cmd-backup-last">pgbackrest repo-ls backup/demo --filter="(F|D|I)$" --sort=desc | head -1</variable>
|
|
|
|
<!-- Data used to demonstrate backup/restore operations -->
|
|
<variable key="test-table-data">Important Data</variable>
|
|
|
|
<!-- Database cluster commands -->
|
|
<variable key="pg-cluster-wait">sleep 2</variable>
|
|
|
|
<variable key="pg-cluster-create" if="{[os-type-is-debian]}">pg_createcluster {[pg-version]} {[postgres-cluster-demo]}</variable>
|
|
|
|
<variable key="pg-cluster-create-upgrade" if="{[os-type-is-debian]}">pg_createcluster {[pg-version-upgrade]} {[postgres-cluster-demo]}</variable>
|
|
|
|
<variable key="pg-cluster-start" if="{[os-type-is-debian]}">pg_ctlcluster {[pg-version]} {[postgres-cluster-demo]} start</variable>
|
|
<variable key="pg-cluster-start" if="{[os-type-is-rhel]}">systemctl start postgresql-{[pg-version]}.service</variable>
|
|
|
|
<variable key="pg-cluster-start-upgrade" if="{[os-type-is-debian]}">pg_ctlcluster {[pg-version-upgrade]} {[postgres-cluster-demo]} start</variable>
|
|
<variable key="pg-cluster-start-upgrade" if="{[os-type-is-rhel]}">systemctl start postgresql-{[pg-version-upgrade]}.service</variable>
|
|
|
|
<variable key="pg-cluster-stop" if="{[os-type-is-debian]}">pg_ctlcluster {[pg-version]} {[postgres-cluster-demo]} stop</variable>
|
|
<variable key="pg-cluster-stop" if="{[os-type-is-rhel]}">systemctl stop postgresql-{[pg-version]}.service</variable>
|
|
|
|
<variable key="pg-cluster-restart" if="{[os-type-is-debian]}">pg_ctlcluster {[pg-version]} {[postgres-cluster-demo]} restart</variable>
|
|
<variable key="pg-cluster-restart" if="{[os-type-is-rhel]}">systemctl restart postgresql-{[pg-version]}.service</variable>
|
|
|
|
<variable key="pg-cluster-reload" if="{[os-type-is-debian]}">pg_ctlcluster {[pg-version]} {[postgres-cluster-demo]} reload</variable>
|
|
<variable key="pg-cluster-reload" if="{[os-type-is-rhel]}">systemctl reload postgresql-{[pg-version]}.service</variable>
|
|
|
|
<variable key="pg-cluster-check" if="{[os-type-is-debian]}">pg_lsclusters</variable>
|
|
<variable key="pg-cluster-check" if="{[os-type-is-rhel]}">systemctl status postgresql-{[pg-version]}.service</variable>
|
|
|
|
<variable key="pg-cluster-check-upgrade" if="{[os-type-is-debian]}">pg_lsclusters</variable>
|
|
<variable key="pg-cluster-check-upgrade" if="{[os-type-is-rhel]}">systemctl status postgresql-{[pg-version-upgrade]}.service</variable>
|
|
|
|
<!-- Add more tables to make the backup more interesting. This is a rough and ready solution that can be pasted into the
     document wherever the number of files needs to be increased for performance testing. -->
|
|
<!-- <execute user="postgres" show="n">
|
|
<exe-cmd>psql -c "
|
|
create or replace function create_test_table(prefix int, scale int) returns void as \$\$
|
|
declare
|
|
index int;
|
|
begin
|
|
for index in 1 .. scale loop
|
|
execute 'create table test_' || prefix || '_' || index || ' (id int)';
|
|
end loop;
|
|
end \$\$ LANGUAGE plpgsql;"</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" show="n">
|
|
<exe-cmd>
|
|
bash -c 'for i in {1..100}; do psql -c "select create_test_table(${i?}, 1000)"; done'
|
|
</exe-cmd>
|
|
</execute> -->
|
|
|
|
<!-- Common commands -->
|
|
<variable key="ssh-key-install">
|
|
mkdir -p -m 700 /root/.ssh && \
|
|
echo '-----BEGIN RSA PRIVATE KEY-----' > /root/.ssh/id_rsa && \
|
|
echo 'MIICXwIBAAKBgQDR0yJsZW5d5LcqteiOtv8d+FFeFFHDPI0VTcTOdMn1iDiIP1ou' >> /root/.ssh/id_rsa && \
|
|
echo 'X3Q2OyNjsBaDbsRJd+sp9IRq1LKX3zsBcgGZANwm0zduuNEPEU94ajS/uRoejIqY' >> /root/.ssh/id_rsa && \
|
|
echo '/XkKOpnEF6ZbQ2S7TaE4sWeGLvba7kUFs0QTOO+N+nV2dMbdqZf6C8lazwIDAQAB' >> /root/.ssh/id_rsa && \
|
|
echo 'AoGBAJXa6xzrnFVmwgK5BKzYuX/YF5TPgk2j80ch0ct50buQXH/Cb0/rUH5i4jWS' >> /root/.ssh/id_rsa && \
|
|
echo 'T6Hy/DFUehnuzpvV6O9auTOhDs3BhEKFRuRLn1nBwTtZny5Hh+cw7azUCEHFCJlz' >> /root/.ssh/id_rsa && \
|
|
echo 'makCrVbgawtno6oU/pFgQm1FcxD0f+Me5ruNcLHqUZsPQwkRAkEA+8pG+ckOlz6R' >> /root/.ssh/id_rsa && \
|
|
echo 'AJLIHedmfcrEY9T7sfdo83bzMOz8H5soUUP4aOTLJYCla1LO7JdDnXMGo0KxaHBP' >> /root/.ssh/id_rsa && \
|
|
echo 'l8j5zDmVewJBANVVPDJr1w37m0FBi37QgUOAijVfLXgyPMxYp2uc9ddjncif0063' >> /root/.ssh/id_rsa && \
|
|
echo '0Wc0FQefoPszf3CDrHv/RHvhHq97jXDwTb0CQQDgH83NygoS1r57pCw9chzpG/R0' >> /root/.ssh/id_rsa && \
|
|
echo 'aMEiSPhCvz757fj+qT3aGIal2AJ7/2c/gRZvwrWNETZ3XIZOUKqIkXzJLPjBAkEA' >> /root/.ssh/id_rsa && \
|
|
echo 'wnP799W2Y8d4/+VX2pMBkF7lG7sSviHEq1sP2BZtPBRQKSQNvw3scM7XcGh/mxmY' >> /root/.ssh/id_rsa && \
|
|
echo 'yx0qpqfKa8SKbNgI1+4iXQJBAOlg8MJLwkUtrG+p8wf69oCuZsnyv0K6UMDxm6/8' >> /root/.ssh/id_rsa && \
|
|
echo 'cbvfmvODulYFaIahaqHWEZoRo5CLYZ7gN43WHPOrKxdDL78=' >> /root/.ssh/id_rsa && \
|
|
echo '-----END RSA PRIVATE KEY-----' >> /root/.ssh/id_rsa && \
|
|
echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDR0yJsZW5d5LcqteiOtv8d+FFeFFHDPI0VTcTOdMn1iDiIP1ouX3Q2OyNjsBaDbsRJd+sp9IRq1LKX3zsBcgGZANwm0zduuNEPEU94ajS/uRoejIqY/XkKOpnEF6ZbQ2S7TaE4sWeGLvba7kUFs0QTOO+N+nV2dMbdqZf6C8lazw== root@pgbackrest-doc' > /root/.ssh/authorized_keys && \
|
|
echo 'Host *' > /root/.ssh/config && \
|
|
echo ' StrictHostKeyChecking no' >> /root/.ssh/config && \
|
|
chmod 600 /root/.ssh/*
|
|
</variable>
|
|
|
|
<variable key="copy-ca-cert">COPY {[fake-cert-path-relative]}/ca.crt {[ca-cert-path]}/pgbackrest-ca.crt</variable>
|
|
|
|
<!-- Don't allow sudo to disable core dump (suppresses errors, see https://github.com/sudo-project/sudo/issues/42) -->
|
|
<variable key="sudo-disable-core-dump">RUN echo "Set disable_coredump false" >> /etc/sudo.conf</variable>
|
|
</variable-list>
|
|
|
|
<!-- Set up hosts used to build the documentation
|
|
============================================================================================================================ -->
|
|
<host-define if="{[os-type-is-debian]}" image="{[host-image]}" from="{[os-image]}">
|
|
{[copy-ca-cert]}
|
|
|
|
# Fix root tty
|
|
RUN sed -i 's/^mesg n/tty -s \&\& mesg n/g' /root/.profile
|
|
|
|
# Install base packages (suppress dpkg interactive output)
|
|
RUN export DEBIAN_FRONTEND=noninteractive && \
|
|
rm /etc/apt/apt.conf.d/70debconf && \
|
|
apt-get update && \
|
|
apt-get install -y --no-install-recommends sudo ssh wget vim gnupg lsb-release iputils-ping ca-certificates \
|
|
tzdata locales 2>&1
|
|
|
|
{[sudo-disable-core-dump]}
|
|
|
|
# Install CA certificate
|
|
RUN update-ca-certificates
|
|
|
|
# Install PostgreSQL
|
|
RUN RELEASE_CODENAME=`lsb_release -c | awk '{print $2}'` && \
|
|
echo 'deb http://apt.postgresql.org/pub/repos/apt/ '${RELEASE_CODENAME?}'-pgdg main' | \
|
|
tee -a /etc/apt/sources.list.d/pgdg.list && \
|
|
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - && \
|
|
apt-get update && \
|
|
apt-get install -y --no-install-recommends postgresql-common 2>&1 && \
|
|
sed -i 's/^\#create\_main\_cluster.*$/create\_main\_cluster \= false/' \
|
|
/etc/postgresql-common/createcluster.conf && \
|
|
apt-get install -y --no-install-recommends postgresql-{[pg-version]} postgresql-{[pg-version-upgrade]} 2>&1
|
|
|
|
# Create an ssh key for root so all hosts can ssh to each other as root
|
|
RUN \ {[ssh-key-install]}
|
|
|
|
# Add doc user with sudo privileges
|
|
RUN adduser --disabled-password --gecos "" {[host-user]} && \
|
|
echo '%{[host-user]} ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers
|
|
|
|
# Set UTF8 encoding
|
|
RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \
|
|
dpkg-reconfigure --frontend=noninteractive locales && \
|
|
update-locale LANG=en_US.UTF-8
|
|
ENV LANG en_US.UTF-8
|
|
|
|
ENTRYPOINT service ssh restart && bash
|
|
</host-define>
|
|
|
|
<host-define if="{[os-type-is-rhel]}" image="{[host-image]}" from="{[os-image]}">
|
|
ENV container docker
|
|
|
|
{[copy-ca-cert]}
|
|
|
|
RUN (cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == \
|
|
systemd-tmpfiles-setup.service ] || rm -f $i; done); \
|
|
rm -f /lib/systemd/system/multi-user.target.wants/*;\
|
|
rm -f /etc/systemd/system/*.wants/*;\
|
|
rm -f /lib/systemd/system/local-fs.target.wants/*; \
|
|
rm -f /lib/systemd/system/sockets.target.wants/*udev*; \
|
|
rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \
|
|
rm -f /lib/systemd/system/basic.target.wants/*;\
|
|
rm -f /lib/systemd/system/anaconda.target.wants/*;
|
|
|
|
VOLUME [ "/sys/fs/cgroup" ]
|
|
|
|
# Install packages
|
|
RUN yum install -y openssh-server openssh-clients sudo wget vim 2>&1
|
|
|
|
# Install CA certificate
|
|
RUN update-ca-trust extract
|
|
|
|
# Regenerate SSH keys
|
|
RUN rm -f /etc/ssh/ssh_host_rsa_key* && \
|
|
ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key && \
|
|
rm -f /etc/ssh/ssh_host_dsa_key* && \
|
|
ssh-keygen -t dsa -f /etc/ssh/ssh_host_dsa_key
|
|
|
|
# Install PGDG PostgreSQL repository
|
|
RUN rpm --import http://yum.postgresql.org/RPM-GPG-KEY-PGDG-10 && \
|
|
rpm -ivh https://download.postgresql.org/pub/repos/yum/reporpms/EL-8-x86_64/pgdg-redhat-repo-latest.noarch.rpm
|
|
|
|
# Disable default PostgreSQL repository
|
|
RUN command -v dnf >/dev/null 2>&1 && dnf -qy module disable postgresql || true
|
|
|
|
# Install PostgreSQL
|
|
RUN yum install -y postgresql{[pg-version-nodot]}-server postgresql{[pg-version-upgrade-nodot]}-server
|
|
|
|
# Create an ssh key for root so all hosts can ssh to each other as root
|
|
RUN \ {[ssh-key-install]}
|
|
|
|
# Add doc user with sudo privileges
|
|
RUN adduser -n {[host-user]} && \
|
|
echo '{[host-user]} ALL=(ALL) NOPASSWD: ALL' > /etc/sudoers.d/{[host-user]}
|
|
|
|
# Enable the user session service so logons are allowed
|
|
RUN echo "[Install]" >> /usr/lib/systemd/system/systemd-user-sessions.service && \
|
|
echo "[WantedBy=default.target]" >> /usr/lib/systemd/system/systemd-user-sessions.service && \
|
|
systemctl enable systemd-user-sessions.service && \
|
|
mkdir -p /etc/systemd/system/default.target.wants && \
|
|
ln -s /usr/lib/systemd/system/systemd-user-sessions.service \
|
|
/etc/systemd/system/default.target.wants/systemd-user-sessions.service
|
|
|
|
# Set locale
|
|
RUN echo en_US.UTF-8 UTF-8 > /etc/locale.conf
|
|
|
|
# Add path to PostgreSQL
|
|
ENV PATH=/usr/pgsql-{[pg-version]}/bin:$PATH
|
|
|
|
CMD ["/usr/sbin/init"]
|
|
</host-define>
|
|
|
|
<!-- ======================================================================================================================= -->
|
|
<block-define id="setup-ssh-intro">
|
|
<p><backrest/> requires passwordless SSH to enable communication between the hosts.</p>
|
|
</block-define>
|
|
|
|
<block-define id="setup-ssh">
|
|
<execute-list host="{[setup-ssh-host]}">
|
|
<title>Create <host>{[setup-ssh-host]}</host> host key pair</title>
|
|
|
|
<execute user="{[setup-ssh-user]}">
|
|
<exe-cmd>mkdir -m 750 -p {[setup-ssh-user-home-path]}/.ssh</exe-cmd>
|
|
</execute>
|
|
<execute user="{[setup-ssh-user]}">
|
|
<exe-cmd>ssh-keygen -f {[setup-ssh-user-home-path]}/.ssh/id_rsa
|
|
-t rsa -b 4096 -N ""</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Exchange keys between <host>{[host-repo1]}</host> and <host>{[setup-ssh-host]}</host>.</p>
|
|
|
|
<execute-list host="{[host-repo1]}">
|
|
<title>Copy <host>{[setup-ssh-host]}</host> public key to <host>{[host-repo1]}</host></title>
|
|
|
|
<execute user="root" err-suppress="y" user-force="y">
|
|
<exe-cmd>
|
|
(echo -n 'no-agent-forwarding,no-X11-forwarding,no-port-forwarding,' &&
|
|
echo -n 'command="{[br-bin]} ${SSH_ORIGINAL_COMMAND#* }" ' &&
|
|
sudo ssh root@{[setup-ssh-host]} cat {[setup-ssh-user-home-path]}/.ssh/id_rsa.pub) |
|
|
sudo -u pgbackrest tee -a {[br-home-path]}/.ssh/authorized_keys
|
|
</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<execute-list host="{[setup-ssh-host]}">
|
|
<title>Copy <host>{[host-repo1]}</host> public key to <host>{[setup-ssh-host]}</host></title>
|
|
|
|
<execute user="root" err-suppress="y" user-force="y">
|
|
<exe-cmd>
|
|
(echo -n 'no-agent-forwarding,no-X11-forwarding,no-port-forwarding,' &&
|
|
echo -n 'command="{[br-bin]} ${SSH_ORIGINAL_COMMAND#* }" ' &&
|
|
sudo ssh root@{[host-repo1]} cat {[br-home-path]}/.ssh/id_rsa.pub) |
|
|
sudo -u {[setup-ssh-user]} tee -a {[setup-ssh-user-home-path]}/.ssh/authorized_keys
|
|
</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Test that connections can be made from <host>{[host-repo1]}</host> to <host>{[setup-ssh-host]}</host> and vice versa.</p>
|
|
|
|
<execute-list host="{[host-repo1]}">
|
|
<title>Test connection from <host>{[host-repo1]}</host> to <host>{[setup-ssh-host]}</host></title>
|
|
|
|
<execute user="{[br-user]}" err-suppress="y">
|
|
<exe-cmd>ssh {[setup-ssh-user]}@{[setup-ssh-host]}</exe-cmd>
|
|
<exe-cmd-extra>-o StrictHostKeyChecking=no</exe-cmd-extra>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<execute-list host="{[setup-ssh-host]}">
|
|
<title>Test connection from <host>{[setup-ssh-host]}</host> to <host>{[host-repo1]}</host></title>
|
|
|
|
<execute user="{[setup-ssh-user]}" err-suppress="y">
|
|
<exe-cmd>ssh pgbackrest@{[host-repo1]}</exe-cmd>
|
|
<exe-cmd-extra>-o StrictHostKeyChecking=no</exe-cmd-extra>
|
|
</execute>
|
|
</execute-list>
|
|
</block-define>
|
|
|
|
<!-- ======================================================================================================================= -->
|
|
<block-define if="'{[package]}' eq 'none'" id="br-install">
|
|
<p><backrest/> needs to be installed from a package or installed manually as shown here.</p>
|
|
|
|
<execute-list host="{[host-build]}">
|
|
<title>Install dependencies</title>
|
|
|
|
<execute if="{[os-type-is-debian]}" user="root" pre="y">
|
|
<exe-cmd>
|
|
apt-get install postgresql-client libxml2</exe-cmd>
|
|
<exe-cmd-extra>-y 2>&1</exe-cmd-extra>
|
|
</execute>
|
|
|
|
<execute if="{[os-type-is-rhel]}" user="root" pre="y">
|
|
<exe-cmd>
|
|
yum install postgresql-libs
|
|
</exe-cmd>
|
|
<exe-cmd-extra>-y 2>&1</exe-cmd-extra>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<execute-list host="{[br-install-host]}">
|
|
<title>Copy <backrest/> binary from build host</title>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>scp {[host-build]}:/build/pgbackrest-release-{[version]}/src/pgbackrest /usr/bin</exe-cmd>
|
|
<exe-cmd-extra>2>&1</exe-cmd-extra>
|
|
</execute>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>chmod 755 /usr/bin/pgbackrest</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p><backrest/> requires log and configuration directories and a configuration file.</p>
|
|
|
|
<execute-list host="{[br-install-host]}">
|
|
<title>Create <backrest/> configuration file and directories</title>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>mkdir -p -m 770 /var/log/pgbackrest</exe-cmd>
|
|
</execute>
|
|
<execute user="root">
|
|
<exe-cmd>chown {[br-install-user]}:{[br-install-group]} /var/log/pgbackrest</exe-cmd>
|
|
</execute>
|
|
<execute user="root">
|
|
<exe-cmd>mkdir -p {[backrest-config-path]}</exe-cmd>
|
|
</execute>
|
|
<execute user="root">
|
|
<exe-cmd>mkdir -p {[backrest-config-include-path]}</exe-cmd>
|
|
</execute>
|
|
<execute user="root">
|
|
<exe-cmd>touch {[backrest-config-demo]}</exe-cmd>
|
|
</execute>
|
|
<execute user="root">
|
|
<exe-cmd>chmod 640 {[backrest-config-demo]}</exe-cmd>
|
|
</execute>
|
|
<execute user="root">
|
|
<exe-cmd>chown {[br-install-user]}:{[br-install-group]} {[backrest-config-demo]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
</block-define>
|
|
|
|
<block-define if="'{[package]}' ne 'none'" id="br-install">
|
|
<execute-list host="{[br-install-host]}">
|
|
<title>Install <backrest/> from package</title>
|
|
|
|
<execute if="{[os-type-is-debian]} && '{[package]}' ne 'apt'" user="root" err-suppress="y" show="n">
|
|
<exe-cmd>dpkg -i {[pgbackrest-repo-path]}/{[package]}</exe-cmd>
|
|
<exe-cmd-extra> 2>&1</exe-cmd-extra>
|
|
</execute>
|
|
|
|
<execute if="{[os-type-is-debian]} && '{[package]}' ne 'apt'" user="root" show="n">
|
|
<exe-cmd>apt-get -y install -f</exe-cmd>
|
|
<exe-cmd-extra>-y 2>&1</exe-cmd-extra>
|
|
</execute>
|
|
|
|
<execute if="{[os-type-is-debian]} && '{[package]}' ne 'apt'" user="root" skip="y">
|
|
<exe-cmd>apt-get install pgbackrest</exe-cmd>
|
|
</execute>
|
|
|
|
<execute if="{[os-type-is-debian]} && '{[package]}' eq 'apt'" user="root" show="n">
|
|
<exe-cmd>apt-get update</exe-cmd>
|
|
</execute>
|
|
|
|
<execute if="{[os-type-is-debian]} && '{[package]}' eq 'apt'" user="root" show="y">
|
|
<exe-cmd>apt-get install pgbackrest</exe-cmd>
|
|
<exe-cmd-extra>-y 2>&1</exe-cmd-extra>
|
|
</execute>
|
|
|
|
<execute if="{[os-type-is-rhel]} && '{[package]}' ne 'yum'" user="root" show="n">
|
|
<exe-cmd>yum -y install {[pgbackrest-repo-path]}/{[package]}</exe-cmd>
|
|
<exe-cmd-extra>-y 2>&1</exe-cmd-extra>
|
|
</execute>
|
|
|
|
<execute if="{[os-type-is-rhel]} && '{[package]}' ne 'yum'" user="root" skip="y">
|
|
<exe-cmd>yum install pgbackrest</exe-cmd>
|
|
</execute>
|
|
|
|
<execute if="{[os-type-is-rhel]} && '{[package]}' eq 'yum'" user="root">
|
|
<exe-cmd>yum install pgbackrest</exe-cmd>
|
|
<exe-cmd-extra>-y 2>&1</exe-cmd-extra>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<execute-list if="'{[br-install-user]}' ne 'postgres'" host="{[br-install-host]}">
|
|
<title>Update permissions on configuration file and directories</title>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>chown {[br-install-user]}:{[br-install-group]} /var/log/pgbackrest</exe-cmd>
|
|
</execute>
|
|
<execute user="root">
|
|
<exe-cmd>chown {[br-install-user]}:{[br-install-group]} {[backrest-config-demo]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
</block-define>
|
|
|
|
<block-define id="br-install-repo">
|
|
<execute-list if="'{[package]}' eq 'none'" host="{[br-install-host]}">
|
|
<title>Create the <backrest/> repository</title>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>mkdir -p {[backrest-repo-path]}</exe-cmd>
|
|
</execute>
|
|
<execute user="root">
|
|
<exe-cmd>chmod 750 {[backrest-repo-path]}</exe-cmd>
|
|
</execute>
|
|
<execute user="root">
|
|
<exe-cmd>chown {[br-install-user]}:{[br-install-group]} {[backrest-repo-path]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<execute-list if="'{[package]}' ne 'none' && '{[br-install-user]}' ne 'postgres'" host="{[br-install-host]}">
|
|
<title>Update permissions on the <backrest/> repository</title>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>chown {[br-install-user]}:{[br-install-group]} {[backrest-repo-path]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
</block-define>
|
|
|
|
<!-- ======================================================================================================================= -->
|
|
<block-define id="azure-setup">
|
|
<p><backrest/> supports locating repositories in <proper>Azure-compatible</proper> object stores. The container used to store the repository must be created in advance &mdash; <backrest/> will not do it automatically. The repository can be located in the container root (<path>/</path>) but it's usually best to place it in a subpath so object store logs or other data can also be stored in the container without conflicts.</p>
|
|
|
|
<backrest-config host="{[azure-setup-host]}" file="{[backrest-config-demo]}" owner="{[azure-setup-config-owner]}">
|
|
<title>Configure <proper>Azure</proper></title>
|
|
|
|
<backrest-config-option section="global" key="repo{[azure-setup-repo-id]}-type">azure</backrest-config-option>
|
|
<backrest-config-option section="global" key="repo{[azure-setup-repo-id]}-path">/{[azure-repo]}</backrest-config-option>
|
|
<backrest-config-option section="global" key="repo{[azure-setup-repo-id]}-azure-account">{[azure-account]}</backrest-config-option>
|
|
<backrest-config-option if="'{[azure-key-type]}' ne 'shared'" section="global" key="repo{[azure-setup-repo-id]}-azure-key-type">{[azure-key-type]}</backrest-config-option>
|
|
<backrest-config-option section="global" key="repo{[azure-setup-repo-id]}-azure-key">{[azure-key]}</backrest-config-option>
|
|
<backrest-config-option section="global" key="repo{[azure-setup-repo-id]}-azure-container">{[azure-container]}</backrest-config-option>
|
|
<backrest-config-option if="'{[azure-local]}' eq 'y'" section="global" key="repo{[azure-setup-repo-id]}-storage-host">blob.core.windows.net</backrest-config-option>
|
|
<backrest-config-option if="'{[azure-all]}' ne 'y'" section="global" key="repo{[azure-setup-repo-id]}-retention-full">4</backrest-config-option>
|
|
|
|
<backrest-config-option section="global" key="process-max">4</backrest-config-option>
|
|
</backrest-config>
|
|
|
|
<execute-list if="'{[azure-local]}' eq 'y'" host="{[azure-setup-host]}" show="n">
|
|
<title>Create the container</title>
|
|
|
|
<!-- Set host entries to redirect to local azure server -->
|
|
<execute user="root" user-force="y" show="n">
|
|
<exe-cmd>echo "{[host-azure-ip]} blob.core.windows.net" | tee -a /etc/hosts</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="{[azure-setup-user]}" if="'{[azure-setup-create-container]}' eq 'y'" show='n'>
|
|
<exe-cmd>{[project-exe]} --repo={[azure-setup-repo-id]} repo-create</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Shared access signatures may be used by setting the <br-option>repo{[azure-setup-repo-id]}-azure-key-type</br-option> option to <id>sas</id> and the <br-option>repo{[azure-setup-repo-id]}-azure-key</br-option> option to the shared access signature token.</p>
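<!-- Illustrative example (not executed by the documentation build): configuring a shared access signature might look like the following, where the token value is a placeholder:

     repo1-azure-key-type=sas
     repo1-azure-key=<sas-token>
-->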
|
|
</block-define>
|
|
|
|
<!-- ======================================================================================================================= -->
|
|
<block-define id="gcs-setup">
|
|
<p><backrest/> supports locating repositories in <proper>GCS-compatible</proper> object stores. The bucket used to store the repository must be created in advance &mdash; <backrest/> will not do it automatically. The repository can be located in the bucket root (<path>/</path>) but it's usually best to place it in a subpath so object store logs or other data can also be stored in the bucket without conflicts.</p>
|
|
|
|
<backrest-config host="{[gcs-setup-host]}" file="{[backrest-config-demo]}" owner="{[gcs-setup-config-owner]}">
|
|
<title>Configure <proper>GCS</proper></title>
|
|
|
|
<backrest-config-option section="global" key="repo{[gcs-setup-repo-id]}-type">gcs</backrest-config-option>
|
|
<backrest-config-option section="global" key="repo{[gcs-setup-repo-id]}-path">/{[gcs-repo]}</backrest-config-option>
|
|
<backrest-config-option if="'{[gcs-key-type]}' ne 'service'" section="global" key="repo{[gcs-setup-repo-id]}-gcs-key-type">{[gcs-key-type]}</backrest-config-option>
|
|
<backrest-config-option section="global" key="repo{[gcs-setup-repo-id]}-gcs-key">{[gcs-key]}</backrest-config-option>
|
|
<backrest-config-option section="global" key="repo{[gcs-setup-repo-id]}-gcs-bucket">{[gcs-bucket]}</backrest-config-option>
|
|
|
|
<backrest-config-option section="global" key="process-max">4</backrest-config-option>
|
|
</backrest-config>
|
|
|
|
<p>When running in <proper>GCE</proper> set <br-option>repo{[gcs-setup-repo-id]}-gcs-key-type=auto</br-option> to automatically authenticate using the instance service account.</p>
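<!-- Illustrative example (not executed by the documentation build): on a GCE instance the following setting authenticates with the instance service account:

     repo1-gcs-key-type=auto
-->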
|
|
</block-define>
|
|
|
|
<!-- ======================================================================================================================= -->
|
|
<block-define id="s3-setup">
|
|
<p><backrest/> supports locating repositories in <proper>S3-compatible</proper> object stores. The bucket used to store the repository must be created in advance &mdash; <backrest/> will not do it automatically. The repository can be located in the bucket root (<path>/</path>) but it's usually best to place it in a subpath so object store logs or other data can also be stored in the bucket without conflicts.</p>
|
|
|
|
<backrest-config host="{[s3-setup-host]}" file="{[backrest-config-demo]}" owner="{[s3-setup-config-owner]}">
|
|
<title>Configure <proper>S3</proper></title>
|
|
|
|
<backrest-config-option section="global" key="repo{[s3-setup-repo-id]}-type">s3</backrest-config-option>
|
|
<backrest-config-option section="global" key="repo{[s3-setup-repo-id]}-path">/{[s3-repo]}</backrest-config-option>
|
|
<backrest-config-option section="global" key="repo{[s3-setup-repo-id]}-s3-key">{[s3-key]}</backrest-config-option>
|
|
<backrest-config-option section="global" key="repo{[s3-setup-repo-id]}-s3-key-secret">{[s3-key-secret]}</backrest-config-option>
|
|
<backrest-config-option section="global" key="repo{[s3-setup-repo-id]}-s3-bucket">{[s3-bucket]}</backrest-config-option>
|
|
<backrest-config-option section="global" key="repo{[s3-setup-repo-id]}-s3-endpoint">{[s3-endpoint]}</backrest-config-option>
|
|
<backrest-config-option section="global" key="repo{[s3-setup-repo-id]}-s3-region">{[s3-region]}</backrest-config-option>
|
|
<backrest-config-option if="'{[s3-all]}' ne 'y'" section="global" key="repo{[s3-setup-repo-id]}-retention-full">4</backrest-config-option>
|
|
<backrest-config-option section="global" key="repo{[s3-setup-repo-id]}-storage-host" remove="y"/>
|
|
|
|
<backrest-config-option section="global" key="process-max">4</backrest-config-option>
|
|
</backrest-config>
|
|
|
|
<execute-list if="'{[s3-local]}' eq 'y'" host="{[s3-setup-host]}" show="n">
|
|
<title>Create the bucket</title>
|
|
|
|
<!-- Set host entries to redirect AWS to local s3 server -->
|
|
<execute user="root" user-force="y" show="n">
|
|
<exe-cmd>echo "{[host-s3-ip]} {[s3-bucket]}.{[s3-endpoint]} {[s3-endpoint]}" | tee -a /etc/hosts</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="{[s3-setup-user]}" if="'{[s3-setup-create-bucket]}' eq 'y'" show='n'>
|
|
<exe-cmd>{[project-exe]} --repo={[s3-setup-repo-id]} repo-create</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<admonition type="note">The region and endpoint will need to be configured to where the bucket is located. The values given here are for the <id>{[s3-region]}</id> region.</admonition>
|
|
</block-define>
|
|
|
|
<!-- SECTION => INTRODUCTION -->
|
|
<section id="introduction">
|
|
<title>Introduction</title>
|
|
|
|
<!-- Create Azure server first to allow it time to boot before being used -->
|
|
<host-add if="'{[azure-local]}' eq 'y'" id="{[host-azure-id]}" name="{[host-azure]}" user="root" image="mcr.microsoft.com/azure-storage/azurite" os="{[os-type]}" option="-v {[fake-cert-path]}/azure-server.crt:/root/public.crt:ro -v {[fake-cert-path]}/azure-server.key:/root/private.key:ro -e AZURITE_ACCOUNTS='{[azure-account]}:{[azure-key]}'" param="azurite-blob --blobPort 443 --blobHost 0.0.0.0 --cert=/root/public.crt --key=/root/private.key" update-hosts="n"/>
|
|
|
|
<!-- Create S3 server first to allow it time to boot before being used -->
|
|
<host-add if="'{[s3-local]}' eq 'y'" id="{[host-s3-id]}" name="{[host-s3]}" user="root" image="minio/minio" os="{[os-type]}" option="-v {[fake-cert-path]}/s3-server.crt:/root/.minio/certs/public.crt:ro -v {[fake-cert-path]}/s3-server.key:/root/.minio/certs/private.key:ro -e MINIO_REGION={[s3-region]} -e MINIO_DOMAIN={[s3-endpoint]} -e MINIO_BROWSER=off -e MINIO_ACCESS_KEY={[s3-key]} -e MINIO_SECRET_KEY={[s3-key-secret]}" param="server /data --address :443" update-hosts="n"/>
|
|
|
|
<p>This user guide is intended to be followed sequentially from beginning to end &mdash; each section depends on the last. For example, the <link section="/backup">Backup</link> section relies on setup that is performed in the <link section="/quickstart">Quick Start</link> section. Once <backrest/> is up and running, skipping around is possible, but following the user guide in order is recommended the first time through.</p>
|
|
|
|
<p>Although the examples are targeted at {[user-guide-os]} and <postgres/> {[pg-version-min]}-{[pg-version-max]}, it should be fairly easy to apply this guide to any Unix distribution and <postgres/> version. The only OS-specific commands are those to create, start, stop, and drop <postgres/> clusters. The <backrest/> commands will be the same on any Unix system though the location to install the executable may vary.
|
|
|
|
Configuration information and documentation for PostgreSQL can be found in the <postgres/> <link url='http://www.postgresql.org/docs/{[pg-version]}/static/index.html'>Manual</link>.</p>
|
|
|
|
<p>A somewhat novel approach is taken to documentation in this user guide. Each command is run on a virtual machine when the documentation is built from the XML source. This means you can have high confidence that the commands work correctly in the order presented. Output is captured and displayed below the command when appropriate. If the output is not included, it is because it was deemed not relevant or a distraction from the narrative.</p>
|
|
|
|
<p>All commands are intended to be run as an unprivileged user that has sudo privileges for both the <user>root</user> and <user>postgres</user> users. It's also possible to run the commands directly as their respective users without modification and in that case the <cmd>sudo</cmd> commands can be stripped off.</p>
|
|
</section>
|
|
|
|
<!-- SECTION => CONCEPTS -->
|
|
<section id="concept">
|
|
<title>Concepts</title>
|
|
|
|
<p>The following concepts are defined as they are relevant to <backrest/>, <postgres/>, and this user guide.</p>
|
|
|
|
<!-- SECTION => CONCEPTS - BACKUP -->
|
|
<section id="backup">
|
|
<title>Backup</title>
|
|
|
|
<p>A backup is a consistent copy of a database cluster that can be restored to recover from a hardware failure, to perform Point-In-Time Recovery, or to bring up a new standby.</p>
|
|
|
|
<p><b>Full Backup</b>: <backrest/> copies the entire contents of the database cluster to the backup. The first backup of the database cluster is always a Full Backup. <backrest/> is always able to restore a full backup directly. The full backup does not depend on any files outside of the full backup for consistency.</p>
|
|
|
|
<p><b>Differential Backup</b>: <backrest/> copies only those database cluster files that have changed since the last full backup. <backrest/> restores a differential backup by copying all of the files in the chosen differential backup and the appropriate unchanged files from the previous full backup. The advantage of a differential backup is that it requires less disk space than a full backup, however, the differential backup and the full backup must both be valid to restore the differential backup.</p>
|
|
|
|
<p><b>Incremental Backup</b>: <backrest/> copies only those database cluster files that have changed since the last backup (which can be another incremental backup, a differential backup, or a full backup). Because an incremental backup includes only the files changed since the prior backup, it is generally much smaller than a full or differential backup. Like a differential backup, an incremental backup depends on other backups in order to be restored: all prior incremental backups back to the most recent differential backup, that differential backup, and the prior full backup must all be valid. If no differential backup exists then all prior incremental backups back to the prior full backup, and the full backup itself, must be valid.</p>
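<!-- Illustrative example (not executed by the documentation build): the backup type is selected with the type option, written here with {[dash]} so this comment does not contain a literal double hyphen:

     pgbackrest {[dash]}-stanza=demo {[dash]}-type=full backup
     pgbackrest {[dash]}-stanza=demo {[dash]}-type=diff backup
     pgbackrest {[dash]}-stanza=demo {[dash]}-type=incr backup
-->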
|
|
</section>
|
|
|
|
<!-- SECTION => CONCEPTS - RESTORE -->
|
|
<section id="restore">
|
|
<title>Restore</title>
|
|
|
|
<p>A restore is the act of copying a backup to a system where it will be started as a live database cluster. A restore requires the backup files and one or more WAL segments in order to work correctly.</p>
|
|
</section>
|
|
|
|
<!-- SECTION => CONCEPTS - WAL -->
|
|
<section id="wal">
|
|
<title>Write Ahead Log (WAL)</title>
|
|
|
|
<p>WAL is the mechanism that <postgres/> uses to ensure that no committed changes are lost. Transactions are written sequentially to the WAL and a transaction is considered to be committed when those writes are flushed to disk. Afterwards, a background process writes the changes into the main database cluster files (also known as the heap). In the event of a crash, the WAL is replayed to make the database consistent.</p>
|
|
|
|
<p>WAL is conceptually infinite but in practice is broken up into individual 16MB files called segments. WAL segments follow the naming convention <id>0000000100000A1E000000FE</id> where the first 8 hexadecimal digits represent the timeline and the next 16 digits are the logical sequence number (LSN).</p>
|
|
|
|
</section>
|
|
|
|
<!-- SECTION => CONCEPTS - ENCRYPTION -->
|
|
<section id="encryption">
|
|
<title>Encryption</title>
|
|
|
|
<p>Encryption is the process of converting data into a format that is unrecognizable unless the appropriate password (also referred to as passphrase) is provided.</p>
|
|
|
|
<p><backrest/> will encrypt the repository based on a user-provided password, thereby preventing unauthorized access to data stored within the repository.</p>
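<!-- Illustrative example (not executed by the documentation build): repository encryption is enabled with the cipher options, e.g.:

     [global]
     repo1-cipher-type=aes-256-cbc
     repo1-cipher-pass=<passphrase>
-->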
|
|
</section>
|
|
</section>
|
|
|
|
<!-- SECTION => UPGRADING -->
|
|
<section id="upgrading">
|
|
<title>Upgrading {[project]}</title>
|
|
|
|
<section id="v1-v2">
|
|
<title>Upgrading {[project]} from v1 to v2</title>
|
|
|
|
<p>Upgrading from <proper>v1</proper> to <proper>v2</proper> is fairly straightforward. The repository format has not changed and all non-deprecated options from <proper>v1</proper> are accepted, so for most installations it is simply a matter of installing the new version.</p>
|
|
|
|
<p>However, there are a few caveats:</p>
|
|
|
|
<list>
|
|
<list-item>The deprecated <br-option>thread-max</br-option> option is no longer valid. Use <br-option>process-max</br-option> instead.</list-item>
|
|
|
|
<list-item>The deprecated <br-option>archive-max-mb</br-option> option is no longer valid. This has been replaced with the <br-option>archive-push-queue-max</br-option> option which has different semantics.</list-item>
|
|
|
|
<list-item>The default for the <br-option>backup-user</br-option> option has changed from <id>backrest</id> to <id>pgbackrest</id>.</list-item>
|
|
|
|
<list-item>In <proper>v2.02</proper> the default location of the <backrest/> configuration file has changed from <file>/etc/pgbackrest.conf</file> to <file>/etc/pgbackrest/pgbackrest.conf</file>. If <file>/etc/pgbackrest/pgbackrest.conf</file> does not exist, the <file>/etc/pgbackrest.conf</file> file will be loaded instead, if it exists.</list-item>
|
|
</list>
|
|
|
|
<p>Many option names have changed to improve consistency although the old names from <proper>v1</proper> are still accepted. In general, <id>db-*</id> options have been renamed to <id>pg-*</id> and <id>backup-*</id>/<id>retention-*</id> options have been renamed to <id>repo-*</id> when appropriate.</p>
|
|
|
|
<p><postgres/> and repository options must be indexed when using the new names introduced in <proper>v2</proper>, e.g. <br-option>pg1-host</br-option>, <br-option>pg1-path</br-option>, <br-option>repo1-path</br-option>, <br-option>repo1-type</br-option>, etc. Only one repository is allowed currently but more flexibility is planned for <proper>v2</proper>.</p>
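<!-- Illustrative example (not executed by the documentation build): a v1 configuration such as

     [demo]
     db-path=/var/lib/postgresql/12/demo

     would be written with the indexed v2 option names as

     [demo]
     pg1-path=/var/lib/postgresql/12/demo
-->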
|
|
</section>
|
|
</section>
|
|
|
|
<!-- ======================================================================================================================= -->
|
|
<section if="'{[package]}' eq 'none'" id="build">
|
|
<title>Build</title>
|
|
|
|
<p if="{[os-type-is-debian]}">{[user-guide-os]} packages for <backrest/> are available at <link url="https://www.postgresql.org/download/linux/ubuntu/">apt.postgresql.org</link>. If they are not provided for your distribution/version it is easy to download the source and install manually.</p>
|
|
|
|
<p if="{[os-type-is-rhel]}">{[user-guide-os]} packages for <backrest/> are available from <link url="{[crunchy-url-base]}">Crunchy Data</link> or <link url="http://yum.postgresql.org">yum.postgresql.org</link>, but it is also easy to download the source and install manually.</p>
|
|
|
|
<host-add id="{[host-build-id]}" name="{[host-build]}" user="{[host-build-user]}" image="{[host-build-image]}" os="{[os-type]}" mount="{[host-build-mount]}" option="{[host-option]}"/>
|
|
|
|
<p>When building from source it is best to use a build host rather than building on production. Many of the tools required for the build should generally not be installed in production. <backrest/> consists of a single executable so it is easy to copy to a new host once it is built.</p>
|
|
|
|
<execute-list host="{[host-build]}">
|
|
<title>Download version <id>{[version]}</id> of <backrest/> to pre-created <path>/build</path> path</title>
|
|
|
|
<!-- This is shown to the user but never actually run for the very good reason that the release is not available before the documentation is built -->
|
|
<execute skip="y">
|
|
<exe-cmd>wget -q -O -
|
|
{[github-url-release]}/{[version]}.tar.gz |
|
|
tar zx -C /build</exe-cmd>
|
|
</execute>
|
|
|
|
<!-- These commands simulate what the command above would do if it could be run -->
|
|
<execute user="root" show="n">
|
|
<exe-cmd>mkdir -p /build/pgbackrest-release-{[version]}</exe-cmd>
|
|
</execute>
|
|
<execute user="root" show="n">
|
|
<exe-cmd>cp -r {[pgbackrest-repo-path]}/src /build/pgbackrest-release-{[version]}</exe-cmd>
|
|
</execute>
|
|
<execute user="root" show="n">
|
|
<exe-cmd>chown -R {[host-build-user]} /build/pgbackrest-release-{[version]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<execute-list host="{[host-build]}">
|
|
<title>Install build dependencies</title>
|
|
|
|
<execute if="{[os-type-is-debian]}" user="root" show="n" pre="y">
|
|
<exe-cmd>apt-get update</exe-cmd>
|
|
</execute>
|
|
|
|
<execute if="{[os-type-is-debian]}" user="root" pre="y">
|
|
<exe-cmd>
|
|
apt-get install make gcc libpq-dev libssl-dev libxml2-dev pkg-config
|
|
liblz4-dev libzstd-dev libbz2-dev libz-dev
|
|
</exe-cmd>
|
|
<exe-cmd-extra>-y 2>&1</exe-cmd-extra>
|
|
</execute>
|
|
|
|
<execute if="{[os-type-is-rhel]}" user="root" pre="y">
|
|
<exe-cmd>
|
|
yum install make gcc postgresql{[pg-version-nodot]}-devel
|
|
openssl-devel libxml2-devel lz4-devel libzstd-devel bzip2-devel
|
|
</exe-cmd>
|
|
<exe-cmd-extra>-y 2>&1</exe-cmd-extra>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<execute-list host="{[host-build]}">
|
|
<title>Configure and compile <backrest/></title>
|
|
|
|
<execute>
|
|
<exe-cmd>cd /build/pgbackrest-release-{[version]}/src && ./configure && make</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
</section>
|
|
|
|
<!-- SECTION => INSTALLATION -->
|
|
<section id="installation">
|
|
<title>Installation</title>
|
|
|
|
<p>A new host named <host>pg1</host> is created to contain the demo cluster and run <backrest/> examples.</p>
|
|
|
|
<host-add id="{[host-pg1-id]}" name="{[host-pg1]}" user="{[host-pg1-user]}" image="{[host-pg1-image]}" os="{[os-type]}" mount="{[host-pg1-mount]}" option="{[host-option]}"/>
|
|
|
|
<!-- <execute-list if="{[pg-version]} >= 11" host="{[host-pg1]}">
|
|
<title>Create <user>{[br-user]}</user> user</title>
|
|
|
|
<execute if="{[os-type-is-debian]}" user="root">
|
|
<exe-cmd>adduser {[dash]}-ingroup {[pg-group]} {[dash]}-disabled-password {[dash]}-gecos "" {[br-user]}</exe-cmd>
|
|
</execute>
|
|
<execute if="{[os-type-is-rhel]}" user="root">
|
|
<exe-cmd>adduser -g{[pg-group]} -n {[br-user]}</exe-cmd>
|
|
</execute>
|
|
</execute-list> -->
|
|
|
|
<block id="br-install">
|
|
<block-variable-replace key="br-install-host">{[host-pg1]}</block-variable-replace>
|
|
<block-variable-replace key="br-install-user">postgres</block-variable-replace>
|
|
<block-variable-replace key="br-install-group">postgres</block-variable-replace>
|
|
</block>
|
|
|
|
<p><backrest/> should now be properly installed but it is best to check. If any dependencies were missed then you will get an error when running <backrest/> from the command line.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Make sure the installation worked</title>
|
|
|
|
<execute user="postgres" output="y" filter="n">
|
|
<exe-cmd>{[project-exe]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
</section>
|
|
|
|
<!-- SECTION => QUICKSTART -->
|
|
<section id="quickstart">
|
|
<title>Quick Start</title>
|
|
|
|
<p>The Quick Start section will cover basic configuration of <backrest/> and <postgres/> and introduce the <cmd>backup</cmd>, <cmd>restore</cmd>, and <cmd>info</cmd> commands.</p>
|
|
|
|
<!-- SECTION => QUICKSTART - SETUP DEMO CLUSTER -->
|
|
<section id="setup-demo-cluster">
|
|
<title>Setup Demo Cluster</title>
|
|
|
|
<p>Creating the demo cluster is optional but is strongly recommended, especially for new users, since the example commands in the user guide reference the demo cluster; the examples assume the demo cluster is running on the default port (i.e. 5432). The cluster will not be started until a later section because there is still some configuration to do.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Create the demo cluster</title>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>
|
|
{[pg-bin-path]}/initdb
|
|
-D {[pg-path]} -k -A peer</exe-cmd>
|
|
</execute>
|
|
|
|
<execute if="{[os-type-is-debian]}" user="root" output="y" filter="n">
|
|
<exe-cmd>{[pg-cluster-create]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>By default <postgres/> will only accept local connections. The examples in this guide will require connections from other servers so <pg-option>listen_addresses</pg-option> is configured to listen on all interfaces. This may not be appropriate for secure installations.</p>
|
|
|
|
<postgres-config host="{[host-pg1]}" file="{[postgres-config-demo]}">
|
|
<title>Set <pg-option>listen_addresses</pg-option></title>
|
|
|
|
<postgres-config-option key="listen_addresses">'*'</postgres-config-option>
|
|
</postgres-config>
|
|
|
|
<p>For demonstration purposes the <pg-option>log_line_prefix</pg-option> setting will be minimally configured. This keeps the log output as brief as possible to better illustrate important information.</p>
|
|
|
|
<postgres-config host="{[host-pg1]}" file="{[postgres-config-demo]}">
|
|
<title>Set <pg-option>log_line_prefix</pg-option></title>
|
|
|
|
<postgres-config-option key="log_line_prefix">''</postgres-config-option>
|
|
</postgres-config>
|
|
|
|
<p if="{[os-type-is-rhel]}">By default {[user-guide-os]} includes the day of the week in the log filename. This makes automating the user guide a bit more complicated so the <pg-option>log_filename</pg-option> is set to a constant.</p>
|
|
|
|
<postgres-config host="{[host-pg1]}" if="{[os-type-is-rhel]}" file="{[postgres-config-demo]}">
|
|
<title>Set <pg-option>log_filename</pg-option></title>
|
|
|
|
<postgres-config-option key="log_filename">'postgresql.log'</postgres-config-option>
|
|
</postgres-config>
|
|
</section>
|
|
|
|
<!-- SECTION => QUICKSTART - CONFIGURE STANZA -->
|
|
<section id="configure-stanza">
|
|
<title>Configure Cluster Stanza</title>
|
|
|
|
<option-description key="stanza"/>
|
|
|
|
<p>The name 'demo' describes the purpose of this cluster accurately so that will also make a good stanza name.</p>
|
|
|
|
<p><backrest/> needs to know where the base data directory for the <postgres/> cluster is located. The path can be requested from <postgres/> directly but in a recovery scenario the <postgres/> process will not be available. During backups the value supplied to <backrest/> will be compared against the path that <postgres/> is running on and they must be equal or the backup will return an error. Make sure that <br-option>pg-path</br-option> is exactly equal to <pg-option>data_directory</pg-option> in <file>postgresql.conf</file>.</p>
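<!-- Illustrative check (not executed by the documentation build): the data directory of the running cluster can be confirmed with:

     sudo -u postgres psql -c "show data_directory;"
-->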
|
|
|
|
<p>By default {[user-guide-os]} stores clusters in <path>{[pg-path-default]}</path> so it is easy to determine the correct path for the data directory.</p>
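<p>Once the cluster is running, the configured <br-option>pg1-path</br-option> can also be double-checked against the value reported by <postgres/> itself. A minimal check might look like the following; the query is standard <postgres/> and only the surrounding workflow is an example.</p>

<code-block title="Example: confirm the cluster data directory">
psql -Atc "show data_directory"
</code-block>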
|
|
|
|
<p>When creating the <file>{[backrest-config-demo]}</file> file, the database owner (usually <id>postgres</id>) must be granted read privileges.</p>
|
|
|
|
<backrest-config host="{[host-pg1]}" file="{[backrest-config-demo]}">
|
|
<title>Configure the <postgres/> cluster data directory</title>
|
|
|
|
<backrest-config-option section="demo" key="pg1-path">{[pg-path]}</backrest-config-option>
|
|
|
|
<backrest-config-option section="global" key="log-level-stderr">off</backrest-config-option>
|
|
<backrest-config-option section="global" key="log-timestamp">n</backrest-config-option>
|
|
</backrest-config>
|
|
|
|
<p><backrest/> configuration files follow the Windows INI convention. Sections are denoted by text in brackets and key/value pairs are contained in each section. Lines beginning with <id>#</id> are ignored and can be used as comments.</p>
|
|
|
|
<p>There are multiple ways the <backrest/> configuration files can be loaded:</p>
|
|
<list>
|
|
<list-item><br-option>config</br-option> and <br-option>config-include-path</br-option> are default: the default config file will be loaded, if it exists, and <file>*.conf</file> files in the default config include path will be appended, if they exist.</list-item>
|
|
<list-item><br-option>config</br-option> option is specified: only the specified config file will be loaded and is expected to exist.</list-item>
|
|
<list-item><br-option>config-include-path</br-option> is specified: <file>*.conf</file> files in the config include path will be loaded and the path is required to exist. The default config file will be loaded if it exists. If it is desirable to load only the files in the specified config include path, then the <br-option>--no-config</br-option> option can also be passed.</list-item>
|
|
<list-item><br-option>config</br-option> and <br-option>config-include-path</br-option> are specified: using the user-specified values, the config file will be loaded and <file>*.conf</file> files in the config include path will be appended. The files are expected to exist.</list-item>
|
|
<list-item><br-option>config-path</br-option> is specified: this setting will override the base path for the default location of the config file and/or the base path of the default config-include-path setting unless the config and/or config-include-path option is explicitly set.</list-item>
|
|
</list>
|
|
|
|
<p>The files are concatenated as if they were one big file; order doesn't matter, but there is precedence based on sections. The precedence (highest to lowest) is:</p>
|
|
|
|
<list>
|
|
<list-item>[<i>stanza</i>:<i>command</i>]</list-item>
|
|
<list-item>[<i>stanza</i>]</list-item>
|
|
<list-item>[global:<i>command</i>]</list-item>
|
|
<list-item>[global]</list-item>
|
|
</list>
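<p>As an illustration of the file format and of section precedence, a configuration might look like the sketch below; the option values and paths are examples only. Here the <br-option>compress-level</br-option> in <id>[global:archive-push]</id> overrides the value in <id>[global]</id>, but only for the <cmd>archive-push</cmd> command, while settings in <id>[demo]</id> apply only to the <id>demo</id> stanza.</p>

<code-block title="Example configuration file (illustrative values)">
# Lines beginning with '#' are comments
[global]
repo1-path=/var/lib/pgbackrest
compress-level=6

[global:archive-push]
# Overrides the [global] value for the archive-push command only
compress-level=3

[demo]
# Applies only to the demo stanza
pg1-path=/var/lib/postgresql/12/demo
</code-block>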
|
|
|
|
<admonition type="note"><br-option>--config</br-option>, <br-option>--config-include-path</br-option> and <br-option>--config-path</br-option> are command-line only options.</admonition>
|
|
|
|
<p><backrest/> can also be configured using environment variables as described in the <link url="command.html">command reference</link>.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Configure <br-option>log-path</br-option> using the environment</title>
|
|
|
|
<execute user="postgres" output="y" filter="n">
|
|
<exe-cmd>bash -c '
|
|
export PGBACKREST_LOG_PATH=/path/set/by/env &&
|
|
{[project-exe]} --log-level-console=error help backup log-path'</exe-cmd>
|
|
<exe-highlight>current\: \/path\/set\/by\/env</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
</section>
|
|
|
|
<!-- SECTION => QUICKSTART - CREATE REPOSITORY -->
|
|
<section id="create-repository">
|
|
<title>Create the Repository</title>
|
|
|
|
<option-description key="repo-path"/>
|
|
|
|
<p>For this demonstration the repository will be stored on the same host as the <postgres/> server. This is the simplest configuration and is useful in cases where traditional backup software is employed to backup the database host.</p>
|
|
|
|
<block id="br-install-repo">
|
|
<block-variable-replace key="br-install-host">{[host-pg1]}</block-variable-replace>
|
|
<block-variable-replace key="br-install-user">postgres</block-variable-replace>
|
|
<block-variable-replace key="br-install-group">postgres</block-variable-replace>
|
|
</block>
|
|
|
|
<p>The repository path must be configured so <backrest/> knows where to find it.</p>
|
|
|
|
<backrest-config host="{[host-pg1]}" file="{[backrest-config-demo]}">
|
|
<title>Configure the <backrest/> repository path</title>
|
|
|
|
<backrest-config-option section="global" key="repo1-path">{[backrest-repo-path]}</backrest-config-option>
|
|
</backrest-config>
|
|
|
|
<p if="!{[object-any-all]}">Multiple repositories may also be configured. See <link section="/multi-repo">Multiple Repositories</link> for details.</p>
|
|
</section>
|
|
|
|
<!-- =================================================================================================================== -->
|
|
<section id="azure-support" if="'{[azure-all]}' eq 'y'">
|
|
<title>Azure-Compatible Object Store Support</title>
|
|
|
|
<block id="azure-setup">
|
|
<block-variable-replace key="azure-setup-repo-id">1</block-variable-replace>
|
|
<block-variable-replace key="azure-setup-host">{[host-pg1]}</block-variable-replace>
|
|
<block-variable-replace key="azure-setup-user">postgres</block-variable-replace>
|
|
<block-variable-replace key="azure-setup-config-owner">postgres:postgres</block-variable-replace>
|
|
<block-variable-replace key="azure-setup-create-container">y</block-variable-replace>
|
|
</block>
|
|
</section>
|
|
|
|
<!-- =================================================================================================================== -->
|
|
<section id="gcs-support" if="'{[gcs-all]}' eq 'y'">
|
|
<title>GCS-Compatible Object Store Support</title>
|
|
|
|
<block id="gcs-setup">
|
|
<block-variable-replace key="gcs-setup-repo-id">1</block-variable-replace>
|
|
<block-variable-replace key="gcs-setup-host">{[host-pg1]}</block-variable-replace>
|
|
<block-variable-replace key="gcs-setup-user">postgres</block-variable-replace>
|
|
<block-variable-replace key="gcs-setup-config-owner">postgres:postgres</block-variable-replace>
|
|
</block>
|
|
</section>
|
|
|
|
<!-- =================================================================================================================== -->
|
|
<section id="s3-support" if="'{[s3-all]}' eq 'y'">
|
|
<title>S3-Compatible Object Store Support</title>
|
|
|
|
<block id="s3-setup">
|
|
<block-variable-replace key="s3-setup-repo-id">1</block-variable-replace>
|
|
<block-variable-replace key="s3-setup-host">{[host-pg1]}</block-variable-replace>
|
|
<block-variable-replace key="s3-setup-user">postgres</block-variable-replace>
|
|
<block-variable-replace key="s3-setup-config-owner">postgres:postgres</block-variable-replace>
|
|
<block-variable-replace key="s3-setup-create-bucket">y</block-variable-replace>
|
|
</block>
|
|
</section>
|
|
|
|
<!-- SECTION => QUICKSTART - CONFIGURE ARCHIVING -->
|
|
<section id="configure-archiving">
|
|
<title>Configure Archiving</title>
|
|
|
|
<p>Backing up a running <postgres/> cluster requires WAL archiving to be enabled. Note that <i>at least</i> one WAL segment will be created during the backup process even if no explicit writes are made to the cluster.</p>
|
|
|
|
<postgres-config host="{[host-pg1]}" file="{[postgres-config-demo]}">
|
|
<title>Configure archive settings</title>
|
|
|
|
<postgres-config-option key="archive_command">'{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} archive-push %p'</postgres-config-option>
|
|
<postgres-config-option key="archive_mode">on</postgres-config-option>
|
|
<postgres-config-option key="wal_level">{[wal-level]}</postgres-config-option>
|
|
<postgres-config-option key="max_wal_senders">3</postgres-config-option>
|
|
</postgres-config>
|
|
|
|
<p><id>%p</id> is how <postgres/> specifies the location of the WAL segment to be archived. Setting <pg-option>wal_level</pg-option> to at least <pg-setting>{[wal-level]}</pg-setting> and increasing <pg-option>max_wal_senders</pg-option> is a good idea even if there are currently no replicas as this will allow them to be added later without restarting the primary cluster.</p>
|
|
|
|
<p>The <postgres/> cluster must be restarted after making these changes and before performing a backup.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Restart the {[postgres-cluster-demo]} cluster</title>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>{[pg-cluster-restart]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" show="n">
|
|
<exe-cmd>{[pg-cluster-wait]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>If archiving a WAL segment is expected to take more than 60 seconds (the default) to reach the <backrest/> repository, then the <backrest/> <br-option>archive-timeout</br-option> option should be increased. Note that this option is not the same as the <postgres/> <pg-option>archive_timeout</pg-option> option, which is used to force a WAL segment switch and is useful for databases with long periods of inactivity. For more information on the <postgres/> <pg-option>archive_timeout</pg-option> option, see <postgres/> <link url="https://www.postgresql.org/docs/current/static/runtime-config-wal.html">Write Ahead Log</link>.</p>
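<p>For example, if pushing a single WAL segment to the repository is known to take up to two minutes, the timeout might be raised as shown below; the value is illustrative only.</p>

<code-block title="Example: increase archive-timeout">
[global]
archive-timeout=120
</code-block>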
|
|
|
|
<p>The <cmd>archive-push</cmd> command can be configured with its own options. For example, a lower compression level may be set to speed archiving without affecting the compression used for backups.</p>
|
|
|
|
<backrest-config host="{[host-pg1]}" file="{[backrest-config-demo]}">
|
|
<title>Config <cmd>archive-push</cmd> to use a lower compression level</title>
|
|
|
|
<backrest-config-option section="global:archive-push" key="compress-level">3</backrest-config-option>
|
|
</backrest-config>
|
|
|
|
<p>This configuration technique can be used for any command and can even target a specific stanza, e.g. <code>demo:archive-push</code>.</p>
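<p>For instance, to apply the lower compression level only to the <id>demo</id> stanza, the section name could include the stanza; the value shown is an example.</p>

<code-block title="Example: stanza-specific command section">
[demo:archive-push]
compress-level=3
</code-block>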
|
|
</section>
|
|
|
|
<!-- SECTION => QUICKSTART - RETENTION -->
|
|
<section id="retention">
|
|
<title>Configure Retention</title>
|
|
|
|
<p><backrest/> expires backups based on retention options.</p>
|
|
|
|
<backrest-config host="{[host-pg1]}" file="{[backrest-config-demo]}">
|
|
<title>Configure retention to 2 full backups</title>
|
|
|
|
<backrest-config-option section="global" key="repo1-retention-full">2</backrest-config-option>
|
|
</backrest-config>
|
|
|
|
<p>More information about retention can be found in the <link section="/retention">Retention</link> section.</p>
|
|
</section>
|
|
|
|
<!-- SECTION => QUICKSTART - CONFIGURE ENCRYPTION -->
|
|
<!-- Since S3 and repository host require configure-archiving, this section must come after. -->
|
|
<section if="'{[encrypt]}' eq 'y'" id="configure-encryption">
|
|
<title>Configure Repository Encryption</title>
|
|
|
|
<p>The repository will be configured with a cipher type and key to demonstrate encryption. Encryption is always performed client-side even if the repository type (e.g. <proper>S3</proper> or other object store) supports encryption.</p>
|
|
|
|
<p>It is important to use a long, random passphrase for the cipher key. A good way to generate one is to run: <code>openssl rand -base64 48</code>.</p>
|
|
|
|
<backrest-config host="{[host-pg1]}" file="{[backrest-config-demo]}">
|
|
<title>Configure <backrest/> repository encryption</title>
|
|
|
|
<backrest-config-option section="global" key="repo1-cipher-type">{[backrest-repo-cipher-type]}</backrest-config-option>
|
|
<backrest-config-option section="global" key="repo1-cipher-pass">{[backrest-repo-cipher-pass]}</backrest-config-option>
|
|
</backrest-config>
|
|
|
|
<p>Once the repository has been configured and the stanza created and checked, the repository encryption settings cannot be changed.</p>
|
|
</section>
|
|
|
|
<!-- SECTION => QUICKSTART - STANZA CREATE -->
|
|
<section id="create-stanza">
|
|
<title>Create the Stanza</title>
|
|
|
|
<p>The <cmd>stanza-create</cmd> command must be run to initialize the stanza. It is recommended that the <cmd>check</cmd> command be run after <cmd>stanza-create</cmd> to ensure archiving and backups are properly configured.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Create the stanza and check the configuration</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-log-level-console=info stanza-create</exe-cmd>
|
|
<exe-highlight>completed successfully</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
</section>
|
|
|
|
<!-- SECTION => QUICKSTART - CHECK CONFIGURATION -->
|
|
<section id="check-configuration">
|
|
<title>Check the Configuration</title>
|
|
<cmd-description key="check"/>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Check the configuration</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-log-level-console=info check</exe-cmd>
|
|
<exe-highlight> successfully archived to </exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<!-- Decided not to show the error in this part of the user guide but added as a debug statement for reference. -->
|
|
<execute-list if="'{[debug]}' eq 'y'" host="{[host-pg1]}">
|
|
<title>Example of an invalid configuration</title>
|
|
|
|
<execute user="postgres" output="y" err-expect="82">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --archive-timeout=.1 check</exe-cmd>
|
|
<exe-highlight>could not find WAL segment|did not reach the archive</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
</section>
|
|
|
|
<!-- SECTION => QUICKSTART - PERFORM BACKUP -->
|
|
<section id="perform-backup">
|
|
<title>Perform a Backup</title>
|
|
|
|
<p>To perform a backup of the <postgres/> cluster run <backrest/> with the <cmd>backup</cmd> command.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Backup the {[postgres-cluster-demo]} cluster</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]}
|
|
--log-level-console=info backup</exe-cmd>
|
|
<exe-highlight>no prior backup exists|full backup size</exe-highlight>
|
|
</execute>
|
|
|
|
<execute user="postgres" show="n" variable-key="backup-full-first">
|
|
<exe-cmd>{[cmd-backup-last]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>By default <backrest/> will attempt to perform an incremental backup. However, an incremental backup must be based on a full backup and since no full backup existed <backrest/> ran a full backup instead.</p>
|
|
|
|
<p>The <br-option>type</br-option> option can be used to specify a full or differential backup.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Differential backup of the {[postgres-cluster-demo]} cluster</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-type=diff
|
|
--log-level-console=info backup</exe-cmd>
|
|
<exe-highlight>diff backup size</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>This time there was no warning because a full backup already existed. While incremental backups can be based on a full <i>or</i> differential backup, differential backups must be based on a full backup. A full backup can be performed by running the <cmd>backup</cmd> command with <br-setting>{[dash]}-type=full</br-setting>.</p>
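<p>For example, a full backup of the <id>demo</id> stanza could be run as follows.</p>

<code-block title="Example: run a full backup">
pgbackrest --stanza=demo --type=full backup
</code-block>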
|
|
|
|
<p>More information about the <cmd>backup</cmd> command can be found in the <link section="/backup">Backup</link> section.</p>
|
|
</section>
|
|
|
|
<!-- SECTION => QUICKSTART - SCHEDULE BACKUP -->
|
|
<section id="schedule-backup">
|
|
<title>Schedule a Backup</title>
|
|
|
|
<p>Backups can be scheduled with utilities such as cron.</p>
|
|
|
|
<p>In the following example, two cron jobs are configured: full backups are scheduled for 6:30 AM every Sunday and differential backups for 6:30 AM Monday through Saturday. If this crontab is installed for the first time mid-week, then <backrest/> will run a full backup the first time the differential job is executed, followed the next day by a differential backup.</p>
|
|
|
|
<code-block title="crontab">
|
|
#m h dom mon dow command
|
|
30 06 * * 0 pgbackrest --type=full --stanza=demo backup
|
|
30 06 * * 1-6 pgbackrest --type=diff --stanza=demo backup
|
|
</code-block>
|
|
|
|
<p>Once backups are scheduled it's important to configure retention so backups are expired on a regular schedule, see <link section="/retention">Retention</link>.</p>
|
|
</section>
|
|
|
|
<!-- SECTION => QUICKSTART - BACKUP INFO -->
|
|
<section id="backup-info" depend="perform-backup">
|
|
<title>Backup Information</title>
|
|
|
|
<p>Use the <cmd>info</cmd> command to get information about backups.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Get info for the {[postgres-cluster-demo]} cluster</title>
|
|
|
|
<execute user="postgres" filter="n" output="y">
|
|
<exe-cmd>{[project-exe]} info</exe-cmd>
|
|
<exe-highlight>(full|incr|diff) backup</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<cmd-description key="info"/>
|
|
</section>
|
|
|
|
<!-- SECTION => QUICKSTART - PERFORM RESTORE -->
|
|
<section id="perform-restore" depend="perform-backup">
|
|
<title>Restore a Backup</title>
|
|
|
|
<p>Backups can protect you from a number of disaster scenarios, the most common of which are hardware failure and data corruption. The easiest way to simulate data corruption is to remove an important <postgres/> cluster file.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Stop the {[postgres-cluster-demo]} cluster and delete the <file>pg_control</file> file</title>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>{[pg-cluster-stop]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>rm {[pg-path]}/global/pg_control</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Starting the cluster without this important file will result in an error.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Attempt to start the corrupted {[postgres-cluster-demo]} cluster</title>
|
|
|
|
<execute if="{[os-type-is-debian]}" user="root" output="y" err-expect="1">
|
|
<exe-cmd>{[pg-cluster-start]}</exe-cmd>
|
|
<exe-highlight>could not find the database system</exe-highlight>
|
|
</execute>
|
|
|
|
<execute if="{[os-type-is-rhel]}" user="root" err-expect="1">
|
|
<exe-cmd>{[pg-cluster-start]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute if="{[os-type-is-rhel]}" user="root" output="y" err-expect="3">
|
|
<exe-cmd>{[pg-cluster-check]}</exe-cmd>
|
|
<exe-highlight>Failed to start PostgreSQL</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>To restore a backup of the <postgres/> cluster run <backrest/> with the <cmd>restore</cmd> command. The cluster needs to be stopped (in this case it is already stopped) and all files must be removed from the <postgres/> data directory.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Remove old files from {[postgres-cluster-demo]} cluster</title>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>find {[pg-path]} -mindepth 1 -delete</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Restore the {[postgres-cluster-demo]} cluster and start <postgres/></title>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} restore</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>{[pg-cluster-start]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" show="n">
|
|
<exe-cmd>{[pg-cluster-wait]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>This time the cluster started successfully since the restore replaced the missing <file>pg_control</file> file.</p>
|
|
|
|
<p>More information about the <cmd>restore</cmd> command can be found in the <link section="/restore">Restore</link> section.</p>
|
|
</section>
|
|
</section>
|
|
|
|
<!-- SECTION => BACKUP -->
|
|
<section id="backup" depend="/quickstart/create-stanza">
|
|
<title>Backup</title>
|
|
|
|
<p>The Backup section introduces additional <cmd>backup</cmd> command features not covered in the <link section="/quickstart/perform-backup">Quick Start - Perform a Backup</link> section.</p>
|
|
|
|
<!-- SECTION => BACKUP - START-FAST -->
|
|
<section id="option-start-fast">
|
|
<title>Fast Start Option</title>
|
|
|
|
<p>By default <backrest/> will wait for the next regularly scheduled checkpoint before starting a backup. Depending on the <pg-option>checkpoint_timeout</pg-option> and <pg-option>max_wal_size</pg-option> settings in <postgres/> it may be quite some time before a checkpoint completes and the backup can begin.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Incremental backup of the {[postgres-cluster-demo]} cluster with the regularly scheduled checkpoint</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-type=incr
|
|
--log-level-console=info backup</exe-cmd>
|
|
<exe-highlight>backup begins after the next regular checkpoint completes</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>When <br-setting>{[dash]}-start-fast</br-setting> is passed on the command-line or <br-setting>start-fast=y</br-setting> is set in <file>{[backrest-config-demo]}</file> an immediate checkpoint is requested and the backup will start more quickly. This is convenient for testing and for ad-hoc backups. For instance, if a backup is being taken at the beginning of a release window it makes no sense to wait for a checkpoint. Since regularly scheduled backups generally only happen once per day, it is unlikely that enabling the <br-option>start-fast</br-option> option in <file>{[backrest-config-demo]}</file> will negatively affect performance. However, for high-volume transactional systems you may want to pass <br-setting>{[dash]}-start-fast</br-setting> on the command-line instead. Alternately, it is possible to override the setting in the configuration file by passing <br-setting>{[dash]}-no-start-fast</br-setting> on the command-line.</p>
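<p>For example, assuming <br-setting>start-fast=y</br-setting> is set in the configuration file, a single backup could still wait for the regular checkpoint by negating the option on the command line.</p>

<code-block title="Example: override start-fast for one backup">
pgbackrest --stanza=demo --no-start-fast backup
</code-block>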
|
|
|
|
<backrest-config host="{[host-pg1]}" file="{[backrest-config-demo]}">
|
|
<title>Enable the <br-option>start-fast</br-option> option</title>
|
|
|
|
<backrest-config-option section="global" key="start-fast">y</backrest-config-option>
|
|
</backrest-config>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Incremental backup of the {[postgres-cluster-demo]} cluster with an immediate checkpoint</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-type=incr
|
|
--log-level-console=info backup</exe-cmd>
|
|
<exe-highlight>backup begins after the requested immediate checkpoint completes</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
</section>
|
|
|
|
<!-- SECTION => BACKUP - STOP-AUTO -->
|
|
<section if="{[pg-version]} < 9.6" id="option-stop-auto">
|
|
<title>Automatic Stop Option</title>
|
|
|
|
<p>Sometimes <backrest/> will exit unexpectedly and the backup in progress on the <postgres/> cluster will not be properly stopped. <backrest/> exits as quickly as possible when an error occurs so that the cause can be reported accurately and is not masked by another problem that might happen during a more extensive cleanup.</p>
|
|
|
|
<p>Here an error is intentionally caused by removing repository permissions.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Revoke write privileges in the <backrest/> repository and attempt a backup</title>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>chmod 550 {[backrest-repo-path]}/backup/{[postgres-cluster-demo]}/</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" output="y" err-expect="47">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-type=incr
|
|
--log-level-console=info backup</exe-cmd>
|
|
<exe-highlight>ERROR:</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Even when the permissions are fixed <backrest/> will still be unable to perform a backup because the <postgres/> cluster is stuck in backup mode.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Restore write privileges in the <backrest/> repository and attempt a backup</title>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>chmod 750 {[backrest-repo-path]}/backup/{[postgres-cluster-demo]}/</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" output="y" err-expect="57">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-type=incr
|
|
--log-level-console=info backup</exe-cmd>
|
|
<exe-highlight>ERROR:</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Enabling the <br-option>stop-auto</br-option> option allows <backrest/> to stop the current backup if it detects that no other <backrest/> backup process is running.</p>
|
|
|
|
<backrest-config host="{[host-pg1]}" file="{[backrest-config-demo]}">
|
|
<title>Enable the <br-option>stop-auto</br-option> option</title>
|
|
|
|
<backrest-config-option section="global" key="stop-auto">y</backrest-config-option>
|
|
</backrest-config>
|
|
|
|
<p>Now <backrest/> will stop the old backup and start a new one so the process completes successfully.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Perform an incremental backup</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-type=incr
|
|
--log-level-console=info backup</exe-cmd>
|
|
<exe-highlight>cluster is already in backup mode|backup begins after the requested immediate checkpoint completes</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Although useful, this feature may not be appropriate when another third-party backup solution is being used to take online backups, as <backrest/> will not recognize that the other software is running and may terminate a backup started by that software. However, it would be unusual to run more than one third-party backup solution at the same time, so this is not likely to be a problem.</p>
|
|
|
|
<admonition type="note"><id>pg_dump</id> and <id>pg_basebackup</id> do not take online backups so are not affected. It is safe to run them in conjunction with <backrest/>.</admonition>
|
|
</section>
|
|
|
|
<!-- SECTION => BACKUP - ARCHIVE-TIMEOUT -->
|
|
<section id="option-archive-timeout">
|
|
<title>Archive Timeout</title>
|
|
|
|
<p>During an online backup <backrest/> waits for WAL segments that are required for backup consistency to be archived. This wait time is governed by the <backrest/> <br-option>archive-timeout</br-option> option, which defaults to 60 seconds. If archiving an individual segment is known to take longer, then this option should be increased.</p>
|
|
</section>
|
|
</section>
|
|
|
|
<!-- *********************************************************************************************************************** -->
|
|
<section id="monitor" depend="/quickstart/perform-backup">
|
|
<title>Monitoring</title>
|
|
|
|
<p>Monitoring is an important part of any production system. There are many tools available and <backrest/> can be monitored on any of them with a little work.</p>
|
|
|
|
<p><backrest/> can output information about the repository in JSON format which includes a list of all backups for each stanza and WAL archive info.</p>
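<p>For example, the JSON document for the <id>demo</id> stanza can be retrieved as follows and then parsed by the monitoring tool of choice; omitting <br-option>--stanza</br-option> returns information for all stanzas.</p>

<code-block title="Example: retrieve repository information as JSON">
pgbackrest --output=json --stanza=demo info
</code-block>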
|
|
|
|
<section id="postgresql">
|
|
<title>In <postgres/></title>
|
|
|
|
<p>The <postgres/> <id>COPY</id> command allows <backrest/> info to be loaded into a table. The following example wraps that logic in a function that can be used to perform real-time queries.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Load <backrest/> info function for <postgres/></title>
|
|
|
|
<execute user="postgres" show="n">
|
|
<exe-cmd>mkdir -p {[pg-home-path]}/pgbackrest/doc/example</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" show="n">
|
|
<exe-cmd>cp -r {[pgbackrest-repo-path]}/doc/example/*
|
|
{[pg-home-path]}/pgbackrest/doc/example</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>cat
|
|
{[pg-home-path]}/pgbackrest/doc/example/pgsql-pgbackrest-info.sql</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>psql -f
|
|
{[pg-home-path]}/pgbackrest/doc/example/pgsql-pgbackrest-info.sql</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Now the <code>monitor.pgbackrest_info()</code> function can be used to determine the last successful backup time and archived WAL for a stanza.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Query last successful backup time and archived WAL</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>cat
|
|
{[pg-home-path]}/pgbackrest/doc/example/pgsql-pgbackrest-query.sql</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>psql -f
|
|
{[pg-home-path]}/pgbackrest/doc/example/pgsql-pgbackrest-query.sql</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
</section>
|
|
|
|
<section if="{[os-type-is-debian]}" id="jq">
|
|
<title>Using <proper>jq</proper></title>
|
|
|
|
<p><proper>jq</proper> is a command-line utility that can easily extract data from JSON.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Install <proper>jq</proper> utility</title>
|
|
|
|
<execute user="root" pre="y">
|
|
<exe-cmd>apt-get install jq</exe-cmd>
|
|
<exe-cmd-extra>-y 2>&1</exe-cmd-extra>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Now <proper>jq</proper> can be used to query the last successful backup time for a stanza.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Query last successful backup time</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>
|
|
pgbackrest --output=json --stanza=demo info |
|
|
jq '.[0] | .backup[-1] | .timestamp.stop'
|
|
</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Or the last archived WAL.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Query last archived WAL</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>
|
|
pgbackrest --output=json --stanza=demo info |
|
|
jq '.[0] | .archive[-1] | .max'
|
|
</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<admonition type="note">This syntax requires <proper>jq v1.5</proper>.</admonition>
|
|
<admonition type="note"><proper>jq</proper> may round large numbers such as system identifiers. Test your queries carefully.</admonition>
|
|
</section>
|
|
</section>
|
|
|
|
<!-- SECTION => RETENTION -->
|
|
<section id="retention" depend="quickstart/perform-backup">
|
|
<title>Retention</title>
|
|
|
|
<p>Generally it is best to retain as many backups as possible to provide a greater window for <link section="/pitr">Point-in-Time Recovery</link>, but practical concerns such as disk space must also be considered. Retention options remove older backups once they are no longer needed.</p>
|
|
|
|
<cmd-description key="expire"/>
|
|
|
|
<!-- SECTION => RETENTION - FULL -->
|
|
<section id="full">
|
|
<title>Full Backup Retention</title>
|
|
|
|
<p>The <br-option>repo1-retention-full-type</br-option> determines how the option <br-option>repo1-retention-full</br-option> is interpreted; either as the count of full backups to be retained or how many days to retain full backups. New backups must be completed before expiration will occur &mdash; that means if <br-setting>repo1-retention-full-type=count</br-setting> and <br-setting>repo1-retention-full=2</br-setting> then there will be three full backups stored before the oldest one is expired, or if <br-setting>repo1-retention-full-type=time</br-setting> and <br-setting>repo1-retention-full=20</br-setting> then there must be one full backup that is at least 20 days old before expiration can occur.</p>
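<p>For example, time-based retention that keeps full backups for 20 days might be configured as shown below; the values are illustrative.</p>

<code-block title="Example: time-based full backup retention">
[global]
repo1-retention-full-type=time
repo1-retention-full=20
</code-block>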
|
|
|
|
<backrest-config host="{[host-pg1]}" file="{[backrest-config-demo]}">
|
|
<title>Configure <br-option>repo1-retention-full</br-option></title>
|
|
|
|
<backrest-config-option section="global" key="repo1-retention-full">2</backrest-config-option>
|
|
</backrest-config>
|
|
|
|
<p>Since <br-setting>repo1-retention-full=2</br-setting> is configured but currently there is only one full backup, the next full backup to run will not expire any full backups.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Perform a full backup</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --type=full
|
|
--log-level-console=detail backup</exe-cmd>
|
|
<exe-highlight>archive retention on backup {[backup-full-first]}|remove archive</exe-highlight>
|
|
</execute>
|
|
|
|
<execute user="postgres" show="n" variable-key="backup-full-second">
|
|
<exe-cmd>{[cmd-backup-last]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Archive <i>is</i> expired because WAL segments were generated before the oldest backup. These are not useful for recovery &mdash; only WAL segments generated after a backup can be used to recover that backup.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Perform a full backup</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --type=full
|
|
--log-level-console=info backup</exe-cmd>
|
|
<exe-highlight>expire full backup set {[backup-full-first]}|archive retention on backup {[backup-full-second]}|remove archive</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>The <id>{[backup-full-first]}</id> full backup is expired and archive retention is based on the <id>{[backup-full-second]}</id> which is now the oldest full backup.</p>
|
|
</section>
|
|
|
|
<!-- SECTION => RETENTION - DIFF -->
|
|
<section id="diff">
|
|
<title>Differential Backup Retention</title>
|
|
|
|
<p>Set <br-option>repo1-retention-diff</br-option> to the number of differential backups required. Differentials only rely on the prior full backup so it is possible to create a <quote>rolling</quote> set of differentials for the last day or more. This allows quick restores to recent points-in-time but reduces overall space consumption.</p>
|
|
|
|
<backrest-config host="{[host-pg1]}" file="{[backrest-config-demo]}">
|
|
<title>Configure <br-option>repo1-retention-diff</br-option></title>
|
|
|
|
<backrest-config-option section="global" key="repo1-retention-diff">1</backrest-config-option>
|
|
</backrest-config>
|
|
|
|
<p>Since <br-setting>repo1-retention-diff=1</br-setting> is configured, two differential backups will need to be performed before one is expired. An incremental backup is added to demonstrate incremental expiration. Incremental backups cannot be expired independently &mdash; they are always expired with their related full or differential backup.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Perform differential and incremental backups</title>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --type=diff backup</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" show="n" variable-key="backup-diff-second">
|
|
<exe-cmd>{[cmd-backup-last]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --type=incr backup</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Now performing a differential backup will expire the previous differential and incremental backups leaving only one differential backup.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Perform a differential backup</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --type=diff
|
|
--log-level-console=info backup</exe-cmd>
|
|
<exe-highlight>expire diff backup set {[backup-diff-second]}</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
</section>
|
|
|
|
<!-- SECTION => RETENTION - ARCHIVE -->
|
|
<section id="archive">
|
|
<title>Archive Retention</title>
|
|
|
|
<p>Although <backrest/> automatically removes archived WAL segments when expiring backups (the default expires WAL for full backups based on the <br-option>repo1-retention-full</br-option> option), it may be useful to expire archive more aggressively to save disk space. Note that full backups are treated as differential backups for the purpose of differential archive retention.</p>
|
|
|
|
<p>Expiring archive will never remove WAL segments that are required to make a backup consistent. However, since Point-in-Time-Recovery (PITR) only works on a continuous WAL stream, care should be taken when aggressively expiring archive outside of the normal backup expiration process. To determine what will be expired without actually expiring anything, the <br-option>dry-run</br-option> option can be provided on the command line with the <cmd>expire</cmd> command.</p>
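<p>For example, a dry run of the archive expiration performed later in this section might look like the following; nothing is removed, the log simply reports what would be expired.</p>

<code-block title="Example: preview expiration with dry-run">
pgbackrest --stanza=demo --repo1-retention-archive-type=diff \
    --repo1-retention-archive=1 --dry-run expire
</code-block>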
|
|
|
|
<backrest-config host="{[host-pg1]}" file="{[backrest-config-demo]}">
|
|
<title>Configure <br-option>repo1-retention-diff</br-option></title>
|
|
|
|
<backrest-config-option section="global" key="repo1-retention-diff">2</backrest-config-option>
|
|
</backrest-config>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Perform differential backup</title>
|
|
|
|
<execute user="postgres" show="n" variable-key="backup-diff-first">
|
|
<exe-cmd>{[cmd-backup-last]}</exe-cmd>
|
|
</execute>
|
|
|
|
<!-- Push a few WAL segments to make the example below more interesting -->
|
|
<execute user="postgres" show="n">
|
|
<exe-cmd>psql -c "
|
|
select pg_create_restore_point('generate WAL'); select {[pg-switch-wal]}();
|
|
select pg_create_restore_point('generate WAL'); select {[pg-switch-wal]}();"</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --type=diff
|
|
--log-level-console=info backup</exe-cmd>
|
|
<exe-highlight>new backup label</exe-highlight>
|
|
</execute>
|
|
|
|
<execute user="postgres" show="n" variable-key="backup-diff-second">
|
|
<exe-cmd>{[cmd-backup-last]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Expire archive</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --log-level-console=detail
|
|
--repo1-retention-archive-type=diff --repo1-retention-archive=1 expire</exe-cmd>
|
|
<exe-highlight>archive retention on backup {[backup-diff-first]}|remove archive</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>The <id>{[backup-diff-first]}</id> differential backup has archived WAL segments that must be retained to make the older backups consistent even though they cannot be played any further forward with PITR. WAL segments generated after <id>{[backup-diff-first]}</id> but before <id>{[backup-diff-second]}</id> are removed. WAL segments generated after the new backup <id>{[backup-diff-second]}</id> remain and can be used for PITR.</p>
|
|
|
|
<p>Since full backups are considered differential backups for the purpose of differential archive retention, if a full backup is now performed with the same settings, only the archive for that full backup is retained for PITR.</p>
|
|
</section>
|
|
</section>
|
|
|
|
<!-- SECTION => RESTORE -->
|
|
<section id="restore" depend="quickstart/perform-backup">
|
|
<title>Restore</title>
|
|
|
|
<cmd-description key="restore"/>
|
|
|
|
<p>The following sections introduce additional <cmd>restore</cmd> command features.</p>
|
|
|
|
<!-- ******************************************************************************************************************* -->
|
|
<section id="ownership">
|
|
<title>File Ownership</title>
|
|
|
|
<p>If a <cmd>restore</cmd> is run as a non-root user (the typical scenario) then all files restored will belong to the user/group executing <backrest/>. If existing files are not owned by the executing user/group then an error will result if the ownership cannot be updated to the executing user/group. In that case the file ownership will need to be updated by a privileged user before the restore can be retried.</p>
|
|
|
|
<p>If a <cmd>restore</cmd> is run as the <id>root</id> user then <backrest/> will attempt to recreate the ownership recorded in the manifest when the backup was made. Only user/group <b>names</b> are stored in the manifest so the same names must exist on the restore host for this to work. If the user/group name cannot be found locally then the user/group of the <postgres/> data directory will be used and finally <id>root</id> if the data directory user/group cannot be mapped to a name.</p>
|
|
</section>
|
|
|
|
<!-- SECTION => RESTORE - DELTA -->
|
|
<section id="option-delta">
|
|
<title>Delta Option</title>
|
|
|
|
<p><link section="/quickstart/perform-restore">Restore a Backup</link> in <link section="/quickstart">Quick Start</link> required the database cluster directory to be cleaned before the <cmd>restore</cmd> could be performed. The <br-option>delta</br-option> option allows <backrest/> to automatically determine which files in the database cluster directory can be preserved and which ones need to be restored from the backup &mdash; it also <i>removes</i> files not present in the backup manifest so it will dispose of divergent changes. This is accomplished by calculating a <link url="https://en.wikipedia.org/wiki/SHA-1">SHA-1</link> cryptographic hash for each file in the database cluster directory. If the <id>SHA-1</id> hash does not match the hash stored in the backup then that file will be restored. This operation is very efficient when combined with the <br-option>process-max</br-option> option. Since the <postgres/> server is shut down during the restore, a larger number of processes can be used than might be desirable during a backup when the <postgres/> server is running.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Stop the {[postgres-cluster-demo]} cluster, perform delta restore</title>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>{[pg-cluster-stop]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" output="y" filter="y">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-delta
|
|
--log-level-console=detail restore</exe-cmd>
|
|
<exe-highlight>demo\/PG_VERSION - exists and matches backup|remove invalid files|rename global\/pg_control</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Restart <postgres/></title>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>{[pg-cluster-start]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" show="n">
|
|
<exe-cmd>{[pg-cluster-wait]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
</section>
|
|
|
|
<!-- SECTION => RESTORE - DELTA -->
|
|
<section id="option-db-include">
|
|
<title>Restore Selected Databases</title>
|
|
|
|
<p>There may be cases where it is desirable to selectively restore specific databases from a cluster backup. This could be done for performance reasons or to move selected databases to a machine that does not have enough space to restore the entire cluster backup.</p>
|
|
|
|
<p>To demonstrate this feature two databases are created: test1 and test2.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Create two test databases</title>
|
|
|
|
<execute user="postgres" output="y" filter="n">
|
|
<exe-cmd>
|
|
psql -c "create database test1;"
|
|
</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" output="y" filter="n">
|
|
<exe-cmd>
|
|
psql -c "create database test2;"
|
|
</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Each test database will be seeded with tables and data to demonstrate that recovery works with selective restore.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Create a test table in each database</title>
|
|
|
|
<execute user="postgres" output="y" filter="n">
|
|
<exe-cmd>
|
|
psql -c "create table test1_table (id int);
|
|
insert into test1_table (id) values (1);" test1
|
|
</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" output="y" filter="n">
|
|
<exe-cmd>
|
|
psql -c "create table test2_table (id int);
|
|
insert into test2_table (id) values (2);" test2
|
|
</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>A fresh backup is run so <backrest/> is aware of the new databases.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Perform a backup</title>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --type=incr backup</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>One of the main reasons to use selective restore is to save space. The size of the test1 database is shown here so it can be compared with the disk utilization after a selective restore.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Show space used by test1 database</title>
|
|
|
|
<execute user="postgres" show="n" variable-key="database-test1-oid">
|
|
<exe-cmd>
|
|
psql -Atc "select oid from pg_database where datname = 'test1'"
|
|
</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" output="y" filter="n">
|
|
<exe-cmd>
|
|
du -sh {[pg-path]}/base/{[database-test1-oid]}
|
|
</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>If the database to restore is not known, use the <cmd>info</cmd> command <br-option>set</br-option> option to discover databases that are part of the backup set.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Show database list for backup</title>
|
|
|
|
<execute user="postgres" show="n" variable-key="backup-last-incr">
|
|
<exe-cmd>{[cmd-backup-last]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]}
|
|
{[dash]}-set={[backup-last-incr]} info</exe-cmd>
|
|
<exe-highlight>database list</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Stop the cluster and restore only the test2 database. Built-in databases (<id>template0</id>, <id>template1</id>, and <id>postgres</id>) are always restored.</p>
|
|
|
|
<admonition type="warning">Recovery may error unless <br-option>--type=immediate</br-option> is specified. This is because after consistency is reached <postgres/> will flag zeroed pages as errors even for a full-page write. For <postgres/> &ge; <proper>13</proper> the <pg-option>ignore_invalid_pages</pg-option> setting may be used to ignore invalid pages. In this case it is important to check the logs after recovery to ensure that no invalid pages were reported in the selected databases.</admonition>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Restore from last backup including only the test2 database</title>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>{[pg-cluster-stop]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-delta
|
|
{[dash]}-db-include=test2 {[dash]}-type=immediate {[dash]}-target-action=promote restore</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>{[pg-cluster-start]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" show="n">
|
|
<exe-cmd>{[pg-cluster-wait]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Once recovery is complete the test2 database will contain all previously created tables and data.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Demonstrate that the test2 database was recovered</title>
|
|
|
|
<execute user="postgres" output="y" filter="n">
|
|
<exe-cmd>
|
|
psql -c "select * from test2_table;" test2
|
|
</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>The test1 database, despite successful recovery, is not accessible. This is because the entire database was restored as sparse, zeroed files. <postgres/> can successfully apply WAL on the zeroed files but the database as a whole will not be valid because key files contain no data. This is purposeful to prevent the database from being accidentally used when it might contain partial data that was applied during WAL replay.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Attempting to connect to the test1 database will produce an error</title>
|
|
|
|
<execute user="postgres" output="y" filter="n" err-expect="2">
|
|
<exe-cmd>
|
|
psql -c "select * from test1_table;" test1
|
|
</exe-cmd>
|
|
<exe-highlight>relation mapping file.*contains invalid data</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Since the test1 database is restored with sparse, zeroed files it will only require as much space as the amount of WAL that is written during recovery. While the amount of WAL generated during a backup and applied during recovery can be significant it will generally be a small fraction of the total database size, especially for large databases where this feature is most likely to be useful.</p>
|
|
|
|
<p>It is clear that the test1 database uses far less disk space during the selective restore than it would have if the entire database had been restored.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Show space used by test1 database after recovery</title>
|
|
|
|
<execute user="postgres" output="y" filter="n">
|
|
<exe-cmd>
|
|
du -sh {[pg-path]}/base/{[database-test1-oid]}
|
|
</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>At this point the only action that can be taken on the invalid test1 database is <id>drop database</id>. <backrest/> does not automatically drop the database since this cannot be done until recovery is complete and the cluster is accessible.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Drop the test1 database</title>
|
|
|
|
<execute user="postgres" output="y" filter="n">
|
|
<exe-cmd>
|
|
psql -c "drop database test1;"
|
|
</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Now that the invalid test1 database has been dropped only the test2 and built-in databases remain.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>List remaining databases</title>
|
|
|
|
<execute user="postgres" output="y" filter="n">
|
|
<exe-cmd>
|
|
psql -c "select oid, datname from pg_database order by oid;"
|
|
</exe-cmd>
|
|
<exe-highlight>test2</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
</section>
|
|
</section>
|
|
|
|
<!-- SECTION => PITR -->
|
|
<section id="pitr" depend="quickstart/perform-backup">
|
|
<title>Point-in-Time Recovery</title>
|
|
|
|
<p><link section="/quickstart/perform-restore">Restore a Backup</link> in <link section="/quickstart">Quick Start</link> performed default recovery, which is to play all the way to the end of the WAL stream. In the case of a hardware failure this is usually the best choice but for data corruption scenarios (whether machine or human in origin) Point-in-Time Recovery (PITR) is often more appropriate.</p>
|
|
|
|
<p>Point-in-Time Recovery (PITR) allows the WAL to be played from the last backup to a specified lsn, time, transaction id, or recovery point. For common recovery scenarios time-based recovery is arguably the most useful. A typical recovery scenario is to restore a table that was accidentally dropped or data that was accidentally deleted. Recovering a dropped table is more dramatic, so that's the example given here, but deleted data would be recovered in exactly the same way.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Backup the {[postgres-cluster-demo]} cluster and create a table with very important data</title>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --type=diff backup</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>
|
|
psql -c "begin;
|
|
create table important_table (message text);
|
|
insert into important_table values ('{[test-table-data]}');
|
|
commit;
|
|
select * from important_table;"
|
|
</exe-cmd>
|
|
<exe-highlight>{[test-table-data]}</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>It is important to represent the time as reckoned by <postgres/> and to include timezone offsets. This reduces the possibility of unintended timezone conversions and an unexpected recovery result.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Get the time from <postgres/></title>
|
|
|
|
<execute user="postgres" show="n">
|
|
<exe-cmd>sleep 1</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" output="y" filter="n" variable-key="time-recovery-timestamp">
|
|
<exe-cmd>
|
|
psql -Atc "select current_timestamp"
|
|
</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" show="n">
|
|
<exe-cmd>sleep 1</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Now that the time has been recorded the table is dropped. In practice finding the exact time that the table was dropped is a lot harder than in this example. It may not be possible to find the exact time, but some forensic work should be able to get you close.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Drop the important table</title>
|
|
|
|
<execute user="postgres" output="y" err-expect="1">
|
|
<exe-cmd>psql -c "begin;
|
|
drop table important_table;
|
|
commit;
|
|
select * from important_table;"</exe-cmd>
|
|
<exe-highlight>does not exist</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Now the restore can be performed with time-based recovery to bring back the missing table.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Stop <postgres/>, restore the {[postgres-cluster-demo]} cluster to <id>{[time-recovery-timestamp]}</id>, and display <file>{[pg-recovery-file-demo]}</file></title>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>{[pg-cluster-stop]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-delta
|
|
{[dash]}-type=time "{[dash]}-target={[time-recovery-timestamp]}"
|
|
--target-action=promote restore</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="root" show="n">
|
|
<exe-cmd>rm {[postgres-log-demo]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>cat {[pg-recovery-path-demo]}</exe-cmd>
|
|
<exe-highlight>recovery_target_time</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p><backrest/> has automatically generated the recovery settings in <file>{[pg-recovery-file-demo]}</file> so <postgres/> can be started immediately. <id>%f</id> is how <postgres/> specifies the WAL segment it needs and <id>%p</id> is the location where it should be copied. Once <postgres/> has finished recovery the table will exist again and can be queried.</p>
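<p>The generated settings typically resemble the following sketch; the target time is only an example and the exact file and set of settings depend on the <postgres/> version and the options passed to <cmd>restore</cmd>.</p>

<code-block title="Example: generated recovery settings">
restore_command = 'pgbackrest --stanza=demo archive-get %f "%p"'
recovery_target_time = '2021-06-01 12:00:00+00'
</code-block>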
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Start <postgres/> and check that the important table exists</title>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>{[pg-cluster-start]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" show="n">
|
|
<exe-cmd>{[pg-cluster-wait]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>psql -c "select * from important_table"</exe-cmd>
|
|
<exe-highlight>{[test-table-data]}</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>The <postgres/> log also contains valuable information. It will indicate the time and transaction where the recovery stopped and also give the time of the last transaction to be applied.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Examine the <postgres/> log output</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>cat {[postgres-log-demo]}</exe-cmd>
|
|
<exe-highlight>recovery stopping before|last completed transaction|starting point-in-time recovery</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>This example was rigged to give the correct result. If a backup after the required time is chosen then <postgres/> will not be able to recover the lost table. <postgres/> can only play forward, not backward. To demonstrate this the important table must be dropped (again).</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Drop the important table (again)</title>
|
|
|
|
<execute user="postgres" output="y" err-expect="1">
|
|
<exe-cmd>psql -c "begin;
|
|
drop table important_table;
|
|
commit;
|
|
select * from important_table;"</exe-cmd>
|
|
<exe-highlight>does not exist</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Now take a new backup and attempt recovery from the new backup by specifying the <br-option>{[dash]}-set</br-option> option. The <cmd>info</cmd> command can be used to find the new backup label.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Perform a backup and get backup info</title>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-type=incr backup</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" show="n" variable-key="backup-last">
|
|
<exe-cmd>{[cmd-backup-last]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" filter="n" output="y">
|
|
<exe-cmd>{[project-exe]} info</exe-cmd>
|
|
<exe-highlight>{[backup-last]}</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Attempt recovery from the specified backup</title>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>{[pg-cluster-stop]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-delta
|
|
{[dash]}-set={[backup-last]}
|
|
{[dash]}-type=time "{[dash]}-target={[time-recovery-timestamp]}" {[dash]}-target-action=promote restore</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="root" show="n">
|
|
<exe-cmd>rm {[postgres-log-demo]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>{[pg-cluster-start]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" show="n">
|
|
<exe-cmd>{[pg-cluster-wait]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" output="y" err-expect="1">
|
|
<exe-cmd>psql -c "select * from important_table"</exe-cmd>
|
|
<exe-highlight>does not exist</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Looking at the log output, it is not obvious that recovery failed to restore the table. The key is to look for the presence of the <quote>recovery stopping before...</quote> and <quote>last completed transaction...</quote> log messages. If they are not present, then the recovery to the specified point-in-time was not successful.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Examine the <postgres/> log output to discover the recovery was not successful</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>cat {[postgres-log-demo]}</exe-cmd>
|
|
<exe-highlight>starting point-in-time recovery|consistent recovery state reached</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>The default behavior for time-based restore, if the <br-option>{[dash]}-set</br-option> option is not specified, is to attempt to discover an earlier backup to play forward from. If a backup set cannot be found, then restore will default to the latest backup which, as shown earlier, may not give the desired result.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Stop <postgres/>, restore from auto-selected backup, and start <postgres/></title>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>{[pg-cluster-stop]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>
|
|
{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-delta
|
|
{[dash]}-type=time "{[dash]}-target={[time-recovery-timestamp]}"
|
|
{[dash]}-target-action=promote restore
|
|
</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="root" show="n">
|
|
<exe-cmd>rm {[postgres-log-demo]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>{[pg-cluster-start]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" show="n">
|
|
<exe-cmd>{[pg-cluster-wait]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>psql -c "select * from important_table"</exe-cmd>
|
|
<exe-highlight>{[test-table-data]}</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Now the log output will contain the expected <quote>recovery stopping before...</quote> and <quote>last completed transaction...</quote> messages showing that the recovery was successful.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Examine the <postgres/> log output for log messages indicating success</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>cat {[postgres-log-demo]}</exe-cmd>
|
|
<exe-highlight>recovery stopping before|last completed transaction|starting point-in-time recovery</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
</section>
|
|
|
|
<!-- ======================================================================================================================= -->
|
|
<section id="multi-repo" if="!{[object-any-all]}">
|
|
<title>Multiple Repositories</title>
|
|
|
|
<p>Multiple repositories may be configured as demonstrated in <link section="/s3-support">S3 Support</link>. A potential benefit is the ability to have a local repository for fast restores and a remote repository for redundancy.</p>
|
|
|
|
<p>Some commands, e.g. <cmd>stanza-create</cmd>/<cmd>stanza-update</cmd>, will automatically work with all configured repositories while others, e.g. <link section="/delete-stanza">stanza-delete</link>, will require a repository to be specified using the <br-option>repo</br-option> option. See the <link url="command.html">command reference</link> for details on which commands require the repository to be specified.</p>
|
|
|
|
<p>Note that, to maintain backward compatibility, the <br-option>repo</br-option> option is not required when only <br-option>repo1</br-option> is configured. However, the <br-option>repo</br-option> option <i>is</i> required when the only configured repository is not <br-option>repo1</br-option>, e.g. <br-option>repo2</br-option>. This prevents commands from breaking if a new repository is added later.</p>
|
|
|
|
<p>The <cmd>archive-push</cmd> command will always push WAL to the archive in all configured repositories, but backups need to be scheduled individually for each repository. In many cases this is desirable since backup types and retention will vary by repository. Likewise, restores must specify a repository. It is generally better to restore from the repository with the lowest latency/cost, even if that means more recovery time. Only restore testing can determine which repository will be most efficient.</p>
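<p>As a sketch, backups might be scheduled per repository and restores directed at the local repository; the backup types and repository assignments shown below are examples only.</p>

<code-block title="Example per-repository commands (illustrative)">
# Differential backup to the local repository
{[project-exe]} --stanza={[postgres-cluster-demo]} --repo=1 --type=diff backup

# Full backup to the remote repository
{[project-exe]} --stanza={[postgres-cluster-demo]} --repo=2 --type=full backup

# Restore from the local repository for the fastest recovery
{[project-exe]} --stanza={[postgres-cluster-demo]} --repo=1 --delta restore
</code-block>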
|
|
</section>
|
|
|
|
<!-- ======================================================================================================================= -->
|
|
<section id="azure-support" if="!{[object-any-all]}" depend="/quickstart/configure-archiving">
|
|
<title>Azure-Compatible Object Store Support</title>
|
|
|
|
<block id="azure-setup">
|
|
<block-variable-replace key="azure-setup-repo-id">2</block-variable-replace>
|
|
<block-variable-replace key="azure-setup-host">{[host-pg1]}</block-variable-replace>
|
|
<block-variable-replace key="azure-setup-user">postgres</block-variable-replace>
|
|
<block-variable-replace key="azure-setup-config-owner">postgres:postgres</block-variable-replace>
|
|
<block-variable-replace key="azure-setup-create-container">y</block-variable-replace>
|
|
</block>
|
|
|
|
<p>Commands are run exactly as if the repository were stored on a local disk.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Create the stanza</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-log-level-console=info stanza-create</exe-cmd>
|
|
<exe-highlight>completed successfully</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>File creation time in object stores is relatively slow so commands benefit by increasing <br-option>process-max</br-option> to parallelize file creation.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Backup the {[postgres-cluster-demo]} cluster</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>
|
|
{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --repo=2
|
|
--log-level-console=info backup
|
|
</exe-cmd>
|
|
<exe-highlight>no prior backup exists|full backup size</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
</section>
|
|
|
|
<!-- SECTION => S3-SUPPORT -->
|
|
<section id="s3-support" if="!{[object-any-all]}" depend="/azure-support">
|
|
<title>S3-Compatible Object Store Support</title>
|
|
|
|
<block id="s3-setup">
|
|
<block-variable-replace key="s3-setup-repo-id">3</block-variable-replace>
|
|
<block-variable-replace key="s3-setup-host">{[host-pg1]}</block-variable-replace>
|
|
<block-variable-replace key="s3-setup-user">postgres</block-variable-replace>
|
|
<block-variable-replace key="s3-setup-config-owner">postgres:postgres</block-variable-replace>
|
|
<block-variable-replace key="s3-setup-create-bucket">y</block-variable-replace>
|
|
</block>
|
|
|
|
<p>A role should be created to run <backrest/> and the bucket permissions should be set as restrictively as possible. If the role is associated with an instance in <proper>AWS</proper> then <backrest/> will automatically retrieve temporary credentials when <br-option>repo3-s3-key-type=auto</br-option>, which means that keys do not need to be explicitly set in <file>{[backrest-config-demo]}</file>.</p>
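<p>A minimal configuration sketch for this arrangement follows; the bucket, endpoint, and region values are placeholders and the repository is assumed to be configured as <br-option>repo3</br-option> as in this section.</p>

<code-block title="Example repo3 S3 options with automatic credentials (illustrative)">
[global]
repo3-type=s3
repo3-path=/demo-repo
repo3-s3-bucket=demo-bucket
repo3-s3-endpoint=s3.us-east-1.amazonaws.com
repo3-s3-region=us-east-1
repo3-s3-key-type=auto
</code-block>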
|
|
|
|
<p>This sample <proper>Amazon S3</proper> policy will restrict all reads and writes to the bucket and repository path.</p>
|
|
|
|
<code-block title="Sample Amazon S3 Policy">
|
|
{
|
|
"Version": "2012-10-17",
|
|
"Statement": [
|
|
{
|
|
"Effect": "Allow",
|
|
"Action": [
|
|
"s3:ListBucket"
|
|
],
|
|
"Resource": [
|
|
"arn:aws:s3:::{[s3-bucket]}"
|
|
],
|
|
"Condition": {
|
|
"StringEquals": {
|
|
"s3:prefix": [
|
|
"",
|
|
"{[s3-repo]}"
|
|
],
|
|
"s3:delimiter": [
|
|
"/"
|
|
]
|
|
}
|
|
}
|
|
},
|
|
{
|
|
"Effect": "Allow",
|
|
"Action": [
|
|
"s3:ListBucket"
|
|
],
|
|
"Resource": [
|
|
"arn:aws:s3:::{[s3-bucket]}"
|
|
],
|
|
"Condition": {
|
|
"StringLike": {
|
|
"s3:prefix": [
|
|
"{[s3-repo]}/*"
|
|
]
|
|
}
|
|
}
|
|
},
|
|
{
|
|
"Effect": "Allow",
|
|
"Action": [
|
|
"s3:PutObject",
|
|
"s3:GetObject",
|
|
"s3:DeleteObject"
|
|
],
|
|
"Resource": [
|
|
"arn:aws:s3:::{[s3-bucket]}/{[s3-repo]}/*"
|
|
]
|
|
}
|
|
]
|
|
}
|
|
</code-block>
|
|
|
|
<p>Commands are run exactly as if the repository were stored on a local disk.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Create the stanza</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-log-level-console=info stanza-create</exe-cmd>
|
|
<exe-highlight>completed successfully</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>File creation time in object stores is relatively slow so commands benefit by increasing <br-option>process-max</br-option> to parallelize file creation.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Backup the {[postgres-cluster-demo]} cluster</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --repo=3
|
|
--log-level-console=info backup</exe-cmd>
|
|
<exe-highlight>no prior backup exists|full backup size</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
</section>
|
|
|
|
<!-- ======================================================================================================================= -->
|
|
<section id="gcs-support" if="!{[object-any-all]}" depend="/quickstart/configure-archiving">
|
|
<title>GCS-Compatible Object Store Support</title>
|
|
|
|
<block id="gcs-setup">
|
|
<block-variable-replace key="gcs-setup-repo-id">4</block-variable-replace>
|
|
<block-variable-replace key="gcs-setup-host">{[host-pg1]}</block-variable-replace>
|
|
<block-variable-replace key="gcs-setup-user">postgres</block-variable-replace>
|
|
<block-variable-replace key="gcs-setup-config-owner">postgres:postgres</block-variable-replace>
|
|
</block>
|
|
|
|
<p>Commands are run exactly as if the repository were stored on a local disk.</p>
|
|
|
|
<p>File creation time in object stores is relatively slow so commands benefit by increasing <br-option>process-max</br-option> to parallelize file creation.</p>
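<p>For example, assuming the repository above is configured as <br-option>repo4</br-option>, the stanza can be created and a backup run with increased parallelism; the <br-option>process-max</br-option> value shown is illustrative.</p>

<code-block title="Example GCS repository commands (illustrative)">
{[project-exe]} --stanza={[postgres-cluster-demo]} --log-level-console=info stanza-create
{[project-exe]} --stanza={[postgres-cluster-demo]} --repo=4 --process-max=4 --log-level-console=info backup
</code-block>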
|
|
</section>
|
|
|
|
<!-- ======================================================================================================================= -->
|
|
<section id="delete-stanza" if="!{[object-any-all]}" depend="/quickstart">
|
|
<title>Delete a Stanza</title>
|
|
|
|
<cmd-description key="stanza-delete"/>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Stop <postgres/> cluster to be removed</title>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>{[pg-cluster-stop]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Stop <backrest/> for the stanza</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-log-level-console=info stop</exe-cmd>
|
|
<exe-highlight>completed successfully</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Delete the stanza from one repository</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>
|
|
{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --repo=1
|
|
{[dash]}-log-level-console=info stanza-delete
|
|
</exe-cmd>
|
|
<exe-highlight>completed successfully</exe-highlight>
|
|
</execute>
|
|
|
|
<execute user="root" show="n">
|
|
<exe-cmd>{[pg-cluster-start]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
</section>
|
|
|
|
<!-- SECTION => REPOSITORY HOST -->
|
|
<section id="repo-host" depend="/quickstart/configure-archiving">
|
|
<title>Dedicated Repository Host</title>
|
|
|
|
<p>The configuration described in <link section="/quickstart">Quickstart</link> is suitable for simple installations but for enterprise configurations it is more typical to have a dedicated <host>repository</host> host where the backups and WAL archive files are stored. This separates the backups and WAL archive from the database server so <host>database</host> host failures have less impact. It is still a good idea to employ traditional backup software to backup the <host>repository</host> host.</p>
|
|
|
|
<p>On <postgres/> hosts, <br-option>pg1-path</br-option> must be set to the path of the local <postgres/> cluster and no <br-option>pg1-host</br-option> should be configured. On the repository host, the <backrest/> configuration must have the <br-option>pg-host</br-option> option configured to connect to the primary and standby (if any) hosts. The repository host is the only host whose <backrest/> configuration should be aware of more than one <postgres/> host. The order does not matter, e.g. the host configured as <br-option>pg1-path</br-option>/<br-option>pg1-host</br-option> or <br-option>pg2-path</br-option>/<br-option>pg2-host</br-option> may be either the primary or a standby.</p>
|
|
|
|
<section id="install">
|
|
<title>Installation</title>
|
|
|
|
<p>A new host named <host>repository</host> is created to store the cluster backups.</p>
|
|
|
|
<admonition type="note">The <backrest/> version installed on the <host>repository</host> host must exactly match the version installed on the <postgres/> host.</admonition>
|
|
|
|
<host-add id="{[host-repo1-id]}" name="{[host-repo1]}" user="{[host-repo1-user]}" image="{[host-repo1-image]}" os="{[os-type]}" mount="{[host-repo1-mount]}" option="{[host-option]}"/>
|
|
|
|
<p>The <user>{[br-user]}</user> user is created to own the <backrest/> repository. Any user can own the repository but it is best not to use <user>postgres</user> (if it exists) to avoid confusion.</p>
|
|
|
|
<execute-list host="{[host-repo1]}">
|
|
<title>Create <user>{[br-user]}</user> user</title>
|
|
|
|
<execute if="{[os-type-is-debian]}" user="root">
|
|
<exe-cmd>adduser --disabled-password --gecos "" {[br-user]}</exe-cmd>
|
|
</execute>
|
|
<execute if="{[os-type-is-rhel]}" user="root">
|
|
<exe-cmd>groupadd {[br-group]}</exe-cmd>
|
|
</execute>
|
|
<execute if="{[os-type-is-rhel]}" user="root">
|
|
<exe-cmd>adduser -g{[br-group]} -n {[br-user]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<block id="br-install">
|
|
<block-variable-replace key="br-install-host">{[host-repo1]}</block-variable-replace>
|
|
<block-variable-replace key="br-install-user">{[br-user]}</block-variable-replace>
|
|
<block-variable-replace key="br-install-group">{[br-group]}</block-variable-replace>
|
|
</block>
|
|
|
|
<block id="br-install-repo">
|
|
<block-variable-replace key="br-install-host">{[host-repo1]}</block-variable-replace>
|
|
<block-variable-replace key="br-install-user">{[br-user]}</block-variable-replace>
|
|
<block-variable-replace key="br-install-group">{[br-group]}</block-variable-replace>
|
|
</block>
|
|
</section>
|
|
|
|
<section id="setup-ssh">
|
|
<title>Setup Passwordless SSH</title>
|
|
|
|
<block id="setup-ssh-intro">
|
|
<!-- ??? Bogus variable is set because the syntax currently requires at least one -->
|
|
<block-variable-replace key="bogus"></block-variable-replace>
|
|
</block>
|
|
|
|
<execute-list host="{[host-repo1]}">
|
|
<title>Create <host>{[host-repo1]}</host> host key pair</title>
|
|
|
|
<execute user="{[br-user]}">
|
|
<exe-cmd>mkdir -m 750 {[br-home-path]}/.ssh</exe-cmd>
|
|
</execute>
|
|
<execute user="{[br-user]}">
|
|
<exe-cmd>ssh-keygen -f {[br-home-path]}/.ssh/id_rsa
|
|
-t rsa -b 4096 -N ""</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<block id="setup-ssh">
|
|
<block-variable-replace key="setup-ssh-host">{[host-pg1]}</block-variable-replace>
|
|
<block-variable-replace key="setup-ssh-user">postgres</block-variable-replace>
|
|
<block-variable-replace key="setup-ssh-user-home-path">{[pg-home-path]}</block-variable-replace>
|
|
</block>
|
|
|
|
<admonition type="note">ssh has been configured to only allow <backrest/> to be run via passwordless ssh. This enhances security in the event that one of the service accounts is hijacked.</admonition>
|
|
|
|
<!-- <block if="{[pg-version]} >= 11" id="setup-ssh">
|
|
<block-variable-replace key="setup-ssh-host">{[host-pg1]}</block-variable-replace>
|
|
<block-variable-replace key="setup-ssh-user">pgbackrest</block-variable-replace>
|
|
<block-variable-replace key="setup-ssh-user-home-path">{[br-home-path]}</block-variable-replace>
|
|
</block> -->
|
|
</section>
|
|
|
|
<!-- SECTION => REPOSITORY HOST - INSTALL/CONFIGURE -->
|
|
<section id="config">
|
|
<title>Configuration</title>
|
|
|
|
<backrest-config host="{[host-repo1]}" show="n" owner="{[br-user]}:{[br-group]}" file="{[backrest-config-demo]}">
|
|
<title>Configure the <backrest/> repository path</title>
|
|
|
|
<backrest-config-option section="global" key="repo1-path">{[backrest-repo-path]}</backrest-config-option>
|
|
</backrest-config>
|
|
|
|
<p>The <host>repository</host> host must be configured with the <host>{[host-pg1]}</host> host/user and database path. The primary will be configured as <id>pg1</id> to allow a standby to be added later.</p>
|
|
|
|
<backrest-config host="{[host-repo1]}" owner="{[br-user]}:{[br-group]}" file="{[backrest-config-demo]}">
|
|
<title>Configure <br-option>pg1-host</br-option>/<br-option>pg1-host-user</br-option> and <br-option>pg1-path</br-option></title>
|
|
|
|
<backrest-config-option section="demo" key="pg1-path">{[pg-path]}</backrest-config-option>
|
|
<backrest-config-option section="demo" key="pg1-host">{[host-pg1]}</backrest-config-option>
|
|
<!-- <backrest-config-option if="{[pg-version]} >= 11" section="demo" key="pg1-host-user">{[br-user]}</backrest-config-option> -->
|
|
|
|
<backrest-config-option section="global" key="start-fast">y</backrest-config-option>
|
|
<backrest-config-option section="global" key="repo1-retention-full">2</backrest-config-option>
|
|
|
|
<backrest-config-option section="global" key="log-level-stderr">off</backrest-config-option>
|
|
<backrest-config-option section="global" key="log-timestamp">n</backrest-config-option>
|
|
</backrest-config>
|
|
|
|
<p>The database host must be configured with the repository host/user. The default for the <br-option>repo1-host-user</br-option> option is <id>pgbackrest</id>. If the <id>postgres</id> user does restores on the repository host it is best not to also allow the <id>postgres</id> user to perform backups. However, the <id>postgres</id> user can read the repository directly if it is in the same group as the <id>pgbackrest</id> user.</p>
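<p>For example, on the <host>repository</host> host the <id>postgres</id> user (if present) could be added to the repository owner's group to allow direct read access; this is a sketch using the group configured earlier in this guide.</p>

<code-block title="Example of granting group read access (illustrative)">
usermod -aG {[br-group]} postgres
</code-block>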
|
|
|
|
<backrest-config host="{[host-pg1]}" file="{[backrest-config-demo]}" reset="y">
|
|
<title>Configure <br-option>repo1-host</br-option>/<br-option>repo1-host-user</br-option></title>
|
|
|
|
<backrest-config-option section="demo" key="pg1-path">{[pg-path]}</backrest-config-option>
|
|
|
|
<backrest-config-option section="global" key="repo1-host">{[host-repo1]}</backrest-config-option>
|
|
|
|
<backrest-config-option section="global" key="log-level-file">detail</backrest-config-option>
|
|
|
|
<backrest-config-option section="global" key="log-level-stderr">off</backrest-config-option>
|
|
<backrest-config-option section="global" key="log-timestamp">n</backrest-config-option>
|
|
</backrest-config>
|
|
|
|
<p>Commands are run the same as on a single host configuration except that some commands such as <cmd>backup</cmd> and <cmd>expire</cmd> are run from the <host>repository</host> host instead of the <host>database</host> host.</p>
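<p>For example, the following would now be run on the <host>repository</host> host rather than the <host>database</host> host; the backup type shown is illustrative.</p>

<code-block title="Example commands run from the repository host (illustrative)">
{[project-exe]} --stanza={[postgres-cluster-demo]} --type=diff backup
{[project-exe]} --stanza={[postgres-cluster-demo]} expire
</code-block>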
|
|
|
|
<!-- <execute-list if="{[pg-version]} >= 11" host="{[host-pg1]}">
|
|
<title>Set permissions required for backup</title>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>
|
|
psql -c "
|
|
create user pgbackrest;
|
|
grant pg_read_all_settings to pgbackrest;
|
|
grant execute on function pg_start_backup(text, boolean, boolean) to pgbackrest;
|
|
grant execute on function pg_stop_backup(boolean, boolean) to pgbackrest;
|
|
grant execute on function pg_switch_wal() to pgbackrest;
|
|
grant execute on function pg_create_restore_point(text) to pgbackrest;"
|
|
</exe-cmd>
|
|
</execute>
|
|
</execute-list> -->
|
|
|
|
<p if="'{[azure-all]}' eq 'y'">Configure Azure-compatible object store if required.</p>
|
|
|
|
<block id="azure-setup" if="'{[azure-all]}' eq 'y'">
|
|
<block-variable-replace key="azure-setup-repo-id">1</block-variable-replace>
|
|
<block-variable-replace key="azure-setup-host">{[host-repo1]}</block-variable-replace>
|
|
<block-variable-replace key="azure-setup-user">{[br-user]}</block-variable-replace>
|
|
<block-variable-replace key="azure-setup-config-owner">{[br-user]}:{[br-group]}</block-variable-replace>
|
|
<block-variable-replace key="azure-setup-create-container">n</block-variable-replace>
|
|
</block>
|
|
|
|
<p if="'{[gcs-all]}' eq 'y'">Configure GCS-compatible object store if required.</p>
|
|
|
|
<block id="gcs-setup" if="'{[gcs-all]}' eq 'y'">
|
|
<block-variable-replace key="gcs-setup-repo-id">1</block-variable-replace>
|
|
<block-variable-replace key="gcs-setup-host">{[host-repo1]}</block-variable-replace>
|
|
<block-variable-replace key="gcs-setup-user">{[br-user]}</block-variable-replace>
|
|
<block-variable-replace key="gcs-setup-config-owner">{[br-user]}:{[br-group]}</block-variable-replace>
|
|
</block>
|
|
|
|
<p if="'{[s3-all]}' eq 'y'">Configure S3-compatible object store if required.</p>
|
|
|
|
<block id="s3-setup" if="'{[s3-all]}' eq 'y'">
|
|
<block-variable-replace key="s3-setup-repo-id">1</block-variable-replace>
|
|
<block-variable-replace key="s3-setup-host">{[host-repo1]}</block-variable-replace>
|
|
<block-variable-replace key="s3-setup-user">{[br-user]}</block-variable-replace>
|
|
<block-variable-replace key="s3-setup-config-owner">{[br-user]}:{[br-group]}</block-variable-replace>
|
|
<block-variable-replace key="s3-setup-create-bucket">n</block-variable-replace>
|
|
</block>
|
|
|
|
<p if="!{[object-any-all]}">Create the stanza in the new repository.</p>
|
|
|
|
<execute-list host="{[host-repo1]}" if="!{[object-any-all]}">
|
|
<title>Create the stanza</title>
|
|
|
|
<!-- Create the stanza -->
|
|
<execute user="{[br-user]}" output="y" filter="n">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} stanza-create</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Check that the configuration is correct on both the <host>database</host> and <host>repository</host> hosts. More information about the <cmd>check</cmd> command can be found in <link section="/quickstart/check-configuration">Check the Configuration</link>.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Check the configuration</title>
|
|
|
|
<execute user="postgres" output="y" filter="n">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} check</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<execute-list host="{[host-repo1]}">
|
|
<title>Check the configuration</title>
|
|
|
|
<execute user="{[br-user]}" output="y" filter="n">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} check</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
</section>
|
|
|
|
<!-- SECTION => REPOSITORY HOST - PERFORM BACKUP -->
|
|
<section id="perform-backup">
|
|
<title>Perform a Backup</title>
|
|
|
|
<p>To perform a backup of the <postgres/> cluster run <backrest/> with the <cmd>backup</cmd> command on the <host>repository</host> host.</p>
|
|
|
|
<execute-list host="{[host-repo1]}">
|
|
<title>Backup the {[postgres-cluster-demo]} cluster</title>
|
|
|
|
<execute user="{[br-user]}" output="y" filter="n">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} backup</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Since a new repository was created on the <host>repository</host> host, a warning was emitted that the requested incremental backup was changed to a full backup because no prior backup existed.</p>
|
|
</section>
|
|
|
|
<!-- SECTION => REPOSITORY HOST - PERFORM RESTORE -->
|
|
<section id="perform-restore">
|
|
<title>Restore a Backup</title>
|
|
|
|
<p>To perform a restore of the <postgres/> cluster run <backrest/> with the <cmd>restore</cmd> command on the <host>database</host> host.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Stop the {[postgres-cluster-demo]} cluster, restore, and restart <postgres/></title>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>{[pg-cluster-stop]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-delta restore</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>{[pg-cluster-start]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" show="n">
|
|
<exe-cmd>{[pg-cluster-wait]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
</section>
|
|
</section>
|
|
|
|
<!-- SECTION => PARALLEL BACKUP-RESTORE -->
|
|
<section id="parallel-backup-restore" depend="repo-host/config">
|
|
<title>Parallel Backup / Restore</title>
|
|
|
|
<p><backrest/> offers parallel processing to improve performance of compression and transfer. The number of processes to be used for this feature is set using the <br-option>--process-max</br-option> option.</p>
|
|
|
|
<p>It is usually best not to use more than 25% of the available CPUs for the <cmd>backup</cmd> command. Backups do not need to run quickly as long as they are performed regularly, and the backup process should impact database performance as little as possible.</p>
|
|
|
|
<p>The restore command can and should use all available CPUs because during a restore the <postgres/> cluster is shut down and there is generally no other important work being done on the host. If the host contains multiple clusters then that should be considered when setting restore parallelism.</p>
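<p>As a sketch, restore parallelism can also be raised on the command line so the higher setting does not affect scheduled backups; the process count below is illustrative.</p>

<code-block title="Example restore with increased parallelism (illustrative)">
{[project-exe]} --stanza={[postgres-cluster-demo]} --process-max=8 --delta restore
</code-block>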
|
|
|
|
<execute-list host="{[host-repo1]}">
|
|
<title>Perform a backup with single process</title>
|
|
|
|
<execute user="{[br-user]}">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-type=full backup</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<backrest-config host="{[host-repo1]}" owner="{[br-user]}:{[br-group]}" file="{[backrest-config-demo]}">
|
|
<title>Configure <backrest/> to use multiple <cmd>backup</cmd> processes</title>
|
|
|
|
<backrest-config-option section="global" key="process-max">3</backrest-config-option>
|
|
</backrest-config>
|
|
|
|
<execute-list host="{[host-repo1]}">
|
|
<title>Perform a backup with multiple processes</title>
|
|
|
|
<execute user="{[br-user]}">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-type=full backup</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<execute-list host="{[host-repo1]}">
|
|
<title>Get backup info for the {[postgres-cluster-demo]} cluster</title>
|
|
|
|
<execute filter="n" output="y" user="{[br-user]}">
|
|
<exe-cmd>{[project-exe]} info</exe-cmd>
|
|
<exe-highlight>timestamp start/stop</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>The performance of the last backup should be improved by using multiple processes. For very small backups the difference may not be very apparent, but as the size of the database increases so will the time savings.</p>
|
|
</section>
|
|
|
|
<!-- SECTION => START/STOP -->
|
|
<section id="start-stop" depend="/repo-host/config">
|
|
<title>Starting and Stopping</title>
|
|
|
|
<p>Sometimes it is useful to prevent <backrest/> from running on a system. For example, when failing over from a primary to a standby it is best to prevent <backrest/> from running on the old primary in case <postgres/> gets restarted or cannot be completely killed. This will also prevent <backrest/> from being run by <id>cron</id>.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Stop the <backrest/> services</title>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>{[project-exe]} stop</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>New <backrest/> processes will no longer run.</p>
|
|
|
|
<execute-list host="{[host-repo1]}">
|
|
<title>Attempt a backup</title>
|
|
|
|
<execute user="{[br-user]}" err-expect="56" output="y">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} backup</exe-cmd>
|
|
<exe-highlight>\: stop file exists for all stanzas</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Specify the <br-option>--force</br-option> option to terminate any <backrest/> processes that are currently running. If <backrest/> is already stopped then stopping again will generate a warning.</p>
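<p>For example, the following is a sketch of stopping while forcing termination of any running processes.</p>

<code-block title="Example stop with force (illustrative)">
{[project-exe]} --force stop
</code-block>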
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Stop the <backrest/> services again</title>
|
|
|
|
<execute user="postgres" output="y" filter="n">
|
|
<exe-cmd>{[project-exe]} stop</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Start <backrest/> processes again with the <cmd>start</cmd> command.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Start the <backrest/> services</title>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>{[project-exe]} start</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>It is also possible to stop <backrest/> for a single stanza.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Stop <backrest/> services for the <id>demo</id> stanza</title>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} stop</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>New <backrest/> processes for the specified stanza will no longer run.</p>
|
|
|
|
<execute-list host="{[host-repo1]}">
|
|
<title>Attempt a backup</title>
|
|
|
|
<execute user="{[br-user]}" err-expect="56" output="y">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} backup</exe-cmd>
|
|
<exe-highlight>\: stop file exists for stanza demo</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>The stanza must also be specified when starting the <backrest/> processes for a single stanza.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Start the <backrest/> services for the <id>demo</id> stanza</title>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} start</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
</section>
|
|
|
|
<!-- SECTION => REPLICATION -->
|
|
<section id="replication" depend="/repo-host/perform-backup">
|
|
<title>Replication</title>
|
|
|
|
<p>Replication allows multiple copies of a <postgres/> cluster (called standbys) to be created from a single primary. The standbys are useful for balancing reads and to provide redundancy in case the primary host fails.</p>
|
|
|
|
<!-- SECTION => REPLICATION - INSTALLATION -->
|
|
<section id="installation">
|
|
<title>Installation</title>
|
|
|
|
<p>A new host named <host>{[host-pg2]}</host> is created to run the standby.</p>
|
|
|
|
<host-add id="{[host-pg2-id]}" name="{[host-pg2]}" user="{[host-pg2-user]}" image="{[host-pg2-image]}" os="{[os-type]}" mount="{[host-pg2-mount]}" option="{[host-option]}"/>
|
|
|
|
<!-- <execute-list if="{[pg-version]} >= 11" host="{[host-pg2]}">
|
|
<title>Create <user>{[br-user]}</user> user</title>
|
|
|
|
<execute if="{[os-type-is-debian]}" user="root">
|
|
<exe-cmd>adduser {[dash]}-ingroup {[pg-group]} {[dash]}-disabled-password {[dash]}-gecos "" {[br-user]}</exe-cmd>
|
|
</execute>
|
|
<execute if="{[os-type-is-rhel]}" user="root">
|
|
<exe-cmd>adduser -g{[pg-group]} -n {[br-user]}</exe-cmd>
|
|
</execute>
|
|
</execute-list> -->
|
|
|
|
<block id="br-install">
|
|
<block-variable-replace key="br-install-host">{[host-pg2]}</block-variable-replace>
|
|
<block-variable-replace key="br-install-user">postgres</block-variable-replace>
|
|
<block-variable-replace key="br-install-group">postgres</block-variable-replace>
|
|
</block>
|
|
</section>
|
|
|
|
<!-- SECTION => REPLICATION - SETUP-SSH -->
|
|
<section id="setup-ssh">
|
|
<title>Setup Passwordless SSH</title>
|
|
|
|
<block id="setup-ssh-intro">
|
|
<!-- ??? Bogus variable is set because the syntax currently requires at least one -->
|
|
<block-variable-replace key="bogus"></block-variable-replace>
|
|
</block>
|
|
|
|
<block id="setup-ssh">
|
|
<block-variable-replace key="setup-ssh-host">{[host-pg2]}</block-variable-replace>
|
|
<block-variable-replace key="setup-ssh-user">postgres</block-variable-replace>
|
|
<block-variable-replace key="setup-ssh-user-home-path">{[pg-home-path]}</block-variable-replace>
|
|
</block>
|
|
|
|
<!-- <block if="{[pg-version]} >= 11" id="setup-ssh">
|
|
<block-variable-replace key="setup-ssh-host">{[host-pg2]}</block-variable-replace>
|
|
<block-variable-replace key="setup-ssh-user">{[br-user]}</block-variable-replace>
|
|
<block-variable-replace key="setup-ssh-user-home-path">{[br-home-path]}</block-variable-replace>
|
|
</block> -->
|
|
</section>
|
|
|
|
<!-- SECTION => REPLICATION - HOT-STANDBY -->
|
|
<section id="hot-standby">
|
|
<title>Hot Standby</title>
|
|
|
|
<p>A hot standby performs replication using the WAL archive and allows read-only queries.</p>
|
|
|
|
<p><backrest/> configuration is very similar to <host>{[host-pg1]}</host> except that the <id>standby</id> recovery type will be used to keep the cluster in recovery mode when the end of the WAL stream has been reached.</p>
|
|
|
|
<backrest-config host="{[host-pg2]}" file="{[backrest-config-demo]}">
|
|
<title>Configure <backrest/> on the standby</title>
|
|
|
|
<backrest-config-option section="demo" key="pg1-path">{[pg-path]}</backrest-config-option>
|
|
|
|
<backrest-config-option section="global" key="repo1-host">{[host-repo1]}</backrest-config-option>
|
|
|
|
<backrest-config-option section="global" key="log-level-file">detail</backrest-config-option>
|
|
|
|
<backrest-config-option section="global" key="log-level-stderr">off</backrest-config-option>
|
|
<backrest-config-option section="global" key="log-timestamp">n</backrest-config-option>
|
|
</backrest-config>
|
|
|
|
<p if="{[os-type-is-debian]}">The demo cluster must be created (even though it will be overwritten on restore) in order to create the <postgres/> configuration files.</p>
|
|
|
|
<execute-list if="{[os-type-is-debian]}" host="{[host-pg2]}">
|
|
<title>Create demo cluster</title>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>{[pg-cluster-create]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p if="{[os-type-is-rhel]}">Create the path where <postgres/> will be restored.</p>
|
|
|
|
<execute-list if="{[os-type-is-rhel]}" host="{[host-pg2]}">
|
|
<title>Create <postgres/> path</title>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>
|
|
mkdir -p -m 700 {[pg-path]}
|
|
</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Now the standby can be created with the <cmd>restore</cmd> command.</p>
|
|
|
|
<admonition type="important">If the cluster is intended to be promoted without becoming the new primary (e.g. for reporting or testing), use <br-option>--archive-mode=off</br-option> or set <pg-option>archive_mode=off</pg-option> in <file>postgresql.conf</file> to disable archiving. If archiving is not disabled then the repository may be polluted with WAL that can make restores more difficult.</admonition>
|
|
|
|
<execute-list host="{[host-pg2]}">
|
|
<title>Restore the {[postgres-cluster-demo]} standby cluster</title>
|
|
|
|
<execute user="postgres" if="{[os-type-is-debian]}">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-delta --type=standby restore</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" if="{[os-type-is-rhel]}">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --type=standby restore</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" output="y" filter="n">
|
|
<exe-cmd>cat {[pg-recovery-path-demo]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>The <pg-setting>hot_standby</pg-setting> setting must be enabled before starting <postgres/> to allow read-only connections on <host>{[host-pg2]}</host>. Otherwise, connection attempts will be refused. The rest of the configuration is in case the standby is promoted to a primary.</p>
|
|
|
|
<postgres-config host="{[host-pg2]}" file="{[postgres-config-demo]}">
|
|
<title>Configure <postgres/></title>
|
|
|
|
<postgres-config-option key="hot_standby">on</postgres-config-option>
|
|
<postgres-config-option key="archive_command">'{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} archive-push %p'</postgres-config-option>
|
|
<postgres-config-option key="archive_mode">on</postgres-config-option>
|
|
<postgres-config-option key="wal_level">{[wal-level]}</postgres-config-option>
|
|
<postgres-config-option key="max_wal_senders">3</postgres-config-option>
|
|
<postgres-config-option key="log_filename">'postgresql.log'</postgres-config-option>
|
|
<postgres-config-option key="log_line_prefix">''</postgres-config-option>
|
|
</postgres-config>
|
|
|
|
<execute-list host="{[host-pg2]}">
|
|
<title>Start <postgres/></title>
|
|
|
|
<execute user="root" show="n">
|
|
<exe-cmd>rm {[postgres-log-demo]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>{[pg-cluster-start]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" show="n">
|
|
<exe-cmd>{[pg-cluster-wait]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>The <postgres/> log gives valuable information about the recovery. Note especially that the cluster has entered standby mode and is ready to accept read-only connections.</p>
|
|
|
|
<execute-list host="{[host-pg2]}">
|
|
<title>Examine the <postgres/> log output for log messages indicating success</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>cat {[postgres-log-demo]}</exe-cmd>
|
|
<exe-highlight>entering standby mode|database system is ready to accept read only connections</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>An easy way to test that replication is properly configured is to create a table on <host>{[host-pg1]}</host>.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Create a new table on the primary</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>
|
|
psql -c "
|
|
begin;
|
|
create table replicated_table (message text);
|
|
insert into replicated_table values ('{[test-table-data]}');
|
|
commit;
|
|
select * from replicated_table";
|
|
</exe-cmd>
|
|
<exe-highlight>{[test-table-data]}</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>And then query the same table on <host>{[host-pg2]}</host>.</p>
|
|
|
|
<execute-list host="{[host-pg2]}">
|
|
<title>Query new table on the standby</title>
|
|
|
|
<execute user="postgres" output="y" err-expect="1">
|
|
<exe-cmd>psql -c "select * from replicated_table;"</exe-cmd>
|
|
<exe-highlight>does not exist</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>So, what went wrong? Since <postgres/> is pulling WAL segments from the archive to perform replication, changes won't be seen on the standby until the WAL segment that contains those changes is pushed from <host>{[host-pg1]}</host>.</p>
|
|
|
|
<p>This can be done manually by calling <code>{[pg-switch-wal]}()</code> which pushes the current WAL segment to the archive (a new WAL segment is created to contain further changes).</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Call <code>{[pg-switch-wal]}()</code></title>
|
|
|
|
<execute user="postgres" output="y" filter="n">
|
|
<exe-cmd>
|
|
psql -c "select *, current_timestamp from {[pg-switch-wal]}()";
|
|
</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Now after a short delay the table will appear on <host>{[host-pg2]}</host>.</p>
|
|
|
|
<execute-list host="{[host-pg2]}">
|
|
<title>Now the new table exists on the standby (may require a few retries)</title>
|
|
|
|
<execute user="postgres" output="y" retry="15" filter="n">
|
|
<exe-cmd>psql -c "
|
|
select *, current_timestamp from replicated_table"</exe-cmd>
|
|
<exe-highlight>{[test-table-data]}</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Check the standby configuration for access to the repository.</p>
|
|
|
|
<execute-list host="{[host-pg2]}">
|
|
<title>Check the configuration</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-log-level-console=info check</exe-cmd>
|
|
<exe-highlight>because this is a standby</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
</section>
|
|
|
|
<!-- SECTION => REPLICATION - STREAMING -->
|
|
<section id="streaming">
|
|
<title>Streaming Replication</title>
|
|
|
|
<p>Instead of relying solely on the WAL archive, streaming replication makes a direct connection to the primary and applies changes as soon as they are made on the primary. This results in much less lag between the primary and standby.</p>
|
|
|
|
<p>Streaming replication requires a user with the replication privilege.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Create replication user</title>
|
|
|
|
<execute user="postgres" output="y" filter="n">
|
|
<exe-cmd>
|
|
psql -c "
|
|
create user replicator password 'jw8s0F4' replication";
|
|
</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>The <file>pg_hba.conf</file> file must be updated to allow the standby to connect as the replication user. Be sure to replace the IP address below with the actual IP address of your <host>{[host-pg1]}</host>. A reload will be required after modifying the <file>pg_hba.conf</file> file.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Create <file>pg_hba.conf</file> entry for replication user</title>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>
|
|
sh -c 'echo
|
|
"host replication replicator {[host-pg2-ip]}/32 md5"
|
|
>> {[postgres-hba-demo]}'
|
|
</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>{[pg-cluster-reload]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>The standby needs to know how to contact the primary so the <pg-option>primary_conninfo</pg-option> setting will be configured in <backrest/>.</p>
|
|
|
|
<backrest-config host="{[host-pg2]}" file="{[backrest-config-demo]}">
|
|
<title>Set <pg-option>primary_conninfo</pg-option></title>
|
|
|
|
<backrest-config-option section="demo" key="recovery-option">primary_conninfo=host={[host-pg1-ip]} port=5432 user=replicator</backrest-config-option>
|
|
</backrest-config>
|
|
|
|
<p>It is possible to configure a password in the <pg-option>primary_conninfo</pg-option> setting but using a <file>.pgpass</file> file is more flexible and secure.</p>
|
|
|
|
<execute-list host="{[host-pg2]}">
|
|
<title>Configure the replication password in the <file>.pgpass</file> file.</title>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>
|
|
sh -c 'echo
|
|
"{[host-pg1-ip]}:*:replication:replicator:jw8s0F4"
|
|
>> {[postgres-pgpass]}'
|
|
</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>chmod 600 {[postgres-pgpass]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Now the standby can be created with the <cmd>restore</cmd> command.</p>
|
|
|
|
<execute-list host="{[host-pg2]}">
|
|
<title>Stop <postgres/> and restore the {[postgres-cluster-demo]} standby cluster</title>
|
|
|
|
<execute user="root" err-suppress="y">
|
|
<exe-cmd>{[pg-cluster-stop]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-delta --type=standby restore</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" output="y" filter="n">
|
|
<exe-cmd>cat {[pg-recovery-path-demo]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<admonition type="note">The <pg-setting>primary_conninfo</pg-setting> setting has been written into the <file>{[pg-recovery-file-demo]}</file> file because it was configured as a <br-option>recovery-option</br-option> in <file>{[project-exe]}.conf</file>. The <br-setting>{[dash]}-type=preserve</br-setting> option can be used with the <cmd>restore</cmd> to leave the existing <file>{[pg-recovery-file-demo]}</file> file in place if that behavior is preferred.</admonition>
|
|
|
|
<p if="{[os-type-is-rhel]}">By default {[user-guide-os]} stores the <file>postgresql.conf</file> file in the <postgres/> data directory. That means the change made to <file>postgresql.conf</file> was overwritten by the last restore and the <pg-option>hot_standby</pg-option> setting must be enabled again. Other solutions to this problem are to store the <file>postgresql.conf</file> file elsewhere or to enable the <pg-option>hot_standby</pg-option> setting on the <host>{[host-pg1]}</host> host where it will be ignored.</p>
|
|
|
|
<postgres-config host="{[host-pg2]}" if="{[os-type-is-rhel]}" file="{[postgres-config-demo]}">
|
|
<title>Enable <pg-option>hot_standby</pg-option></title>
|
|
|
|
<postgres-config-option key="hot_standby">on</postgres-config-option>
|
|
</postgres-config>
|
|
|
|
<execute-list host="{[host-pg2]}">
|
|
<title>Start <postgres/></title>
|
|
|
|
<execute user="root" show="n">
|
|
<exe-cmd>rm {[postgres-log-demo]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>{[pg-cluster-start]}</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" show="n">
|
|
<exe-cmd>{[pg-cluster-wait]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>The <postgres/> log will confirm that streaming replication has started.</p>
|
|
|
|
<execute-list host="{[host-pg2]}">
|
|
<title>Examine the <postgres/> log output for log messages indicating success</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>cat {[postgres-log-demo]}</exe-cmd>
|
|
<exe-highlight>started streaming WAL from primary</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Now when a table is created on <host>{[host-pg1]}</host> it will appear on <host>{[host-pg2]}</host> quickly and without the need to call <code>{[pg-switch-wal]}()</code>.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Create a new table on the primary</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>
|
|
psql -c "
|
|
begin;
|
|
create table stream_table (message text);
|
|
insert into stream_table values ('{[test-table-data]}');
|
|
commit;
|
|
select *, current_timestamp from stream_table";
|
|
</exe-cmd>
|
|
<exe-highlight>{[test-table-data]}</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<execute-list host="{[host-pg2]}">
|
|
<title>Query table on the standby</title>
|
|
|
|
<execute user="postgres" output="y" retry="2" filter="n">
|
|
<exe-cmd>psql -c "
|
|
select *, current_timestamp from stream_table"</exe-cmd>
|
|
<exe-highlight>{[test-table-data]}</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
</section>
|
|
</section>
|
|
|
|
<!-- SECTION => ASYNCHRONOUS ARCHIVING -->
|
|
<section id="async-archiving" depend="/replication">
|
|
<title>Asynchronous Archiving</title>
|
|
|
|
<p>Asynchronous archiving is enabled with the <br-option>archive-async</br-option> option. This option enables asynchronous operation for both the <cmd>archive-push</cmd> and <cmd>archive-get</cmd> commands.</p>
|
|
|
|
<p>A spool path is required. The commands will store transient data here but each command works quite a bit differently so spool path usage is described in detail in each section.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Create the spool directory</title>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>mkdir -p -m 750 {[spool-path]}</exe-cmd>
|
|
</execute>
|
|
<execute user="root">
|
|
<exe-cmd>chown postgres:postgres {[spool-path]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<execute-list host="{[host-pg2]}">
|
|
<title>Create the spool directory</title>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>mkdir -p -m 750 {[spool-path]}</exe-cmd>
|
|
</execute>
|
|
<execute user="root">
|
|
<exe-cmd>chown postgres:postgres {[spool-path]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>The spool path must be configured and asynchronous archiving enabled. Asynchronous archiving automatically confers some benefit by reducing the number of connections made to remote storage, but setting <br-option>process-max</br-option> can drastically improve performance by parallelizing operations. Be sure not to set <br-option>process-max</br-option> so high that it affects normal database operations.</p>
|
|
|
|
<backrest-config host="{[host-pg1]}" file="{[backrest-config-demo]}">
|
|
<title>Configure the spool path and asynchronous archiving</title>
|
|
|
|
<backrest-config-option section="global" key="spool-path">{[spool-path]}</backrest-config-option>
|
|
<backrest-config-option section="global" key="archive-async">y</backrest-config-option>
|
|
<backrest-config-option section="global:archive-push" key="process-max">2</backrest-config-option>
|
|
<backrest-config-option section="global:archive-get" key="process-max">2</backrest-config-option>
|
|
</backrest-config>
|
|
|
|
<backrest-config host="{[host-pg2]}" file="{[backrest-config-demo]}">
|
|
<title>Configure the spool path and asynchronous archiving</title>
|
|
|
|
<backrest-config-option section="global" key="spool-path">{[spool-path]}</backrest-config-option>
|
|
<backrest-config-option section="global" key="archive-async">y</backrest-config-option>
|
|
<backrest-config-option section="global:archive-push" key="process-max">2</backrest-config-option>
|
|
<backrest-config-option section="global:archive-get" key="process-max">2</backrest-config-option>
|
|
</backrest-config>
|
|
|
|
<admonition type="note"><br-option>process-max</br-option> is configured using command sections so that the option is not used by backup and restore. This also allows different values for <cmd>archive-push</cmd> and <cmd>archive-get</cmd>.</admonition>
|
|
|
|
<p>For demonstration purposes streaming replication will be broken to force <postgres/> to get WAL using the <pg-option>restore_command</pg-option>.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Break streaming replication by changing the replication password</title>
|
|
|
|
<execute user="postgres" output="y" filter="n">
|
|
<exe-cmd>
|
|
psql -c "alter user replicator password 'bogus'"
|
|
</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<execute-list host="{[host-pg2]}">
|
|
<title>Restart standby to break connection</title>
|
|
|
|
<execute user="root">
|
|
<exe-cmd>{[pg-cluster-restart]}</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<section id="async-archive-push">
|
|
<title>Archive Push</title>
|
|
|
|
<p>The asynchronous <cmd>archive-push</cmd> command offloads WAL archiving to a separate process (or processes) to improve throughput. It works by <quote>looking ahead</quote> to see which WAL segments are ready to be archived beyond the request that <postgres/> is currently making via the <code>archive_command</code>. WAL segments are transferred to the archive directly from the <path>pg_xlog</path>/<path>pg_wal</path> directory and success is only returned by the <code>archive_command</code> when the WAL segment has been safely stored in the archive.</p>
|
|
|
|
<p>The spool path holds the current status of WAL archiving. Status files written into the spool directory are typically zero length and should consume a minimal amount of space (a few MB at most) and very little IO. All the information in this directory can be recreated so it is not necessary to preserve the spool directory if the cluster is moved to new hardware.</p>
|
|
|
|
<admonition type="important">In the original implementation of asynchronous archiving, WAL segments were copied to the spool directory before compression and transfer. The new implementation copies WAL directly from the <path>pg_xlog</path> directory. If asynchronous archiving was utilized in <proper>v1.12</proper> or prior, read the <proper>v1.13</proper> release notes carefully before upgrading.</admonition>
|
|
|
|
<p>The <file>[stanza]-archive-push-async.log</file> file can be used to monitor the activity of the asynchronous process. A good way to test this is to quickly push a number of WAL segments.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Test parallel asynchronous archiving</title>
|
|
|
|
<execute user="postgres" output="n" show="n">
|
|
<exe-cmd>rm -f /var/log/pgbackrest/demo-archive-push-async.log</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres" output="n">
|
|
<exe-cmd>
|
|
psql -c "
|
|
select pg_create_restore_point('test async push'); select {[pg-switch-wal]}();
|
|
select pg_create_restore_point('test async push'); select {[pg-switch-wal]}();
|
|
select pg_create_restore_point('test async push'); select {[pg-switch-wal]}();
|
|
select pg_create_restore_point('test async push'); select {[pg-switch-wal]}();
|
|
select pg_create_restore_point('test async push'); select {[pg-switch-wal]}();"
|
|
</exe-cmd>
|
|
</execute>
|
|
|
|
<execute user="postgres">
|
|
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-log-level-console=info check</exe-cmd>
|
|
<exe-highlight>WAL segment</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
|
|
<p>Now the log file will contain parallel, asynchronous activity.</p>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Check results in the log</title>
|
|
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>cat /var/log/pgbackrest/demo-archive-push-async.log</exe-cmd>
|
|
<exe-highlight> WAL file\(s\) to archive|pushed WAL file \'0000000</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
</section>
|
|
|
|
<section id="async-archive-get">
|
|
<title>Archive Get</title>
|
|
|
|
<p>The asynchronous <cmd>archive-get</cmd> command maintains a local queue of WAL to improve throughput. If a WAL segment is not found in the queue it is fetched from the repository along with enough consecutive WAL to fill the queue. The maximum size of the queue is defined by <br-option>archive-get-queue-max</br-option>. Whenever the queue is less than half full more WAL will be fetched to fill it.</p>
|
|
|
|
<p>Asynchronous operation is most useful in environments that generate a lot of WAL or have a high latency connection to the repository storage (e.g. <proper>S3</proper> or other object stores). In the case of a high latency connection it may be a good idea to increase <br-option>process-max</br-option>.</p>
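<p>A sketch of tuning the asynchronous <cmd>archive-get</cmd> queue and parallelism follows; the queue size and process count are illustrative and should be sized to the environment.</p>

<code-block title="Example archive-get tuning (illustrative)">
[global]
archive-async=y
spool-path={[spool-path]}
archive-get-queue-max=1GiB

[global:archive-get]
process-max=4
</code-block>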
|
|
|
|
<p>The <file>[stanza]-archive-get-async.log</file> file can be used to monitor the activity of the asynchronous process.</p>
|
|
|
|
<execute-list host="{[host-pg2]}">
|
|
<title>Check results in the log</title>
|
|
|
|
<execute user="postgres" show="n">
|
|
<exe-cmd>sleep 5</exe-cmd>
|
|
</execute>
|
|
<execute user="postgres" output="y">
|
|
<exe-cmd>cat /var/log/pgbackrest/demo-archive-get-async.log</exe-cmd>
|
|
<exe-highlight>found [0-F]{24} in the .* archive</exe-highlight>
|
|
</execute>
|
|
</execute-list>
|
|
</section>
|
|
|
|
<execute-list host="{[host-pg1]}">
|
|
<title>Fix streaming replication by changing the replication password</title>
|
|
|
|
<execute user="postgres" output="y" filter="n">
|
|
<exe-cmd>
|
|
psql -c "alter user replicator password 'jw8s0F4'"
|
|
</exe-cmd>
|
|
</execute>
|
|
</execute-list>
|
|
</section>

<!-- SECTION => STANDBY-BACKUP -->
<section id="standby-backup" depend="/replication/streaming">
<title>Backup from a Standby</title>

<p><backrest/> can perform backups on a standby instead of the primary. Standby backups require the <host>{[host-pg2]}</host> host to be configured and the <br-option>backup-standby</br-option> option enabled. If more than one standby is configured then the first running standby found will be used for the backup.</p>

<backrest-config host="{[host-repo1]}" owner="{[br-user]}:{[br-group]}" file="{[backrest-config-demo]}">
<title>Configure <br-option>pg2-host</br-option>/<br-option>pg2-host-user</br-option> and <br-option>pg2-path</br-option></title>

<backrest-config-option section="demo" key="pg2-path">{[pg-path]}</backrest-config-option>
<backrest-config-option section="demo" key="pg2-host">{[host-pg2]}</backrest-config-option>
<!-- <backrest-config-option if="{[pg-version]} >= 11" section="demo"
key="pg2-host-user">{[br-user]}</backrest-config-option> -->

<backrest-config-option section="global" key="backup-standby">y</backrest-config-option>
</backrest-config>
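<p>Purely as an illustration of the multi-standby case, the stanza section on the repository host could list additional standbys by adding further indexed options. The fragment below is a sketch only; the <host>pg3</host> host and all paths shown are hypothetical and are not part of this demo.</p>

<code-block>
[demo]
pg1-host=pg1
pg1-path=/var/lib/postgresql/12/demo
pg2-host=pg2
pg2-path=/var/lib/postgresql/12/demo
pg3-host=pg3
pg3-path=/var/lib/postgresql/12/demo

[global]
backup-standby=y
</code-block>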

<p>Both the primary and standby databases are required to perform the backup, though the vast majority of the files will be copied from the standby to reduce load on the primary. The database hosts can be configured in any order. <backrest/> will automatically determine which is the primary and which is the standby.</p>

<execute-list host="{[host-repo1]}">
<title>Backup the {[postgres-cluster-demo]} cluster from <host>pg2</host></title>

<execute user="{[br-user]}" output="y" filter="y">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --log-level-console=detail backup</exe-cmd>
<exe-highlight>backup file {[host-pg1]}|replay on the standby</exe-highlight>
</execute>
</execute-list>

<p>This incremental backup shows that most of the files are copied from the <host>{[host-pg2]}</host> host and only a few are copied from the <host>{[host-pg1]}</host> host.</p>

<p><backrest/> creates a standby backup that is identical to a backup performed on the primary. It does this by starting/stopping the backup on the <host>{[host-pg1]}</host> host, copying only files that are replicated from the <host>{[host-pg2]}</host> host, then copying the remaining few files from the <host>{[host-pg1]}</host> host. This means that logs and statistics from the primary database will be included in the backup.</p>
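<p>The 'replay on the standby' message highlighted in the backup output above refers to <backrest/> waiting for the standby to replay WAL past the backup start point before copying files from it. Replay progress can also be checked directly on the standby with a query such as the one below; the function names shown are for <postgres/> 10 and later (9.6 and earlier use <id>pg_last_xlog_replay_location()</id>) and the query is included only as a convenience.</p>

<code-block>
-- Run on the standby: confirm recovery is active and show the last replayed WAL location
select pg_is_in_recovery(), pg_last_wal_replay_lsn();
</code-block>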
</section>

<!-- SECTION => STANZA UPGRADE -->
<section id="upgrade-stanza">
<title>Upgrading <postgres/></title>
<cmd-description key="stanza-upgrade"/>

<p>The following instructions are not meant to be a comprehensive guide for upgrading <postgres/>; rather, they outline the general process for upgrading a primary and standby with the intent of demonstrating the steps required to reconfigure <backrest/>. It is recommended that a backup be taken prior to upgrading.</p>
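<p>For example, a final backup of the existing cluster could be taken from the repository host before starting the upgrade. The command below simply repeats the backup invocation used elsewhere in this guide and is included only as a reminder.</p>

<code-block>
pgbackrest --stanza=demo --type=full backup
</code-block>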

<execute-list host="{[host-pg1]}">
<title>Stop old cluster</title>

<execute user="root">
<exe-cmd>{[pg-cluster-stop]}</exe-cmd>
</execute>
</execute-list>

<p>Stop the old cluster on the standby since it will be restored from the newly upgraded cluster.</p>

<execute-list host="{[host-pg2]}">
<title>Stop old cluster</title>

<execute user="root">
<exe-cmd>{[pg-cluster-stop]}</exe-cmd>
</execute>
</execute-list>

<p>Create the new cluster and perform the upgrade.</p>

<execute-list host="{[host-pg1]}">
<title>Create new cluster and perform the upgrade</title>

<execute user="postgres">
<exe-cmd>
{[pg-bin-upgrade-path]}/initdb
    -D {[pg-path-upgrade]} -k -A peer
</exe-cmd>
</execute>

<execute user="root" if="{[os-type-is-debian]}">
<exe-cmd>{[pg-cluster-create-upgrade]}</exe-cmd>
</execute>

<execute user="postgres" output="y" if="{[os-type-is-debian]}">
<exe-cmd>sh -c 'cd /var/lib/postgresql &&
    /usr/lib/postgresql/{[pg-version-upgrade]}/bin/pg_upgrade
    {[dash]}-old-bindir=/usr/lib/postgresql/{[pg-version]}/bin
    {[dash]}-new-bindir=/usr/lib/postgresql/{[pg-version-upgrade]}/bin
    {[dash]}-old-datadir={[pg-path]}
    {[dash]}-new-datadir={[pg-path-upgrade]}
    {[dash]}-old-options=" -c config_file={[postgres-config-demo]}"
    {[dash]}-new-options=" -c config_file={[postgres-config-demo-upgrade]}"'
</exe-cmd>
<exe-highlight>Upgrade Complete</exe-highlight>
</execute>

<execute user="postgres" output="y" if="{[os-type-is-rhel]}">
<exe-cmd>sh -c 'cd /var/lib/pgsql &&
    /usr/pgsql-{[pg-version-upgrade]}/bin/pg_upgrade
    {[dash]}-old-bindir=/usr/pgsql-{[pg-version]}/bin
    {[dash]}-new-bindir=/usr/pgsql-{[pg-version-upgrade]}/bin
    {[dash]}-old-datadir={[pg-path]}
    {[dash]}-new-datadir={[pg-path-upgrade]}
    {[dash]}-old-options=" -c config_file={[postgres-config-demo]}"
    {[dash]}-new-options=" -c config_file={[postgres-config-demo-upgrade]}"'
</exe-cmd>
<exe-highlight>Upgrade Complete</exe-highlight>
</execute>
</execute-list>
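<p><id>pg_upgrade</id> also provides a <id>--check</id> mode that validates the old and new clusters without changing any data, which can be useful to run before the real upgrade. The invocation below is a hypothetical sketch using the Debian-style layout from this guide with an upgrade from version 12 to 13; adjust the binaries, data directories, and configuration file paths to match your installation.</p>

<code-block>
cd /var/lib/postgresql

/usr/lib/postgresql/13/bin/pg_upgrade \
    --old-bindir=/usr/lib/postgresql/12/bin \
    --new-bindir=/usr/lib/postgresql/13/bin \
    --old-datadir=/var/lib/postgresql/12/demo \
    --new-datadir=/var/lib/postgresql/13/demo \
    --old-options=" -c config_file=/etc/postgresql/12/demo/postgresql.conf" \
    --new-options=" -c config_file=/etc/postgresql/13/demo/postgresql.conf" \
    --check
</code-block>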

<p>Configure the new cluster settings and port.</p>

<postgres-config host="{[host-pg1]}" file="{[postgres-config-demo-upgrade]}">
<title>Configure <postgres/></title>

<postgres-config-option key="archive_command">'{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} archive-push %p'</postgres-config-option>
<postgres-config-option key="archive_mode">on</postgres-config-option>
<postgres-config-option key="wal_level">{[wal-level]}</postgres-config-option>
<postgres-config-option key="max_wal_senders">3</postgres-config-option>
<postgres-config-option key="listen_addresses">'*'</postgres-config-option>
<postgres-config-option key="log_line_prefix">''</postgres-config-option>
<postgres-config-option key="port">5432</postgres-config-option>
</postgres-config>

<p>Update the <backrest/> configuration on all systems to point to the new cluster.</p>

<backrest-config host="{[host-pg1]}" file="{[backrest-config-demo]}">
<title>Upgrade the <br-option>pg1-path</br-option></title>

<backrest-config-option section="demo" key="pg1-path">{[pg-path-upgrade]}</backrest-config-option>
</backrest-config>

<backrest-config host="{[host-pg2]}" file="{[backrest-config-demo]}">
<title>Upgrade the <br-option>pg1-path</br-option></title>

<backrest-config-option section="demo" key="pg1-path">{[pg-path-upgrade]}</backrest-config-option>
</backrest-config>

<backrest-config host="{[host-repo1]}" owner="{[br-user]}:{[br-group]}" file="{[backrest-config-demo]}">
<title>Upgrade <br-option>pg1-path</br-option> and <br-option>pg2-path</br-option>, disable backup from standby</title>

<backrest-config-option section="demo" key="pg1-path">{[pg-path-upgrade]}</backrest-config-option>
<backrest-config-option section="demo" key="pg2-path">{[pg-path-upgrade]}</backrest-config-option>

<backrest-config-option section="global" key="backup-standby">n</backrest-config-option>
</backrest-config>
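<p>After these changes the <file>{[backrest-config-demo]}</file> file on the repository host might look roughly like the sketch below. The repository path and retention value are illustrative examples, and any options configured earlier in this guide (such as repository encryption) are omitted for brevity.</p>

<code-block>
[demo]
pg1-host=pg1
pg1-path=/var/lib/postgresql/13/demo
pg2-host=pg2
pg2-path=/var/lib/postgresql/13/demo

[global]
repo1-path=/var/lib/pgbackrest
repo1-retention-full=2
backup-standby=n
</code-block>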

<execute-list host="{[host-pg1]}">
<title>Copy hba configuration</title>

<execute user="root">
<exe-cmd>cp {[postgres-hba-demo]}
    {[postgres-hba-demo-upgrade]}</exe-cmd>
</execute>
</execute-list>

<p>Before starting the new cluster, the <cmd>stanza-upgrade</cmd> command must be run.</p>

<execute-list host="{[host-pg1]}">
<title>Upgrade the stanza</title>

<execute user="postgres" output="y">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-no-online
    {[dash]}-log-level-console=info stanza-upgrade</exe-cmd>
<exe-highlight>completed successfully</exe-highlight>
</execute>
</execute-list>

<p>Start the new cluster and confirm it is successfully installed.</p>

<execute-list host="{[host-pg1]}">
<title>Start new cluster</title>

<execute user="root" output="y">
<exe-cmd>{[pg-cluster-start-upgrade]}</exe-cmd>
</execute>
</execute-list>

<p>Test configuration using the <cmd>check</cmd> command.</p>

<execute-list host="{[host-pg1]}">
<title>Check configuration</title>

<execute user="postgres" output="y" filter="n">
<exe-cmd>{[pg-cluster-check-upgrade]}</exe-cmd>
</execute>

<execute user="postgres" output="y" filter="n">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} check</exe-cmd>
</execute>
</execute-list>

<p>Remove the old cluster.</p>

<execute-list host="{[host-pg1]}">
<title>Remove old cluster</title>

<execute if="{[os-type-is-debian]}" user="root">
<exe-cmd>pg_dropcluster {[pg-version]} {[postgres-cluster-demo]}</exe-cmd>
</execute>

<execute if="{[os-type-is-rhel]}" user="root">
<exe-cmd>rm -rf {[pg-path]}</exe-cmd>
</execute>
</execute-list>

<p>Install the new <postgres/> binaries on the standby and create the cluster.</p>
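<p>If the new version's packages are not already present on the standby, they can be installed from the same package repositories used earlier in this guide. The package names below assume the <proper>PGDG</proper> packaging for an upgrade to version 13 and are shown only as an example; substitute the package that matches your target version and platform.</p>

<code-block>
# Debian/Ubuntu
apt-get install postgresql-13

# RHEL/CentOS
yum install postgresql13-server
</code-block>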

<execute-list host="{[host-pg2]}">
<title>Remove old cluster and create the new cluster</title>

<execute if="{[os-type-is-debian]}" user="root">
<exe-cmd>pg_dropcluster {[pg-version]} {[postgres-cluster-demo]}</exe-cmd>
</execute>

<execute if="{[os-type-is-rhel]}" user="root">
<exe-cmd>rm -rf {[pg-path]}</exe-cmd>
</execute>

<execute user="postgres" if="{[os-type-is-rhel]}">
<exe-cmd>
mkdir -p -m 700 {[pg-bin-upgrade-path]}
</exe-cmd>
</execute>

<execute user="root" if="{[os-type-is-debian]}">
<exe-cmd>{[pg-cluster-create-upgrade]}</exe-cmd>
</execute>
</execute-list>

<p>Run the <cmd>check</cmd> command on the repository host. A warning that the standby is down is expected because the standby cluster has not yet been restored and started. Running this command demonstrates that the repository server is aware of the standby and is configured properly for the primary server.</p>

<execute-list host="{[host-repo1]}">
<title>Check configuration</title>

<execute user="{[br-user]}" output="y" filter="n">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} check</exe-cmd>
</execute>
</execute-list>

<p>Run a full backup on the new cluster and then restore the standby from the backup. The backup type will automatically be changed to <id>full</id> if <id>incr</id> or <id>diff</id> is requested.</p>

<execute-list host="{[host-repo1]}">
<title>Run a full backup</title>

<execute user="{[br-user]}">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-type=full backup</exe-cmd>
</execute>
</execute-list>

<execute-list host="{[host-pg2]}">
<title>Restore the {[postgres-cluster-demo]} standby cluster</title>

<execute user="postgres" if="{[os-type-is-debian]}">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-delta --type=standby restore</exe-cmd>
</execute>

<execute user="postgres" if="{[os-type-is-rhel]}">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} --type=standby restore</exe-cmd>
</execute>
</execute-list>

<postgres-config host="{[host-pg2]}" file="{[postgres-config-demo-upgrade]}">
<title>Configure <postgres/></title>

<postgres-config-option key="hot_standby">on</postgres-config-option>
</postgres-config>

<execute-list host="{[host-pg2]}">
<title>Start <postgres/> and check the <backrest/> configuration</title>

<execute user="root">
<exe-cmd>{[pg-cluster-start-upgrade]}</exe-cmd>
</execute>

<execute user="postgres" show="n">
<exe-cmd>{[pg-cluster-wait]}</exe-cmd>
</execute>

<execute user="postgres" output="y" filter="n">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} check</exe-cmd>
</execute>
</execute-list>

<p>Backup from standby can be enabled now that the standby is restored.</p>

<backrest-config host="{[host-repo1]}" owner="{[br-user]}:{[br-group]}" file="{[backrest-config-demo]}">
<title>Reenable backup from standby</title>

<backrest-config-option section="global" key="backup-standby">y</backrest-config-option>
</backrest-config>
</section>
</doc>