Add asynchronous, parallel archive-get.
This feature maintains a queue of WAL segments to help reduce latency when PostgreSQL requests a WAL segment with restore_command.
This commit is contained in:
parent c48b0a2a1e
commit 54dd6f3ed4
@ -281,6 +281,8 @@ use constant CFGOPT_REPO_S3_VERIFY_SSL => CFGDEF_RE
|
||||
#-----------------------------------------------------------------------------------------------------------------------------------
|
||||
use constant CFGOPT_ARCHIVE_ASYNC => 'archive-async';
|
||||
push @EXPORT, qw(CFGOPT_ARCHIVE_ASYNC);
|
||||
use constant CFGOPT_ARCHIVE_GET_QUEUE_MAX => 'archive-get-queue-max';
|
||||
push @EXPORT, qw(CFGOPT_ARCHIVE_GET_QUEUE_MAX);
|
||||
use constant CFGOPT_ARCHIVE_PUSH_QUEUE_MAX => 'archive-push-queue-max';
|
||||
push @EXPORT, qw(CFGOPT_ARCHIVE_PUSH_QUEUE_MAX);
|
||||
|
||||
@ -558,6 +560,7 @@ my $rhCommandDefine =
|
||||
&CFGCMD_ARCHIVE_GET =>
|
||||
{
|
||||
&CFGDEF_LOG_FILE => false,
|
||||
&CFGDEF_LOCK_TYPE => CFGDEF_LOCK_TYPE_ARCHIVE,
|
||||
},
|
||||
|
||||
&CFGCMD_ARCHIVE_PUSH =>
|
||||
@ -1048,6 +1051,7 @@ my %hConfigDefine =
|
||||
&CFGDEF_ALLOW_RANGE => [WAIT_TIME_MINIMUM, 86400],
|
||||
&CFGDEF_COMMAND =>
|
||||
{
|
||||
&CFGCMD_ARCHIVE_GET => {},
|
||||
&CFGCMD_ARCHIVE_PUSH => {},
|
||||
&CFGCMD_BACKUP => {},
|
||||
&CFGCMD_CHECK => {},
|
||||
@ -1756,6 +1760,7 @@ my %hConfigDefine =
|
||||
&CFGDEF_DEFAULT => '/var/spool/' . BACKREST_EXE,
|
||||
&CFGDEF_COMMAND =>
|
||||
{
|
||||
&CFGCMD_ARCHIVE_GET => {},
|
||||
&CFGCMD_ARCHIVE_PUSH => {},
|
||||
},
|
||||
&CFGDEF_DEPEND =>
|
||||
@ -1773,6 +1778,7 @@ my %hConfigDefine =
|
||||
&CFGDEF_ALLOW_RANGE => [1, 96],
|
||||
&CFGDEF_COMMAND =>
|
||||
{
|
||||
&CFGCMD_ARCHIVE_GET => {},
|
||||
&CFGCMD_ARCHIVE_PUSH => {},
|
||||
&CFGCMD_BACKUP => {},
|
||||
&CFGCMD_RESTORE => {},
|
||||
@ -1864,6 +1870,7 @@ my %hConfigDefine =
|
||||
&CFGDEF_DEFAULT => false,
|
||||
&CFGDEF_COMMAND =>
|
||||
{
|
||||
&CFGCMD_ARCHIVE_GET => {},
|
||||
&CFGCMD_ARCHIVE_PUSH => {},
|
||||
}
|
||||
},
|
||||
@ -1884,6 +1891,18 @@ my %hConfigDefine =
|
||||
},
|
||||
},
|
||||
|
||||
&CFGOPT_ARCHIVE_GET_QUEUE_MAX =>
|
||||
{
|
||||
&CFGDEF_SECTION => CFGDEF_SECTION_GLOBAL,
|
||||
&CFGDEF_TYPE => CFGDEF_TYPE_INTEGER,
|
||||
&CFGDEF_DEFAULT => 128 * 1024 * 1024, # 128MB
|
||||
&CFGDEF_ALLOW_RANGE => [0, 4 * 1024 * 1024 * 1024 * 1024 * 1024], # 0-4PB
|
||||
&CFGDEF_COMMAND =>
|
||||
{
|
||||
&CFGCMD_ARCHIVE_GET => {},
|
||||
},
|
||||
},
|
||||
|
||||
# Backup options
|
||||
#-------------------------------------------------------------------------------------------------------------------------------
|
||||
&CFGOPT_ARCHIVE_CHECK =>
|
||||
|
@ -177,9 +177,15 @@
<config-key id="spool-path" name="Spool Path">
<summary>Path where transient data is stored.</summary>

<text>This path is used to store acknowledgements from the asynchronous <cmd>archive-push</cmd> process. These files are generally very small (zero to a few hundred bytes) so not much space is required.
<text>This path is used to store data for the asynchronous <cmd>archive-push</cmd> and <cmd>archive-get</cmd> commands.

The data stored in the spool path is not strictly temporary since it can and should survive a reboot. However, loss of the data in the spool path is not a problem. <backrest/> will simply recheck each WAL segment to ensure it is safely archived.</text>
The asynchronous <cmd>archive-push</cmd> command writes acknowledgements into the spool path when it has successfully stored WAL in the archive (and errors on failure) so the foreground process can quickly notify <postgres/>. Acknowledgement files are very small (zero on success and a few hundred bytes on error).

The asynchronous <cmd>archive-get</cmd> process queues WAL in the spool path so it can be provided very quickly when <postgres/> requests it. Moving files to <postgres/> is most efficient when the spool path is on the same filesystem as <path>pg_xlog</path>/<path>pg_wal</path>.

The data stored in the spool path is not strictly temporary since it can and should survive a reboot. However, loss of the data in the spool path is not a problem. <backrest/> will simply recheck each WAL segment to ensure it is safely archived for <cmd>archive-push</cmd> and rebuild the queue for <cmd>archive-get</cmd>.

The spool path is intended to be located on a local Posix-compatible filesystem, not a remote filesystem such as <proper>NFS</proper> or <proper>CIFS</proper>.</text>

<example>/backup/db/spool</example>
</config-key>
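For orientation, here is a minimal sketch (not part of the commit) of where these queues live under the spool path, based on the path rules this commit adds to the storage helper: archive/<stanza>/in for archive-get and archive/<stanza>/out for archive-push. The spool path and stanza name shown are illustrative.

#include <stdio.h>

/* Illustrative only: mirrors the spool path rules added in this commit
   (STORAGE_SPOOL_ARCHIVE_IN  => "archive/<stanza>/in",
    STORAGE_SPOOL_ARCHIVE_OUT => "archive/<stanza>/out"). */
int main(void)
{
    const char *spoolPath = "/var/spool/pgbackrest";    /* hypothetical spool-path */
    const char *stanza = "demo";                        /* hypothetical stanza */

    /* archive-get keeps its WAL queue and .ok/.error status files here */
    printf("%s/archive/%s/in\n", spoolPath, stanza);

    /* archive-push writes its <wal>.ok/<wal>.error acknowledgements here */
    printf("%s/archive/%s/out\n", spoolPath, stanza);

    return 0;
}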
@ -514,7 +520,7 @@
<config-key id="manifest-save-threshold" name="Manifest Save Threshold">
<summary>Manifest save threshold during backup.</summary>

<text>Defines how often the manifest will be saved during a backup (in bytes). Saving the manifest is important because it stores the checksums and allows the resume function to work efficiently. The actual threshold used is 1% of the backup size or <setting>manifest-save-threshold</setting>, whichever is greater.
<text>Defines how often the manifest will be saved during a backup. Saving the manifest is important because it stores the checksums and allows the resume function to work efficiently. The actual threshold used is 1% of the backup size or <setting>manifest-save-threshold</setting>, whichever is greater.

Size can be entered in bytes (default) or KB, MB, GB, TB, or PB where the multiplier is a power of 1024.</text>

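To make the rule above concrete, here is a tiny sketch (not from this commit) of the effective save interval, which is the greater of 1% of the backup size and the configured threshold; the sizes used are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Illustrative: the manifest is saved roughly every
   max(backupSize / 100, manifestSaveThreshold) bytes. */
static uint64_t manifestSaveInterval(uint64_t backupSize, uint64_t threshold)
{
    uint64_t onePercent = backupSize / 100;
    return onePercent > threshold ? onePercent : threshold;
}

int main(void)
{
    /* e.g. a 500GiB backup with a 1GiB threshold saves about every 5GiB */
    uint64_t backupSize = 500ULL * 1024 * 1024 * 1024;
    uint64_t threshold = 1024ULL * 1024 * 1024;

    printf("%llu bytes\n", (unsigned long long)manifestSaveInterval(backupSize, threshold));
    return 0;
}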
@ -558,21 +564,34 @@

<!-- CONFIG - ARCHIVE -->
<config-section id="archive" name="Archive">
<text>The <setting>archive</setting> section defines parameters when doing async archiving. This means that the archive files will be stored locally, then a background process will pick them and move them to the backup.</text>
<text>The <setting>archive</setting> section defines options for the <cmd>archive-push</cmd> and <cmd>archive-get</cmd> commands.</text>

<config-key-list>
<!-- CONFIG - ARCHIVE SECTION - ARCHIVE-ASYNC KEY -->
<config-key id="archive-async" name="Asynchronous Archiving">
<summary>Archive WAL segments asynchronously.</summary>
<summary>Push/get WAL segments asynchronously.</summary>

<text>WAL segments will be copied to the local repo, then a process will be forked to compress the segment and transfer it to the remote repo if configured. Control will be returned to <postgres/> as soon as the WAL segment is copied locally.</text>
<text>Enables asynchronous operation for the <cmd>archive-push</cmd> and <cmd>archive-get</cmd> commands.

Asynchronous operation is more efficient because it can reuse connections and take advantage of parallelism. See the <br-option>spool-path</br-option>, <br-option>archive-get-queue-max</br-option>, and <br-option>archive-push-queue-max</br-option> options for more information.</text>

<example>y</example>
</config-key>

<!-- CONFIG - ARCHIVE SECTION - ARCHIVE-GET-QUEUE-MAX KEY -->
<config-key id="archive-get-queue-max" name="Maximum Archive Get Queue Size">
<summary>Maximum size of the <backrest/> archive-get queue.</summary>

<text>Specifies the maximum size of the <cmd>archive-get</cmd> queue when <br-option>archive-async</br-option> is enabled. The queue is stored in the <br-option>spool-path</br-option> and is used to speed up the delivery of WAL to <postgres/>.

Size can be entered in bytes (default) or KB, MB, GB, TB, or PB where the multiplier is a power of 1024.</text>

<example>1073741824</example>
</config-key>

<!-- CONFIG - ARCHIVE SECTION - ARCHIVE-PUSH-QUEUE-MAX KEY -->
<config-key id="archive-push-queue-max" name="Maximum Archive Push Queue Size">
<summary>Limit size (in bytes) of the <postgres/> archive queue.</summary>
<summary>Maximum size of the <postgres/> archive queue.</summary>

<text>After the limit is reached, the following will happen:
<ol>
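As a rough illustration of how archive-get-queue-max translates into queue depth, the sketch below mirrors the calculation in the queueNeed() function added later in this commit: queue size divided by WAL segment size, with a floor of two segments. The values used are the option default (128MB) and the standard 16MB WAL segment size.

#include <stddef.h>
#include <stdio.h>

/* Illustrative version of the queue-depth calculation used by the async
   archive-get process: at least two segments, otherwise queueMax / walSegmentSize. */
static unsigned int walSegmentQueueTotal(size_t queueMax, size_t walSegmentSize)
{
    unsigned int total = (unsigned int)(queueMax / walSegmentSize);
    return total < 2 ? 2 : total;
}

int main(void)
{
    size_t queueMax = 128 * 1024 * 1024;        /* archive-get-queue-max default */
    size_t walSegmentSize = 16 * 1024 * 1024;   /* WAL_SEGMENT_DEFAULT_SIZE */

    /* 128MB / 16MB = 8 WAL segments kept ahead of PostgreSQL's requests */
    printf("%u segments\n", walSegmentQueueTotal(queueMax, walSegmentSize));
    return 0;
}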
@ -25,6 +25,10 @@
</release-bug-list>

<release-feature-list>
<release-item>
<p>Add asynchronous, parallel <cmd>archive-get</cmd>. This feature maintains a queue of WAL segments to help reduce latency when <postgres/> requests a WAL segment with <pg-option>restore_command</pg-option>.</p>
</release-item>

<release-item>
<release-item-contributor-list>
<release-item-contributor id="shang.cynthia"/>
@ -1970,75 +1970,6 @@
|
||||
</execute>
|
||||
</execute-list>
|
||||
</section>
|
||||
|
||||
<!-- SECTION => REPOSITORY HOST - ASYNCHRONOUS ARCHIVING -->
|
||||
<section id="async-archiving" depend="config">
|
||||
<title>Asynchronous Archiving</title>
|
||||
|
||||
<p>The <br-option>archive-async</br-option> option offloads WAL archiving to a separate process (or processes) to improve throughput. It works by <quote>looking ahead</quote> to see which WAL segments are ready to be archived beyond the request that <postgres/> is currently making via the <code>archive_command</code>. WAL segments are transferred to the archive directly from the <path>pg_xlog</path>/<path>pg_wal</path> directory and success is only returned by the <code>archive_command</code> when the WAL segment has been safely stored in the archive.</p>
|
||||
|
||||
<p>The spool directory is created to hold the current status of WAL archiving. Status files written into the spool directory are typically zero length and should consume a minimal amount of space (a few MB at most) and very little IO. All the information in this directory can be recreated so it is not necessary to preserve the spool directory if the cluster is moved to new hardware.</p>
|
||||
|
||||
<p><b>NOTE:</b> In the original implementation of asynchronous archiving, WAL segments were copied to the spool directory before compression and transfer. The new implementation copies WAL directly from the <path>pg_xlog</path> directory. If asynchronous archiving was utilized in <proper>v1.12</proper> or prior, read the <proper>v1.13</proper> release notes carefully before upgrading.</p>
|
||||
|
||||
<execute-list host="{[host-pg1]}">
|
||||
<title>Create the spool directory</title>
|
||||
|
||||
<execute user="root">
|
||||
<exe-cmd>mkdir -m 750 {[spool-path]}</exe-cmd>
|
||||
</execute>
|
||||
<execute user="root">
|
||||
<exe-cmd>chown postgres:postgres {[spool-path]}</exe-cmd>
|
||||
</execute>
|
||||
</execute-list>
|
||||
|
||||
<p>The spool path must be configured and asynchronous archiving enabled. Asynchronous archiving automatically confers some benefit by reducing the number of ssh connections made to the backup server, but setting <br-option>process-max</br-option> can drastically improve performance. Be sure not to set <br-option>process-max</br-option> so high that it affects normal database operations.</p>
|
||||
|
||||
<backrest-config host="{[host-pg1]}" file="{[backrest-config-demo]}">
|
||||
<title>Configure the spool path and asynchronous archiving</title>
|
||||
|
||||
<backrest-config-option section="global" key="spool-path">{[spool-path]}</backrest-config-option>
|
||||
<backrest-config-option section="global" key="archive-async">y</backrest-config-option>
|
||||
<backrest-config-option section="global:archive-push" key="process-max">2</backrest-config-option>
|
||||
</backrest-config>
|
||||
|
||||
<p>The <file>[stanza]-archive-push-async.log</file> file can be used to monitor the activity of the asynchronous process. A good way to test this is to quickly push a number of WAL segments.</p>
|
||||
|
||||
<execute-list host="{[host-pg1]}">
|
||||
<title>Test parallel asynchronous archiving</title>
|
||||
|
||||
<execute output="n" show="n">
|
||||
<exe-cmd>rm -f /var/log/pgbackrest/demo-archive-push-async.log</exe-cmd>
|
||||
</execute>
|
||||
|
||||
<execute output="n">
|
||||
<exe-cmd>
|
||||
psql -c "
|
||||
select pg_create_restore_point('test async push'); select pg_switch_xlog();
|
||||
select pg_create_restore_point('test async push'); select pg_switch_xlog();
|
||||
select pg_create_restore_point('test async push'); select pg_switch_xlog();
|
||||
select pg_create_restore_point('test async push'); select pg_switch_xlog();
|
||||
select pg_create_restore_point('test async push'); select pg_switch_xlog();"
|
||||
</exe-cmd>
|
||||
</execute>
|
||||
|
||||
<execute>
|
||||
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-log-level-console=info check</exe-cmd>
|
||||
<exe-highlight>WAL segment</exe-highlight>
|
||||
</execute>
|
||||
</execute-list>
|
||||
|
||||
<p>Now the log file will contain parallel, asynchronous activity.</p>
|
||||
|
||||
<execute-list host="{[host-pg1]}">
|
||||
<title>Check results in the log</title>
|
||||
|
||||
<execute output="y">
|
||||
<exe-cmd>cat /var/log/pgbackrest/demo-archive-push-async.log</exe-cmd>
|
||||
<exe-highlight> WAL file\(s\) to archive|pushed WAL file 0000000</exe-highlight>
|
||||
</execute>
|
||||
</execute-list>
|
||||
</section>
|
||||
</section>
|
||||
|
||||
<!-- SECTION => PARALLEL BACKUP-RESTORE -->
|
||||
@ -2222,6 +2153,8 @@
|
||||
|
||||
<backrest-config-option section="demo" key="recovery-option">standby_mode=on</backrest-config-option>
|
||||
|
||||
<backrest-config-option section="global" key="log-level-file">detail</backrest-config-option>
|
||||
|
||||
<backrest-config-option section="global" key="log-level-stderr">off</backrest-config-option>
|
||||
<backrest-config-option section="global" key="log-timestamp">n</backrest-config-option>
|
||||
</backrest-config>
|
||||
@ -2518,6 +2451,158 @@
|
||||
</section>
|
||||
</section>
|
||||
|
||||
<!-- SECTION => ASYNCHRONOUS ARCHIVING -->
|
||||
<section id="async-archiving" depend="/replication">
|
||||
<title>Asynchronous Archiving</title>
|
||||
|
||||
<p>Asynchronous archiving is enabled with the <br-option>archive-async</br-option> option. This option enables asynchronous operation for both the <cmd>archive-push</cmd> and <cmd>archive-get</cmd> commands.</p>
|
||||
|
||||
<p>A spool path is required. The commands will store transient data here but each command works quite a bit differently so spool path usage is described in detail in each section.</p>
|
||||
|
||||
<execute-list host="{[host-pg1]}">
|
||||
<title>Create the spool directory</title>
|
||||
|
||||
<execute user="root">
|
||||
<exe-cmd>mkdir -m 750 {[spool-path]}</exe-cmd>
|
||||
</execute>
|
||||
<execute user="root">
|
||||
<exe-cmd>chown postgres:postgres {[spool-path]}</exe-cmd>
|
||||
</execute>
|
||||
</execute-list>
|
||||
|
||||
<execute-list host="{[host-pg2]}">
|
||||
<title>Create the spool directory</title>
|
||||
|
||||
<execute user="root">
|
||||
<exe-cmd>mkdir -m 750 {[spool-path]}</exe-cmd>
|
||||
</execute>
|
||||
<execute user="root">
|
||||
<exe-cmd>chown postgres:postgres {[spool-path]}</exe-cmd>
|
||||
</execute>
|
||||
</execute-list>
|
||||
|
||||
<p>The spool path must be configured and asynchronous archiving enabled. Asynchronous archiving automatically confers some benefit by reducing the number of connections made to remote storage, but setting <br-option>process-max</br-option> can drastically improve performance by parallelizing operations. Be sure not to set <br-option>process-max</br-option> so high that it affects normal database operations.</p>
|
||||
|
||||
<backrest-config host="{[host-pg1]}" file="{[backrest-config-demo]}">
|
||||
<title>Configure the spool path and asynchronous archiving</title>
|
||||
|
||||
<backrest-config-option section="global" key="spool-path">{[spool-path]}</backrest-config-option>
|
||||
<backrest-config-option section="global" key="archive-async">y</backrest-config-option>
|
||||
<backrest-config-option section="global:archive-push" key="process-max">2</backrest-config-option>
|
||||
<backrest-config-option section="global:archive-get" key="process-max">2</backrest-config-option>
|
||||
</backrest-config>
|
||||
|
||||
<backrest-config host="{[host-pg2]}" file="{[backrest-config-demo]}">
|
||||
<title>Configure the spool path and asynchronous archiving</title>
|
||||
|
||||
<backrest-config-option section="global" key="spool-path">{[spool-path]}</backrest-config-option>
|
||||
<backrest-config-option section="global" key="archive-async">y</backrest-config-option>
|
||||
<backrest-config-option section="global:archive-push" key="process-max">2</backrest-config-option>
|
||||
<backrest-config-option section="global:archive-get" key="process-max">2</backrest-config-option>
|
||||
</backrest-config>
|
||||
|
||||
<p>Note that <br-option>process-max</br-option> is configured using command sections so that the option is not used by backup and restore. This also allows different values for <cmd>archive-push</cmd> and <cmd>archive-get</cmd>.</p>
|
||||
|
||||
<p>For demonstration purposes streaming replication will be broken to force <postgres/> to get WAL using the <pg-option>restore_command</pg-option>.</p>
|
||||
|
||||
<execute-list host="{[host-pg1]}">
|
||||
<title>Break streaming replication by changing the replication password</title>
|
||||
|
||||
<execute output="y" filter="n">
|
||||
<exe-cmd>
|
||||
psql -c "alter user replicator password 'bogus'"
|
||||
</exe-cmd>
|
||||
</execute>
|
||||
</execute-list>
|
||||
|
||||
<execute-list host="{[host-pg2]}">
|
||||
<title>Restart standby to break connection</title>
|
||||
|
||||
<execute user="root">
|
||||
<exe-cmd>{[pg-cluster-restart]}</exe-cmd>
|
||||
</execute>
|
||||
</execute-list>
|
||||
|
||||
<section id="async-archive-push">
|
||||
<title>Archive Push</title>
|
||||
|
||||
<p>The asynchronous <cmd>archive-push</cmd> command offloads WAL archiving to a separate process (or processes) to improve throughput. It works by <quote>looking ahead</quote> to see which WAL segments are ready to be archived beyond the request that <postgres/> is currently making via the <code>archive_command</code>. WAL segments are transferred to the archive directly from the <path>pg_xlog</path>/<path>pg_wal</path> directory and success is only returned by the <code>archive_command</code> when the WAL segment has been safely stored in the archive.</p>
|
||||
|
||||
<p>The spool path holds the current status of WAL archiving. Status files written into the spool directory are typically zero length and should consume a minimal amount of space (a few MB at most) and very little IO. All the information in this directory can be recreated so it is not necessary to preserve the spool directory if the cluster is moved to new hardware.</p>
|
||||
|
||||
<p><b>NOTE:</b> In the original implementation of asynchronous archiving, WAL segments were copied to the spool directory before compression and transfer. The new implementation copies WAL directly from the <path>pg_xlog</path> directory. If asynchronous archiving was utilized in <proper>v1.12</proper> or prior, read the <proper>v1.13</proper> release notes carefully before upgrading.</p>
|
||||
|
||||
<p>The <file>[stanza]-archive-push-async.log</file> file can be used to monitor the activity of the asynchronous process. A good way to test this is to quickly push a number of WAL segments.</p>
|
||||
|
||||
<execute-list host="{[host-pg1]}">
|
||||
<title>Test parallel asynchronous archiving</title>
|
||||
|
||||
<execute output="n" show="n">
|
||||
<exe-cmd>rm -f /var/log/pgbackrest/demo-archive-push-async.log</exe-cmd>
|
||||
</execute>
|
||||
|
||||
<execute output="n">
|
||||
<exe-cmd>
|
||||
psql -c "
|
||||
select pg_create_restore_point('test async push'); select pg_switch_xlog();
|
||||
select pg_create_restore_point('test async push'); select pg_switch_xlog();
|
||||
select pg_create_restore_point('test async push'); select pg_switch_xlog();
|
||||
select pg_create_restore_point('test async push'); select pg_switch_xlog();
|
||||
select pg_create_restore_point('test async push'); select pg_switch_xlog();"
|
||||
</exe-cmd>
|
||||
</execute>
|
||||
|
||||
<execute>
|
||||
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-log-level-console=info check</exe-cmd>
|
||||
<exe-highlight>WAL segment</exe-highlight>
|
||||
</execute>
|
||||
</execute-list>
|
||||
|
||||
<p>Now the log file will contain parallel, asynchronous activity.</p>
|
||||
|
||||
<execute-list host="{[host-pg1]}">
|
||||
<title>Check results in the log</title>
|
||||
|
||||
<execute output="y">
|
||||
<exe-cmd>cat /var/log/pgbackrest/demo-archive-push-async.log</exe-cmd>
|
||||
<exe-highlight> WAL file\(s\) to archive|pushed WAL file 0000000</exe-highlight>
|
||||
</execute>
|
||||
</execute-list>
|
||||
</section>
|
||||
|
||||
<section id="async-archive-get">
|
||||
<title>Archive Get</title>
|
||||
|
||||
<p>The asynchronous <cmd>archive-get</cmd> command maintains a local queue of WAL to improve throughput. If a WAL segment is not found in the queue it is fetched from the repository along with enough consecutive WAL to fill the queue. The maximum size of the queue is defined by <br-option>archive-get-queue-max</br-option>. Whenever the queue is less than half full more WAL will be fetched to fill it.</p>
|
||||
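A rough sketch of the "less than half full" check described above, modeled on the queueFull test added to get.c in this commit; the segment count and sizes are illustrative.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative version of the refill trigger in get.c: fetch more WAL when the
   spool queue holds half of archive-get-queue-max or less. */
static bool queueNeedsRefill(size_t segmentsQueued, size_t walSegmentSize, size_t queueMax)
{
    return segmentsQueued * walSegmentSize <= queueMax / 2;
}

int main(void)
{
    size_t walSegmentSize = 16 * 1024 * 1024;   /* standard WAL segment */
    size_t queueMax = 128 * 1024 * 1024;        /* archive-get-queue-max default */

    /* 3 of a possible 8 segments left, so the queue is refilled */
    printf("%s\n", queueNeedsRefill(3, walSegmentSize, queueMax) ? "refill" : "ok");
    return 0;
}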
|
||||
<p>Asynchronous operation is most useful in environments that generate a lot of WAL or have a high latency connection to the repository storage (i.e., <proper>S3</proper>). In the case of a high latency connection it may be a good idea to increase <br-option>process-max</br-option>.</p>
|
||||
|
||||
<p>The <file>[stanza]-archive-get-async.log</file> file can be used to monitor the activity of the asynchronous process.</p>
|
||||
|
||||
<execute-list host="{[host-pg2]}">
|
||||
<title>Check results in the log</title>
|
||||
|
||||
<execute show="n">
|
||||
<exe-cmd>sleep 5</exe-cmd>
|
||||
</execute>
|
||||
<execute output="y">
|
||||
<exe-cmd>cat /var/log/pgbackrest/demo-archive-get-async.log</exe-cmd>
|
||||
<exe-highlight>got WAL file [0-F]{24} from archive</exe-highlight>
|
||||
</execute>
|
||||
</execute-list>
|
||||
</section>
|
||||
|
||||
<execute-list host="{[host-pg1]}">
|
||||
<title>Fix streaming replication by changing the replication password</title>
|
||||
|
||||
<execute output="y" filter="n">
|
||||
<exe-cmd>
|
||||
psql -c "alter user replicator password 'jw8s0F4'"
|
||||
</exe-cmd>
|
||||
</execute>
|
||||
</execute-list>
|
||||
</section>
|
||||
|
||||
<!-- SECTION => STANDBY-BACKUP -->
|
||||
<section id="standby-backup" depend="/replication/streaming">
|
||||
<title>Backup from a Standby</title>
|
||||
|
199 lib/pgBackRest/Archive/Get/Async.pm Normal file
@ -0,0 +1,199 @@
|
||||
####################################################################################################################################
|
||||
# ARCHIVE GET ASYNC MODULE
|
||||
####################################################################################################################################
|
||||
package pgBackRest::Archive::Get::Async;
|
||||
use parent 'pgBackRest::Archive::Get::Get';
|
||||
|
||||
use strict;
|
||||
use warnings FATAL => qw(all);
|
||||
use Carp qw(confess);
|
||||
use English '-no_match_vars';
|
||||
|
||||
use pgBackRest::Common::Exception;
|
||||
use pgBackRest::Common::Lock;
|
||||
use pgBackRest::Common::Log;
|
||||
use pgBackRest::Archive::Common;
|
||||
use pgBackRest::Archive::Info;
|
||||
use pgBackRest::Common::String;
|
||||
use pgBackRest::Common::Wait;
|
||||
use pgBackRest::Config::Config;
|
||||
use pgBackRest::Db;
|
||||
use pgBackRest::DbVersion;
|
||||
use pgBackRest::LibC qw(:lock);
|
||||
use pgBackRest::Protocol::Local::Process;
|
||||
use pgBackRest::Protocol::Helper;
|
||||
use pgBackRest::Storage::Helper;
|
||||
use pgBackRest::Version;
|
||||
|
||||
####################################################################################################################################
|
||||
# constructor
|
||||
####################################################################################################################################
|
||||
sub new
|
||||
{
|
||||
my $class = shift; # Class name
|
||||
|
||||
# Init object
|
||||
my $self = $class->SUPER::new();
|
||||
bless $self, $class;
|
||||
|
||||
# Assign function parameters, defaults, and log debug info
|
||||
(
|
||||
my $strOperation,
|
||||
$self->{strSpoolPath},
|
||||
$self->{strBackRestBin},
|
||||
$self->{rstryWal},
|
||||
) =
|
||||
logDebugParam
|
||||
(
|
||||
__PACKAGE__ . '->new', \@_,
|
||||
{name => 'strSpoolPath'},
|
||||
{name => 'strBackRestBin', default => backrestBin()},
|
||||
{name => 'rstryWal'},
|
||||
);
|
||||
|
||||
# Return from function and log return values if any
|
||||
return logDebugReturn
|
||||
(
|
||||
$strOperation,
|
||||
{name => 'self', value => $self}
|
||||
);
|
||||
}
|
||||
|
||||
####################################################################################################################################
|
||||
# Create the spool directory and initialize the archive process.
|
||||
####################################################################################################################################
|
||||
sub initServer
|
||||
{
|
||||
my $self = shift;
|
||||
|
||||
# Assign function parameters, defaults, and log debug info
|
||||
my ($strOperation) = logDebugParam(__PACKAGE__ . '->initServer');
|
||||
|
||||
# Initialize the archive process
|
||||
$self->{oArchiveProcess} = new pgBackRest::Protocol::Local::Process(
|
||||
CFGOPTVAL_LOCAL_TYPE_BACKUP, cfgOption(CFGOPT_PROTOCOL_TIMEOUT) < 60 ? cfgOption(CFGOPT_PROTOCOL_TIMEOUT) / 2 : 30,
|
||||
$self->{strBackRestBin}, false);
|
||||
$self->{oArchiveProcess}->hostAdd(1, cfgOption(CFGOPT_PROCESS_MAX));
|
||||
|
||||
# Return from function and log return values if any
|
||||
return logDebugReturn($strOperation);
|
||||
}
|
||||
|
||||
####################################################################################################################################
|
||||
# Setup the server and process the queue. This function is separate from processQueue() for testing purposes.
|
||||
####################################################################################################################################
|
||||
sub process
|
||||
{
|
||||
my $self = shift;
|
||||
|
||||
# Assign function parameters, defaults, and log debug info
|
||||
my ($strOperation) = logDebugParam(__PACKAGE__ . '->process');
|
||||
|
||||
# Open the log file
|
||||
logFileSet(storageLocal(), cfgOption(CFGOPT_LOG_PATH) . '/' . cfgOption(CFGOPT_STANZA) . '-archive-get-async');
|
||||
|
||||
# There is no loop here because it seems wise to let the async process exit periodically. As the queue grows each async
|
||||
# execution will naturally run longer. This behavior is also far easier to test.
|
||||
$self->initServer();
|
||||
$self->processQueue();
|
||||
|
||||
# Return from function and log return values if any
|
||||
return logDebugReturn($strOperation);
|
||||
}
|
||||
|
||||
####################################################################################################################################
|
||||
# Get WAL from archive
|
||||
####################################################################################################################################
|
||||
sub processQueue
|
||||
{
|
||||
my $self = shift;
|
||||
|
||||
# Assign function parameters, defaults, and log debug info
|
||||
my ($strOperation) = logDebugParam(__PACKAGE__ . '->processQueue');
|
||||
|
||||
# Queue the jobs
|
||||
foreach my $strWalFile (@{$self->{rstryWal}})
|
||||
{
|
||||
$self->{oArchiveProcess}->queueJob(
|
||||
1, 'default', $strWalFile, OP_ARCHIVE_GET_FILE, [$strWalFile, "$self->{strSpoolPath}/${strWalFile}", true]);
|
||||
}
|
||||
|
||||
# Process jobs
|
||||
my $iFoundTotal = 0;
|
||||
my $iMissingTotal = 0;
|
||||
my $iErrorTotal = 0;
|
||||
|
||||
&log(INFO,
|
||||
'get ' . @{$self->{rstryWal}} . ' WAL file(s) from archive: ' .
|
||||
${$self->{rstryWal}}[0] . (@{$self->{rstryWal}} > 1 ? "...${$self->{rstryWal}}[-1]" : ''));
|
||||
|
||||
eval
|
||||
{
|
||||
# Check for a stop lock
|
||||
lockStopTest();
|
||||
|
||||
while (my $hyJob = $self->{oArchiveProcess}->process())
|
||||
{
|
||||
foreach my $hJob (@{$hyJob})
|
||||
{
|
||||
my $strWalFile = @{$hJob->{rParam}}[0];
|
||||
my $iResult = @{$hJob->{rResult}}[0];
|
||||
|
||||
# If error then write out an error file
|
||||
if (defined($hJob->{oException}))
|
||||
{
|
||||
archiveAsyncStatusWrite(
|
||||
WAL_STATUS_ERROR, $self->{strSpoolPath}, $strWalFile, $hJob->{oException}->code(),
|
||||
$hJob->{oException}->message());
|
||||
|
||||
$iErrorTotal++;
|
||||
|
||||
&log(WARN,
|
||||
"could not get WAL file ${strWalFile} from archive (will be retried): [" .
|
||||
$hJob->{oException}->code() . "] " . $hJob->{oException}->message());
|
||||
}
|
||||
# Else write a '.ok' file to indicate that the WAL was not found but there was no error
|
||||
elsif ($iResult == 1)
|
||||
{
|
||||
archiveAsyncStatusWrite(WAL_STATUS_OK, $self->{strSpoolPath}, $strWalFile);
|
||||
|
||||
$iMissingTotal++;
|
||||
|
||||
&log(DETAIL, "WAL file ${strWalFile} not found in archive", undef, undef, undef, $hJob->{iProcessId});
|
||||
}
|
||||
# Else success so just output a log message
|
||||
else
|
||||
{
|
||||
$iFoundTotal++;
|
||||
|
||||
&log(DETAIL, "got WAL file ${strWalFile} from archive", undef, undef, undef, $hJob->{iProcessId});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
or do
|
||||
{
|
||||
# Get error info
|
||||
my $iCode = exceptionCode($EVAL_ERROR);
|
||||
my $strMessage = exceptionMessage($EVAL_ERROR);
|
||||
|
||||
# Error all queued jobs
|
||||
foreach my $strWalFile (@{$self->{rstryWal}})
|
||||
{
|
||||
archiveAsyncStatusWrite(WAL_STATUS_ERROR, $self->{strSpoolPath}, $strWalFile, $iCode, $strMessage);
|
||||
}
|
||||
};
|
||||
|
||||
return logDebugReturn
|
||||
(
|
||||
$strOperation,
|
||||
{name => 'iNewTotal', value => scalar(@{$self->{rstryWal}})},
|
||||
{name => 'iFoundTotal', value => $iFoundTotal},
|
||||
{name => 'iMissingTotal', value => $iMissingTotal},
|
||||
{name => 'iErrorTotal', value => $iErrorTotal}
|
||||
);
|
||||
}
|
||||
|
||||
1;
|
@ -140,13 +140,15 @@ sub archiveGetFile
|
||||
(
|
||||
$strOperation,
|
||||
$strSourceArchive,
|
||||
$strDestinationFile
|
||||
$strDestinationFile,
|
||||
$bAtomic,
|
||||
) =
|
||||
logDebugParam
|
||||
(
|
||||
__PACKAGE__ . '::archiveGetFile', \@_,
|
||||
{name => 'strSourceArchive'},
|
||||
{name => 'strDestinationFile'}
|
||||
{name => 'strDestinationFile'},
|
||||
{name => 'bAtomic'},
|
||||
);
|
||||
|
||||
lockStopTest();
|
||||
@ -185,7 +187,7 @@ sub archiveGetFile
|
||||
strCipherPass => defined($strCipherPass) ? $strCipherPass : undef}),
|
||||
storageDb()->openWrite(
|
||||
$strDestinationFile,
|
||||
{rhyFilter => $bSourceCompressed ?
|
||||
{bAtomic => $bAtomic, rhyFilter => $bSourceCompressed ?
|
||||
[{strClass => STORAGE_FILTER_GZIP, rxyParam => [{strCompressType => STORAGE_DECOMPRESS}]}] : undef}));
|
||||
}
|
||||
|
||||
|
@ -43,42 +43,60 @@ sub process
|
||||
my
|
||||
(
|
||||
$strOperation,
|
||||
$strSourceArchive,
|
||||
$strDestinationFile
|
||||
$rstryCommandArg,
|
||||
) =
|
||||
logDebugParam
|
||||
(
|
||||
__PACKAGE__ . '->process', \@_,
|
||||
{name => 'strSourceArchive'},
|
||||
{name => 'strDestinationFile'}
|
||||
{name => 'rstryCommandArg'},
|
||||
);
|
||||
|
||||
my $iResult = 0;
|
||||
|
||||
# Make sure the command happens on the db side
|
||||
if (!isDbLocal())
|
||||
{
|
||||
confess &log(ERROR, cfgCommandName(CFGCMD_ARCHIVE_GET) . ' operation must run on db host', ERROR_HOST_INVALID);
|
||||
}
|
||||
|
||||
# Make sure the archive file is defined
|
||||
if (!defined($strSourceArchive))
|
||||
# Start the async process and wait for WAL to complete
|
||||
if (cfgOption(CFGOPT_ARCHIVE_ASYNC))
|
||||
{
|
||||
confess &log(ERROR, 'WAL segment not provided', ERROR_PARAM_REQUIRED);
|
||||
# Load module dynamically
|
||||
require pgBackRest::Archive::Get::Async;
|
||||
(new pgBackRest::Archive::Get::Async(
|
||||
storageSpool()->pathGet(STORAGE_SPOOL_ARCHIVE_IN), $self->{strBackRestBin}, $rstryCommandArg))->process();
|
||||
}
|
||||
|
||||
# Make sure the destination file is defined
|
||||
if (!defined($strDestinationFile))
|
||||
# Else get synchronously
|
||||
else
|
||||
{
|
||||
confess &log(ERROR, 'WAL segment destination not provided', ERROR_PARAM_REQUIRED);
|
||||
}
|
||||
# Make sure the archive file is defined
|
||||
my $strSourceArchive = ${$rstryCommandArg}[0];
|
||||
|
||||
# Info for the Postgres log
|
||||
&log(INFO, 'get WAL segment ' . $strSourceArchive);
|
||||
if (!defined($strSourceArchive))
|
||||
{
|
||||
confess &log(ERROR, 'WAL segment not provided', ERROR_PARAM_REQUIRED);
|
||||
}
|
||||
|
||||
# Make sure the destination file is defined
|
||||
my $strDestinationFile = ${$rstryCommandArg}[1];
|
||||
|
||||
if (!defined($strDestinationFile))
|
||||
{
|
||||
confess &log(ERROR, 'WAL segment destination not provided', ERROR_PARAM_REQUIRED);
|
||||
}
|
||||
|
||||
$iResult = archiveGetFile($strSourceArchive, $strDestinationFile, false);
|
||||
|
||||
# Info for the Postgres log
|
||||
&log(INFO, 'got WAL segment ' . $strSourceArchive);
|
||||
}
|
||||
|
||||
# Return from function and log return values if any
|
||||
return logDebugReturn
|
||||
(
|
||||
$strOperation,
|
||||
{name => 'iResult', value => archiveGetFile($strSourceArchive, $strDestinationFile), trace => true},
|
||||
{name => 'iResult', value => $iResult, trace => true},
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -9,14 +9,6 @@ use warnings FATAL => qw(all);
|
||||
use Carp qw(confess);
|
||||
use English '-no_match_vars';
|
||||
|
||||
use Exporter qw(import);
|
||||
our @EXPORT = qw();
|
||||
use Fcntl qw(SEEK_CUR O_RDONLY O_WRONLY O_CREAT);
|
||||
use File::Basename qw(dirname basename);
|
||||
use IO::Socket::UNIX;
|
||||
use POSIX qw(setsid);
|
||||
use Scalar::Util qw(blessed);
|
||||
|
||||
use pgBackRest::Common::Exception;
|
||||
use pgBackRest::Common::Lock;
|
||||
use pgBackRest::Common::Log;
|
||||
@ -153,6 +145,9 @@ sub processQueue
|
||||
|
||||
eval
|
||||
{
|
||||
# Check for a stop lock
|
||||
lockStopTest();
|
||||
|
||||
# Hold a lock when the repo is remote to be sure no other process is pushing WAL
|
||||
!isRepoLocal() && protocolGet(CFGOPTVAL_REMOTE_TYPE_BACKUP);
|
||||
|
||||
@ -176,7 +171,7 @@ sub processQueue
|
||||
$iErrorTotal++;
|
||||
|
||||
&log(WARN,
|
||||
"could not push WAl file ${strWalFile} to archive (will be retried): [" .
|
||||
"could not push WAL file ${strWalFile} to archive (will be retried): [" .
|
||||
$hJob->{oException}->code() . "] " . $hJob->{oException}->message());
|
||||
}
|
||||
# Else write success
|
||||
|
@ -56,9 +56,6 @@ sub process
|
||||
confess &log(ERROR, 'WAL file to push required', ERROR_PARAM_REQUIRED);
|
||||
}
|
||||
|
||||
# Check for a stop lock
|
||||
lockStopTest();
|
||||
|
||||
# Extract WAL path and file
|
||||
my $strWalPath = dirname(walPath($strWalPathFile, cfgOption(CFGOPT_PG_PATH, false), cfgCommandName(cfgCommandGet())));
|
||||
my $strWalFile = basename($strWalPathFile);
|
||||
@ -74,6 +71,9 @@ sub process
|
||||
# Else push synchronously
|
||||
else
|
||||
{
|
||||
# Check for a stop lock
|
||||
lockStopTest();
|
||||
|
||||
# Load module dynamically
|
||||
require pgBackRest::Archive::Push::File;
|
||||
pgBackRest::Archive::Push::File->import();
|
||||
|
@ -86,7 +86,7 @@ sub main
|
||||
require pgBackRest::Archive::Get::Get;
|
||||
pgBackRest::Archive::Get::Get->import();
|
||||
|
||||
$result = new pgBackRest::Archive::Get::Get()->process($stryCommandArg[0], $stryCommandArg[1]);
|
||||
$result = new pgBackRest::Archive::Get::Get()->process(\@stryCommandArg);
|
||||
}
|
||||
|
||||
# Process remote command
|
||||
@ -279,10 +279,16 @@ sub main
|
||||
# are other errors that could be arriving in $EVAL_ERROR.
|
||||
my $oException = defined($EVAL_ERROR) && length($EVAL_ERROR) > 0 ? $EVAL_ERROR : logErrorLast();
|
||||
|
||||
# If a backrest exception then only return the code since the message has already been logged
|
||||
# If a backrest exception
|
||||
if (isException(\$oException))
|
||||
{
|
||||
$result = $oException->code();
|
||||
|
||||
# Only return message if we are in an async process since this will not be logged to the console
|
||||
if (!$bConfigLoaded && cfgOption(CFGOPT_ARCHIVE_ASYNC))
|
||||
{
|
||||
$message = $oException->message();
|
||||
}
|
||||
}
|
||||
# Else a regular Perl exception
|
||||
else
|
||||
|
@ -28,11 +28,9 @@ use constant OP_ARCHIVE_GET_CHECK => 'archiveC
|
||||
use constant OP_ARCHIVE_PUSH_CHECK => 'archivePushCheck';
|
||||
push @EXPORT, qw(OP_ARCHIVE_PUSH_CHECK);
|
||||
|
||||
# Archive Push Async Module
|
||||
use constant OP_ARCHIVE_PUSH_ASYNC => 'archivePushAsync';
|
||||
push @EXPORT, qw(OP_ARCHIVE_PUSH_ASYNC);
|
||||
|
||||
# Archive File Module
|
||||
use constant OP_ARCHIVE_GET_FILE => 'archiveGetFile';
|
||||
push @EXPORT, qw(OP_ARCHIVE_GET_FILE);
|
||||
use constant OP_ARCHIVE_PUSH_FILE => 'archivePushFile';
|
||||
push @EXPORT, qw(OP_ARCHIVE_PUSH_FILE);
|
||||
|
||||
|
@ -8,6 +8,7 @@ use strict;
|
||||
use warnings FATAL => qw(all);
|
||||
use Carp qw(confess);
|
||||
|
||||
use pgBackRest::Archive::Get::File;
|
||||
use pgBackRest::Archive::Push::File;
|
||||
use pgBackRest::Backup::File;
|
||||
use pgBackRest::Common::Log;
|
||||
@ -54,6 +55,7 @@ sub init
|
||||
# Create anonymous subs for each command
|
||||
my $hCommandMap =
|
||||
{
|
||||
&OP_ARCHIVE_GET_FILE => sub {archiveGetFile(@{shift()})},
|
||||
&OP_ARCHIVE_PUSH_FILE => sub {archivePushFile(@{shift()})},
|
||||
&OP_BACKUP_FILE => sub {backupFile(@{shift()})},
|
||||
&OP_RESTORE_FILE => sub {restoreFile(@{shift()})},
|
||||
|
@ -25,6 +25,8 @@ use constant STORAGE_LOCAL => '<LOCAL>'
|
||||
|
||||
use constant STORAGE_SPOOL => '<SPOOL>';
|
||||
push @EXPORT, qw(STORAGE_SPOOL);
|
||||
use constant STORAGE_SPOOL_ARCHIVE_IN => '<SPOOL:ARCHIVE:IN>';
|
||||
push @EXPORT, qw(STORAGE_SPOOL_ARCHIVE_IN);
|
||||
use constant STORAGE_SPOOL_ARCHIVE_OUT => '<SPOOL:ARCHIVE:OUT>';
|
||||
push @EXPORT, qw(STORAGE_SPOOL_ARCHIVE_OUT);
|
||||
|
||||
@ -108,6 +110,7 @@ sub storageSpool
|
||||
# Path rules
|
||||
my $hRule =
|
||||
{
|
||||
&STORAGE_SPOOL_ARCHIVE_IN => "archive/${strStanza}/in",
|
||||
&STORAGE_SPOOL_ARCHIVE_OUT => "archive/${strStanza}/out",
|
||||
};
|
||||
|
||||
|
@ -118,6 +118,7 @@ sub libcAutoExportTag
|
||||
'CFGOPT_ARCHIVE_ASYNC',
|
||||
'CFGOPT_ARCHIVE_CHECK',
|
||||
'CFGOPT_ARCHIVE_COPY',
|
||||
'CFGOPT_ARCHIVE_GET_QUEUE_MAX',
|
||||
'CFGOPT_ARCHIVE_PUSH_QUEUE_MAX',
|
||||
'CFGOPT_ARCHIVE_TIMEOUT',
|
||||
'CFGOPT_BACKUP_STANDBY',
|
||||
|
@ -32,6 +32,7 @@ Option constants
|
||||
#define CFGOPT_ARCHIVE_ASYNC cfgOptArchiveAsync
|
||||
#define CFGOPT_ARCHIVE_CHECK cfgOptArchiveCheck
|
||||
#define CFGOPT_ARCHIVE_COPY cfgOptArchiveCopy
|
||||
#define CFGOPT_ARCHIVE_GET_QUEUE_MAX cfgOptArchiveGetQueueMax
|
||||
#define CFGOPT_ARCHIVE_PUSH_QUEUE_MAX cfgOptArchivePushQueueMax
|
||||
#define CFGOPT_ARCHIVE_TIMEOUT cfgOptArchiveTimeout
|
||||
#define CFGOPT_BACKUP_STANDBY cfgOptBackupStandby
|
||||
|
@ -55,6 +55,7 @@ DESTDIR =
|
||||
####################################################################################################################################
|
||||
SRCS = \
|
||||
command/archive/common.c \
|
||||
command/archive/get/get.c \
|
||||
command/archive/push/push.c \
|
||||
command/help/help.c \
|
||||
command/command.c \
|
||||
@ -82,6 +83,7 @@ SRCS = \
|
||||
config/parse.c \
|
||||
perl/config.c \
|
||||
perl/exec.c \
|
||||
postgres/info.c \
|
||||
storage/driver/posix/driver.c \
|
||||
storage/driver/posix/driverFile.c \
|
||||
storage/driver/posix/driverRead.c \
|
||||
|
@ -17,14 +17,16 @@ Archive Push Command
|
||||
Check for ok/error status files in the spool in/out directory
|
||||
***********************************************************************************************************************************/
|
||||
bool
|
||||
archiveAsyncStatus(const String *walSegment, bool confessOnError)
|
||||
archiveAsyncStatus(ArchiveMode archiveMode, const String *walSegment, bool confessOnError)
|
||||
{
|
||||
bool result = false;
|
||||
|
||||
MEM_CONTEXT_TEMP_BEGIN()
|
||||
{
|
||||
String *spoolQueue = strNew(archiveMode == archiveModeGet ? STORAGE_SPOOL_ARCHIVE_IN : STORAGE_SPOOL_ARCHIVE_OUT);
|
||||
|
||||
StringList *fileList = storageListP(
|
||||
storageSpool(), strNew(STORAGE_SPOOL_ARCHIVE_OUT), .expression = strNewFmt("^%s\\.(ok|error)$", strPtr(walSegment)));
|
||||
storageSpool(), spoolQueue, .expression = strNewFmt("^%s\\.(ok|error)$", strPtr(walSegment)));
|
||||
|
||||
if (fileList != NULL && strLstSize(fileList) > 0)
|
||||
{
|
||||
@ -33,14 +35,14 @@ archiveAsyncStatus(const String *walSegment, bool confessOnError)
|
||||
{
|
||||
THROW(
|
||||
AssertError, "multiple status files found in '%s' for WAL segment '%s'",
|
||||
strPtr(storagePathNP(storageSpool(), strNew(STORAGE_SPOOL_ARCHIVE_OUT))), strPtr(walSegment));
|
||||
strPtr(storagePath(storageSpool(), spoolQueue)), strPtr(walSegment));
|
||||
}
|
||||
|
||||
// Get the status file content
|
||||
const String *statusFile = strLstGet(fileList, 0);
|
||||
|
||||
String *content = strNewBuf(
|
||||
storageGetNP(storageNewReadNP(storageSpool(), strNewFmt("%s/%s", STORAGE_SPOOL_ARCHIVE_OUT, strPtr(statusFile)))));
|
||||
storageGetNP(storageNewReadNP(storageSpool(), strNewFmt("%s/%s", strPtr(spoolQueue), strPtr(statusFile)))));
|
||||
|
||||
// Get the code and message if the file has content
|
||||
int code = 0;
|
||||
|
@ -6,12 +6,36 @@ Archive Common
|
||||
|
||||
#include <sys/types.h>
|
||||
|
||||
/***********************************************************************************************************************************
|
||||
Archive mode enum
|
||||
|
||||
Used for functions that are common to both archive-push and archive-get so they can tailor their behavior to the command being run.
|
||||
***********************************************************************************************************************************/
|
||||
typedef enum
|
||||
{
|
||||
archiveModePush,
|
||||
archiveModeGet,
|
||||
} ArchiveMode;
|
||||
|
||||
#include "common/type/stringList.h"
|
||||
|
||||
/***********************************************************************************************************************************
|
||||
WAL segment constants
|
||||
***********************************************************************************************************************************/
|
||||
// Only match on a WAL segment without checksum appended
|
||||
#define WAL_SEGMENT_REGEXP "^[0-F]{24}$"
|
||||
|
||||
// Defines the size of a standard WAL segment name -- this should never change
|
||||
#define WAL_SEGMENT_NAME_SIZE ((uint)24)
|
||||
|
||||
// Default size of a WAL segment
|
||||
#define WAL_SEGMENT_DEFAULT_SIZE ((size_t)(16 * 1024 * 1024))
|
||||
|
||||
/***********************************************************************************************************************************
|
||||
Functions
|
||||
***********************************************************************************************************************************/
|
||||
bool archiveAsyncStatus(const String *walSegment, bool confessOnError);
|
||||
bool archiveAsyncStatus(ArchiveMode archiveMode, const String *walSegment, bool confessOnError);
|
||||
|
||||
String *walSegmentNext(const String *walSegment, size_t walSegmentSize, uint pgVersion);
|
||||
StringList *walSegmentRange(const String *walSegmentBegin, size_t walSegmentSize, uint pgVersion, uint range);
|
||||
|
||||
|
274 src/command/archive/get/get.c Normal file
@ -0,0 +1,274 @@
|
||||
/***********************************************************************************************************************************
|
||||
Archive Get Command
|
||||
***********************************************************************************************************************************/
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include "command/archive/common.h"
|
||||
#include "command/command.h"
|
||||
#include "common/fork.h"
|
||||
#include "common/log.h"
|
||||
#include "common/memContext.h"
|
||||
#include "common/regExp.h"
|
||||
#include "common/wait.h"
|
||||
#include "config/config.h"
|
||||
#include "config/load.h"
|
||||
#include "perl/exec.h"
|
||||
#include "postgres/info.h"
|
||||
#include "storage/helper.h"
|
||||
|
||||
/***********************************************************************************************************************************
|
||||
Clean the queue and prepare a list of WAL segments that the async process should get
|
||||
***********************************************************************************************************************************/
|
||||
static StringList *
|
||||
queueNeed(const String *walSegment, bool found, size_t queueSize, size_t walSegmentSize, uint pgVersion)
|
||||
{
|
||||
StringList *result = strLstNew();
|
||||
|
||||
MEM_CONTEXT_TEMP_BEGIN()
|
||||
{
|
||||
// Determine the first WAL segment for the async process to get. If the WAL segment requested by
|
||||
// PostgreSQL was not found then use that. If the segment was found but the queue is not full then
|
||||
// start with the next segment.
|
||||
const String *walSegmentFirst =
|
||||
found ? walSegmentNext(walSegment, walSegmentSize, pgVersion) : walSegment;
|
||||
|
||||
// Determine how many WAL segments should be in the queue. The queue total must be at least 2 or it doesn't make sense to
|
||||
// have async turned on at all.
|
||||
uint walSegmentQueueTotal = (uint)(queueSize / walSegmentSize);
|
||||
|
||||
if (walSegmentQueueTotal < 2)
|
||||
walSegmentQueueTotal = 2;
|
||||
|
||||
// Build the ideal queue -- the WAL segments we want in the queue after the async process has run
|
||||
StringList *idealQueue = walSegmentRange(walSegmentFirst, walSegmentSize, pgVersion, walSegmentQueueTotal);
|
||||
|
||||
// Get the list of files actually in the queue
|
||||
StringList *actualQueue = strLstSort(
|
||||
storageListP(storageSpool(), strNew(STORAGE_SPOOL_ARCHIVE_IN), .errorOnMissing = true), sortOrderAsc);
|
||||
|
||||
// Only preserve files that match the ideal queue. '.error'/'.ok' files are deleted so the async process can try again.
|
||||
RegExp *regExpPreserve = regExpNew(strNewFmt("^(%s)$", strPtr(strLstJoin(idealQueue, "|"))));
|
||||
|
||||
// Build a list of WAL segments that are being kept so we can later make a list of what is needed
|
||||
StringList *keepQueue = strLstNew();
|
||||
|
||||
for (uint actualQueueIdx = 0; actualQueueIdx < strLstSize(actualQueue); actualQueueIdx++)
|
||||
{
|
||||
// Get file from actual queue
|
||||
const String *file = strLstGet(actualQueue, actualQueueIdx);
|
||||
|
||||
// Does this match a file we want to preserve?
|
||||
if (regExpMatch(regExpPreserve, file))
|
||||
strLstAdd(keepQueue, file);
|
||||
|
||||
// Else delete it
|
||||
else
|
||||
storageRemoveNP(storageSpool(), strNewFmt(STORAGE_SPOOL_ARCHIVE_IN "/%s", strPtr(file)));
|
||||
}
|
||||
|
||||
// Generate a list of the WAL that are needed by removing kept WAL from the ideal queue
|
||||
for (uint idealQueueIdx = 0; idealQueueIdx < strLstSize(idealQueue); idealQueueIdx++)
|
||||
{
|
||||
if (!strLstExists(keepQueue, strLstGet(idealQueue, idealQueueIdx)))
|
||||
strLstAdd(result, strLstGet(idealQueue, idealQueueIdx));
|
||||
}
|
||||
}
|
||||
MEM_CONTEXT_TEMP_END();
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/***********************************************************************************************************************************
|
||||
Get a WAL segment from the repository
|
||||
***********************************************************************************************************************************/
|
||||
int
|
||||
cmdArchiveGet()
|
||||
{
|
||||
int result = 1;
|
||||
|
||||
MEM_CONTEXT_TEMP_BEGIN()
|
||||
{
|
||||
// Check the parameters
|
||||
const StringList *commandParam = cfgCommandParam();
|
||||
|
||||
if (strLstSize(commandParam) != 2)
|
||||
{
|
||||
if (strLstSize(commandParam) == 0)
|
||||
THROW(ParamRequiredError, "WAL segment to get required");
|
||||
|
||||
if (strLstSize(commandParam) == 1)
|
||||
THROW(ParamRequiredError, "Path to copy WAL segment required");
|
||||
|
||||
THROW(ParamRequiredError, "extra parameters found");
|
||||
}
|
||||
|
||||
// Get the segment name
|
||||
String *walSegment = strBase(strLstGet(commandParam, 0));
|
||||
|
||||
// Destination is wherever we were told to move the WAL segment. In some cases the path that PostgreSQL passes will not be
|
||||
// absolute so prefix pg-path.
|
||||
const String *walDestination = strLstGet(commandParam, 1);
|
||||
|
||||
if (!strBeginsWithZ(walDestination, "/"))
|
||||
walDestination = strNewFmt("%s/%s", strPtr(cfgOptionStr(cfgOptPgPath)), strPtr(walDestination));
|
||||
|
||||
// Async get can only be performed on WAL segments, history or other files must use synchronous mode
|
||||
if (cfgOptionBool(cfgOptArchiveAsync) && regExpMatchOne(strNew(WAL_SEGMENT_REGEXP), walSegment))
|
||||
{
|
||||
bool found = false; // Has the WAL segment been found yet?
|
||||
bool queueFull = false; // Is the queue half or more full?
|
||||
bool forked = false; // Has the async process been forked yet?
|
||||
bool confessOnError = false; // Should we confess errors?
|
||||
|
||||
// Loop and wait for the WAL segment to be pushed
|
||||
Wait *wait = waitNew(cfgOptionDbl(cfgOptArchiveTimeout));
|
||||
|
||||
do
|
||||
{
|
||||
// Check for errors or missing files. For archive-get '.ok' indicates that the process succeeded but there is no
|
||||
// WAL file to download.
|
||||
if (archiveAsyncStatus(archiveModeGet, walSegment, confessOnError))
|
||||
{
|
||||
storageRemoveP(
|
||||
storageSpool(), strNewFmt(STORAGE_SPOOL_ARCHIVE_IN "/%s.ok", strPtr(walSegment)), .errorOnMissing = true);
|
||||
|
||||
LOG_INFO("unable to find WAL segment %s", strPtr(walSegment));
|
||||
break;
|
||||
}
|
||||
|
||||
// Check if the WAL segment is already in the queue
|
||||
found = storageExistsNP(storageSpool(), strNewFmt(STORAGE_SPOOL_ARCHIVE_IN "/%s", strPtr(walSegment)));
|
||||
|
||||
// If found then move the WAL segment to the destination directory
|
||||
if (found)
|
||||
{
|
||||
// Source is the WAL segment in the spool queue
|
||||
StorageFileRead *source = storageNewReadNP(
|
||||
storageSpool(), strNewFmt(STORAGE_SPOOL_ARCHIVE_IN "/%s", strPtr(walSegment)));
|
||||
|
||||
// A move will be attempted but if the spool queue and the WAL path are on different file systems then a copy
|
||||
// will be performed instead.
|
||||
//
|
||||
// It looks scary that we are disabling syncs and atomicity (in case we need to copy instead of move) but this
|
||||
// is safe because if the system crashes Postgres will not try to reuse a restored WAL segment but will instead
|
||||
// request it again using the restore_command. In the case of a move this hardly matters since path syncs are
|
||||
// cheap but if a copy is required we could save a lot of writes.
|
||||
StorageFileWrite *destination = storageNewWriteP(
|
||||
storageLocalWrite(), walDestination, .noCreatePath = true, .noSyncFile = true, .noSyncPath = true,
|
||||
.noAtomic = true);
|
||||
|
||||
// Move (or copy if required) the file
|
||||
storageMoveNP(source, destination);
|
||||
|
||||
// Log success
|
||||
LOG_INFO("got WAL segment %s asynchronously", strPtr(walSegment));
|
||||
result = 0;
|
||||
|
||||
// Get a list of WAL segments left in the queue
|
||||
StringList *queue = storageListP(
|
||||
storageSpool(), strNew(STORAGE_SPOOL_ARCHIVE_IN), .expression = strNew(WAL_SEGMENT_REGEXP));
|
||||
|
||||
if (strLstSize(queue) > 0)
|
||||
{
|
||||
// Get size of the WAL segment
|
||||
size_t walSegmentSize = storageInfoNP(storageLocal(), walDestination).size;
|
||||
|
||||
// Use WAL segment size to estimate queue size and determine if the async process should be launched
|
||||
queueFull =
|
||||
strLstSize(queue) * walSegmentSize > (size_t)cfgOptionInt64(cfgOptArchiveGetQueueMax) / 2;
|
||||
}
|
||||
}
|
||||
|
||||
// If the WAL segment has not already been found then start the async process to get it. There's no point in
|
||||
// forking the async process off more than once so track that as well. Use an archive lock to prevent more than
|
||||
// one async process being launched.
|
||||
if (!forked && (!found || !queueFull) &&
|
||||
lockAcquire(cfgOptionStr(cfgOptLockPath), cfgOptionStr(cfgOptStanza), cfgLockType(), 0, false))
|
||||
{
|
||||
// Fork off the async process
|
||||
if (fork() == 0)
|
||||
{
|
||||
// Async process returns 0 unless there is an error
|
||||
result = 0;
|
||||
|
||||
// Execute async process and catch exceptions
|
||||
TRY_BEGIN()
|
||||
{
|
||||
// Get the version of PostgreSQL
|
||||
uint pgVersion = pgControlInfo(cfgOptionStr(cfgOptPgPath)).version;
|
||||
|
||||
// Determine WAL segment size -- for now this is the default but for PG11 it will be configurable
|
||||
uint walSegmentSize = WAL_SEGMENT_DEFAULT_SIZE;
|
||||
|
||||
// Create the queue
|
||||
storagePathCreateNP(storageSpool(), strNew(STORAGE_SPOOL_ARCHIVE_IN));
|
||||
|
||||
// Clean the current queue using the list of WAL that we ideally want in the queue. queueNeed()
|
||||
// will return the list of WAL needed to fill the queue and this will be passed to the async process.
|
||||
cfgCommandParamSet(
|
||||
queueNeed(
|
||||
walSegment, found, (size_t)cfgOptionInt64(cfgOptArchiveGetQueueMax), walSegmentSize,
|
||||
pgVersion));
|
||||
|
||||
// The async process should not output on the console at all
|
||||
cfgOptionSet(cfgOptLogLevelConsole, cfgSourceParam, varNewStrZ("off"));
|
||||
cfgOptionSet(cfgOptLogLevelStderr, cfgSourceParam, varNewStrZ("off"));
|
||||
cfgLoadLogSetting();
|
||||
|
||||
// Open the log file
|
||||
logFileSet(
|
||||
strPtr(strNewFmt("%s/%s-%s-async.log", strPtr(cfgOptionStr(cfgOptLogPath)),
|
||||
strPtr(cfgOptionStr(cfgOptStanza)), cfgCommandName(cfgCommand()))));
|
||||
|
||||
// Log command info since we are starting a new log
|
||||
cmdBegin(true);
|
||||
|
||||
// Detach from parent process
|
||||
forkDetach();
|
||||
|
||||
perlExec();
|
||||
}
|
||||
CATCH_ANY()
|
||||
{
|
||||
RETHROW();
|
||||
}
|
||||
FINALLY()
|
||||
{
|
||||
// Release the lock (mostly here for testing since it would be freed in exitSafe() anyway)
|
||||
lockRelease(true);
|
||||
}
|
||||
TRY_END();
|
||||
|
||||
break;
|
||||
}
|
||||
// Else mark async process as forked
|
||||
else
|
||||
{
|
||||
lockClear(true);
|
||||
forked = true;
|
||||
}
|
||||
}
|
||||
|
||||
// Exit loop if WAL was found
|
||||
if (found)
|
||||
break;
|
||||
|
||||
// Now that the async process has been launched, confess any errors that are found
|
||||
confessOnError = true;
|
||||
}
|
||||
while (waitMore(wait));
|
||||
}
|
||||
else
|
||||
{
|
||||
// Disable async if it was enabled
|
||||
cfgOptionSet(cfgOptArchiveAsync, cfgOptionSource(cfgOptArchiveAsync), varNewBool(false));
|
||||
|
||||
// Call synchronous get
|
||||
result = perlExec();
|
||||
}
|
||||
}
|
||||
MEM_CONTEXT_TEMP_END();
|
||||
|
||||
return result;
|
||||
}
|
12 src/command/archive/get/get.h Normal file
@ -0,0 +1,12 @@
|
||||
/***********************************************************************************************************************************
|
||||
Archive Get Command
|
||||
***********************************************************************************************************************************/
|
||||
#ifndef COMMAND_ARCHIVE_GET_GET_H
|
||||
#define COMMAND_ARCHIVE_GET_GET_H
|
||||
|
||||
/***********************************************************************************************************************************
|
||||
Functions
|
||||
***********************************************************************************************************************************/
|
||||
int cmdArchiveGet();
|
||||
|
||||
#endif
|
@ -45,7 +45,7 @@ cmdArchivePush()
|
||||
{
|
||||
// Check if the WAL segment has been pushed. Errors will not be confessed on the first try to allow the async
|
||||
// process a chance to fix them.
|
||||
pushed = archiveAsyncStatus(walSegment, confessOnError);
|
||||
pushed = archiveAsyncStatus(archiveModePush, walSegment, confessOnError);
|
||||
|
||||
// If the WAL segment has not already been pushed then start the async process to push it. There's no point in
|
||||
// forking the async process off more than once so track that as well. Use an archive lock to prevent more than
|
||||
|
@ -17,7 +17,7 @@ static ConfigCommandData configCommandData[CFG_COMMAND_TOTAL] = CONFIG_COMMAND_L
|
||||
CONFIG_COMMAND_LOG_LEVEL_DEFAULT(logLevelInfo)
|
||||
CONFIG_COMMAND_LOG_LEVEL_STDERR_MAX(logLevelTrace)
|
||||
CONFIG_COMMAND_LOCK_REQUIRED(false)
|
||||
CONFIG_COMMAND_LOCK_TYPE(lockTypeNone)
|
||||
CONFIG_COMMAND_LOCK_TYPE(lockTypeArchive)
|
||||
)
|
||||
|
||||
CONFIG_COMMAND
|
||||
@ -215,6 +215,14 @@ static ConfigOptionData configOptionData[CFG_OPTION_TOTAL] = CONFIG_OPTION_LIST
|
||||
CONFIG_OPTION_DEFINE_ID(cfgDefOptArchiveCopy)
|
||||
)
|
||||
|
||||
//------------------------------------------------------------------------------------------------------------------------------
|
||||
CONFIG_OPTION
|
||||
(
|
||||
CONFIG_OPTION_NAME("archive-get-queue-max")
|
||||
CONFIG_OPTION_INDEX(0)
|
||||
CONFIG_OPTION_DEFINE_ID(cfgDefOptArchiveGetQueueMax)
|
||||
)
|
||||
|
||||
//------------------------------------------------------------------------------------------------------------------------------
|
||||
CONFIG_OPTION
|
||||
(
|
||||
|
@ -14,7 +14,7 @@ Command constants
|
||||
/***********************************************************************************************************************************
|
||||
Option constants
|
||||
***********************************************************************************************************************************/
|
||||
#define CFG_OPTION_TOTAL 159
|
||||
#define CFG_OPTION_TOTAL 160
|
||||
|
||||
/***********************************************************************************************************************************
|
||||
Command enum
|
||||
@ -48,6 +48,7 @@ typedef enum
|
||||
cfgOptArchiveAsync,
|
||||
cfgOptArchiveCheck,
|
||||
cfgOptArchiveCopy,
|
||||
cfgOptArchiveGetQueueMax,
|
||||
cfgOptArchivePushQueueMax,
|
||||
cfgOptArchiveTimeout,
|
||||
cfgOptBackupStandby,
|
||||
@ -80,16 +81,16 @@ typedef enum
|
||||
cfgOptOutput,
|
||||
cfgOptPerlOption,
|
||||
cfgOptPgHost,
|
||||
cfgOptPgHostCmd = 42,
|
||||
cfgOptPgHostConfig = 50,
|
||||
cfgOptPgHostConfigIncludePath = 58,
|
||||
cfgOptPgHostConfigPath = 66,
|
||||
cfgOptPgHostPort = 74,
|
||||
cfgOptPgHostUser = 82,
|
||||
cfgOptPgPath = 90,
|
||||
cfgOptPgPort = 98,
|
||||
cfgOptPgSocketPath = 106,
|
||||
cfgOptProcess = 114,
|
||||
cfgOptPgHostCmd = 43,
|
||||
cfgOptPgHostConfig = 51,
|
||||
cfgOptPgHostConfigIncludePath = 59,
|
||||
cfgOptPgHostConfigPath = 67,
|
||||
cfgOptPgHostPort = 75,
|
||||
cfgOptPgHostUser = 83,
|
||||
cfgOptPgPath = 91,
|
||||
cfgOptPgPort = 99,
|
||||
cfgOptPgSocketPath = 107,
|
||||
cfgOptProcess = 115,
|
||||
cfgOptProcessMax,
|
||||
cfgOptProtocolTimeout,
|
||||
cfgOptRecoveryOption,
|
||||
|
@ -263,7 +263,6 @@ cfgCommandParamSet(const StringList *param)
|
||||
paramList = strLstDup(param);
|
||||
}
|
||||
MEM_CONTEXT_END();
|
||||
|
||||
}
|
||||
|
||||
/***********************************************************************************************************************************
|
||||
|
@ -228,16 +228,18 @@ static ConfigDefineOptionData configDefineOptionData[] = CFGDEFDATA_OPTION_LIST
|
||||
CFGDEFDATA_OPTION_SECURE(false)
|
||||
|
||||
CFGDEFDATA_OPTION_HELP_SECTION("archive")
|
||||
CFGDEFDATA_OPTION_HELP_SUMMARY("Archive WAL segments asynchronously.")
|
||||
CFGDEFDATA_OPTION_HELP_SUMMARY("Push/get WAL segments asynchronously.")
|
||||
CFGDEFDATA_OPTION_HELP_DESCRIPTION
|
||||
(
|
||||
"WAL segments will be copied to the local repo, then a process will be forked to compress the segment and transfer it "
|
||||
"to the remote repo if configured. Control will be returned to PostgreSQL as soon as the WAL segment is copied "
|
||||
"locally."
|
||||
"Enables asynchronous operation for the archive-push and archive-get commands.\n"
|
||||
"\n"
|
||||
"Asynchronous operation is more efficient because it can reuse connections and take advantage of parallelism. See the "
|
||||
"spool-path, archive-get-queue-max, and archive-push-queue-max options for more information."
|
||||
)
|
||||
|
||||
CFGDEFDATA_OPTION_COMMAND_LIST
|
||||
(
|
||||
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdArchiveGet)
|
||||
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdArchivePush)
|
||||
)
|
||||
|
||||
@ -335,6 +337,40 @@ static ConfigDefineOptionData configDefineOptionData[] = CFGDEFDATA_OPTION_LIST
|
||||
)
|
||||
)
|
||||
|
||||
// -----------------------------------------------------------------------------------------------------------------------------
|
||||
CFGDEFDATA_OPTION
|
||||
(
|
||||
CFGDEFDATA_OPTION_NAME("archive-get-queue-max")
|
||||
CFGDEFDATA_OPTION_REQUIRED(true)
|
||||
CFGDEFDATA_OPTION_SECTION(cfgDefSectionGlobal)
|
||||
CFGDEFDATA_OPTION_TYPE(cfgDefOptTypeInteger)
|
||||
CFGDEFDATA_OPTION_INTERNAL(false)
|
||||
|
||||
CFGDEFDATA_OPTION_INDEX_TOTAL(1)
|
||||
CFGDEFDATA_OPTION_SECURE(false)
|
||||
|
||||
CFGDEFDATA_OPTION_HELP_SECTION("archive")
|
||||
CFGDEFDATA_OPTION_HELP_SUMMARY("Maximum size of the pgBackRest archive-get queue.")
|
||||
CFGDEFDATA_OPTION_HELP_DESCRIPTION
|
||||
(
|
||||
"Specifies the maximum size of the archive-get queue when archive-async is enabled. The queue is stored in the "
|
||||
"spool-path and is used to speed providing WAL to PostgreSQL.\n"
|
||||
"\n"
|
||||
"Size can be entered in bytes (default) or KB, MB, GB, TB, or PB where the multiplier is a power of 1024."
|
||||
)
|
||||
|
||||
CFGDEFDATA_OPTION_COMMAND_LIST
|
||||
(
|
||||
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdArchiveGet)
|
||||
)
|
||||
|
||||
CFGDEFDATA_OPTION_OPTIONAL_LIST
|
||||
(
|
||||
CFGDEFDATA_OPTION_OPTIONAL_ALLOW_RANGE(0, 4503599627370496)
|
||||
CFGDEFDATA_OPTION_OPTIONAL_DEFAULT("134217728")
|
||||
)
|
||||
)
|
||||
|
||||
// -----------------------------------------------------------------------------------------------------------------------------
|
||||
CFGDEFDATA_OPTION
|
||||
(
|
||||
@ -348,7 +384,7 @@ static ConfigDefineOptionData configDefineOptionData[] = CFGDEFDATA_OPTION_LIST
|
||||
CFGDEFDATA_OPTION_SECURE(false)
|
||||
|
||||
CFGDEFDATA_OPTION_HELP_SECTION("archive")
|
||||
CFGDEFDATA_OPTION_HELP_SUMMARY("Limit size (in bytes) of the PostgreSQL archive queue.")
|
||||
CFGDEFDATA_OPTION_HELP_SUMMARY("Maximum size of the PostgreSQL archive queue.")
|
||||
CFGDEFDATA_OPTION_HELP_DESCRIPTION
|
||||
(
|
||||
"After the limit is reached, the following will happen:\n"
|
||||
@ -403,6 +439,7 @@ static ConfigDefineOptionData configDefineOptionData[] = CFGDEFDATA_OPTION_LIST
|
||||
|
||||
CFGDEFDATA_OPTION_COMMAND_LIST
|
||||
(
|
||||
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdArchiveGet)
|
||||
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdArchivePush)
|
||||
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdBackup)
|
||||
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdCheck)
|
||||
@ -1483,9 +1520,9 @@ static ConfigDefineOptionData configDefineOptionData[] = CFGDEFDATA_OPTION_LIST
|
||||
CFGDEFDATA_OPTION_HELP_SUMMARY("Manifest save threshold during backup.")
|
||||
CFGDEFDATA_OPTION_HELP_DESCRIPTION
|
||||
(
|
||||
"Defines how often the manifest will be saved during a backup (in bytes). Saving the manifest is important because it "
|
||||
"stores the checksums and allows the resume function to work efficiently. The actual threshold used is 1% of the "
|
||||
"backup size or manifest-save-threshold, whichever is greater.\n"
|
||||
"Defines how often the manifest will be saved during a backup. Saving the manifest is important because it stores the "
|
||||
"checksums and allows the resume function to work efficiently. The actual threshold used is 1% of the backup size "
|
||||
"or manifest-save-threshold, whichever is greater.\n"
|
||||
"\n"
|
||||
"Size can be entered in bytes (default) or KB, MB, GB, TB, or PB where the multiplier is a power of 1024."
|
||||
)
|
||||
@ -2223,6 +2260,7 @@ static ConfigDefineOptionData configDefineOptionData[] = CFGDEFDATA_OPTION_LIST
|
||||
|
||||
CFGDEFDATA_OPTION_COMMAND_LIST
|
||||
(
|
||||
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdArchiveGet)
|
||||
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdArchivePush)
|
||||
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdBackup)
|
||||
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRestore)
|
||||
@ -3576,16 +3614,27 @@ static ConfigDefineOptionData configDefineOptionData[] = CFGDEFDATA_OPTION_LIST
|
||||
CFGDEFDATA_OPTION_HELP_SUMMARY("Path where transient data is stored.")
|
||||
CFGDEFDATA_OPTION_HELP_DESCRIPTION
|
||||
(
|
||||
"This path is used to store acknowledgements from the asynchronous archive-push process. These files are generally "
|
||||
"very small (zero to a few hundred bytes) so not much space is required.\n"
|
||||
"This path is used to store data for the asynchronous archive-push and archive-get command.\n"
|
||||
"\n"
|
||||
"The asynchronous archive-push command writes acknowledgements into the spool path when it has successfully stored WAL "
|
||||
"in the archive (and errors on failure) so the foreground process can quickly notify PostgreSQL. Acknowledgement "
|
||||
"files are very small (zero on success and a few hundred bytes on error).\n"
|
||||
"\n"
|
||||
"The asynchronous archive-push process queues WAL in the spool path so it can be provided very quickly when PostgreSQL "
|
||||
"requests it. Moving files to PostgreSQL is most efficient when the spool path is on the same filesystem as "
|
||||
"pg_xlog/pg_wal.\n"
|
||||
"\n"
|
||||
"The data stored in the spool path is not strictly temporary since it can and should survive a reboot. However, loss "
|
||||
"of the data in the spool path is not a problem. pgBackRest will simply recheck each WAL segment to ensure it is "
|
||||
"safely archived."
|
||||
"safely archived for archive-push and rebuild the queue for archive-get.\n"
|
||||
"\n"
|
||||
"The spool path is intended to be located on a local Posix-compatible filesystem, not a remote filesystem such as NFS "
|
||||
"or CIFS."
|
||||
)
|
||||
|
||||
CFGDEFDATA_OPTION_COMMAND_LIST
|
||||
(
|
||||
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdArchiveGet)
|
||||
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdArchivePush)
|
||||
)
|
||||
|
||||
|
@ -51,6 +51,7 @@ typedef enum
|
||||
cfgDefOptArchiveAsync,
|
||||
cfgDefOptArchiveCheck,
|
||||
cfgDefOptArchiveCopy,
|
||||
cfgDefOptArchiveGetQueueMax,
|
||||
cfgDefOptArchivePushQueueMax,
|
||||
cfgDefOptArchiveTimeout,
|
||||
cfgDefOptBackupStandby,
|
||||
|
@ -54,6 +54,18 @@ static const struct option optionList[] =
|
||||
.val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | cfgOptArchiveCopy,
|
||||
},
|
||||
|
||||
// archive-get-queue-max option
|
||||
// -----------------------------------------------------------------------------------------------------------------------------
|
||||
{
|
||||
.name = "archive-get-queue-max",
|
||||
.has_arg = required_argument,
|
||||
.val = PARSE_OPTION_FLAG | cfgOptArchiveGetQueueMax,
|
||||
},
|
||||
{
|
||||
.name = "reset-archive-get-queue-max",
|
||||
.val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | cfgOptArchiveGetQueueMax,
|
||||
},
|
||||
|
||||
// archive-push-queue-max option and deprecations
|
||||
// -----------------------------------------------------------------------------------------------------------------------------
|
||||
{
|
||||
|
@ -4,6 +4,7 @@ Main
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "command/archive/get/get.h"
|
||||
#include "command/archive/push/push.h"
|
||||
#include "command/help/help.h"
|
||||
#include "command/command.h"
|
||||
@ -44,6 +45,13 @@ main(int argListSize, const char *argList[])
|
||||
fflush(stdout);
|
||||
}
|
||||
|
||||
// Archive get command
|
||||
// -------------------------------------------------------------------------------------------------------------------------
|
||||
else if (cfgCommand() == cfgCmdArchiveGet)
|
||||
{
|
||||
result = cmdArchiveGet();
|
||||
}
|
||||
|
||||
// Archive push command. Currently only implements local operations of async archive push.
|
||||
// -------------------------------------------------------------------------------------------------------------------------
|
||||
else if (cfgCommand() == cfgCmdArchivePush && cfgOptionBool(cfgOptArchiveAsync))
|
||||
|
@ -90,7 +90,14 @@ storageSpoolPathExpression(const String *expression, const String *path)
|
||||
{
|
||||
String *result = NULL;
|
||||
|
||||
if (strcmp(strPtr(expression), STORAGE_SPOOL_ARCHIVE_OUT) == 0)
|
||||
if (strcmp(strPtr(expression), STORAGE_SPOOL_ARCHIVE_IN) == 0)
|
||||
{
|
||||
if (path == NULL)
|
||||
result = strNewFmt("archive/%s/in", strPtr(storageSpoolStanza));
|
||||
else
|
||||
result = strNewFmt("archive/%s/in/%s", strPtr(storageSpoolStanza), strPtr(path));
|
||||
}
|
||||
else if (strcmp(strPtr(expression), STORAGE_SPOOL_ARCHIVE_OUT) == 0)
|
||||
{
|
||||
if (path == NULL)
|
||||
result = strNewFmt("archive/%s/out", strPtr(storageSpoolStanza));
|
||||
|
@ -9,6 +9,7 @@ Storage Helper
|
||||
/***********************************************************************************************************************************
|
||||
Spool storage path constants
|
||||
***********************************************************************************************************************************/
|
||||
#define STORAGE_SPOOL_ARCHIVE_IN "<SPOOL:ARCHIVE:IN>"
|
||||
#define STORAGE_SPOOL_ARCHIVE_OUT "<SPOOL:ARCHIVE:OUT>"
|
||||
|
||||
/***********************************************************************************************************************************
|
||||
|
@ -544,11 +544,20 @@ unit:
|
||||
Archive/Info: partial
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------------------
|
||||
- name: get-perl
|
||||
- name: get
|
||||
total: 2
|
||||
perlReq: true
|
||||
|
||||
coverage:
|
||||
Archive/Base: partial
|
||||
command/archive/get/get: full
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------------------
|
||||
- name: get-perl
|
||||
total: 3
|
||||
|
||||
coverage:
|
||||
Archive/Base: full
|
||||
Archive/Get/Async: full
|
||||
Archive/Get/File: partial
|
||||
Archive/Get/Get: partial
|
||||
|
||||
|
@ -14,7 +14,6 @@ P00 INFO: archive-push command end: aborted with exception [055]
|
||||
> [CONTAINER-EXEC] db-master [BACKREST-BIN] --config=[TEST_PATH]/db-master/pgbackrest.conf --stanza=db archive-get 000000010000000100000001 [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
------------------------------------------------------------------------------------------------------------------------------------
|
||||
P00 INFO: archive-get command begin [BACKREST-VERSION]: [000000010000000100000001, [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG] --no-compress --compress-level=3 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/db-master/lock --log-level-console=detail --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --pg1-path=[TEST_PATH]/db-master/db/base --protocol-timeout=60 --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=[TEST_PATH]/db-master/repo --stanza=db
|
||||
P00 INFO: get WAL segment 000000010000000100000001
|
||||
P00 ERROR: [055]: unable to open [TEST_PATH]/db-master/repo/archive/db/archive.info or [TEST_PATH]/db-master/repo/archive/db/archive.info.copy
|
||||
P00 ERROR: [055]: archive.info does not exist but is required to push/get WAL segments
|
||||
HINT: is archive_command configured in postgresql.conf?
|
||||
@ -114,9 +113,8 @@ P00 INFO: archive-push command end: completed successfully
|
||||
> [CONTAINER-EXEC] db-master [BACKREST-BIN] --config=[TEST_PATH]/db-master/pgbackrest.conf --stanza=db archive-get --log-level-console=debug 000000010000000100000001 [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
------------------------------------------------------------------------------------------------------------------------------------
|
||||
P00 INFO: archive-get command begin [BACKREST-VERSION]: [000000010000000100000001, [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG] --no-compress --compress-level=3 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/db-master/lock --log-level-console=debug --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --pg1-path=[TEST_PATH]/db-master/db/base --protocol-timeout=60 --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=[TEST_PATH]/db-master/repo --stanza=db
|
||||
P00 DEBUG: Archive::Get::Get->process(): strDestinationFile = [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG, strSourceArchive = 000000010000000100000001
|
||||
P00 INFO: get WAL segment 000000010000000100000001
|
||||
P00 DEBUG: Archive::Get::File::archiveGetFile(): strDestinationFile = [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG, strSourceArchive = 000000010000000100000001
|
||||
P00 DEBUG: Archive::Get::Get->process(): rstryCommandArg = (000000010000000100000001, [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG)
|
||||
P00 DEBUG: Archive::Get::File::archiveGetFile(): bAtomic = false, strDestinationFile = [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG, strSourceArchive = 000000010000000100000001
|
||||
P00 DEBUG: Common::Lock::lockStopTest(): bStanzaStopRequired = <false>
|
||||
P00 DEBUG: Common::Lock::lockStopTest=>: bStopExists = false
|
||||
P00 DEBUG: Storage::Posix::Driver->new(): bFileSync = <true>, bPathSync = <true>
|
||||
@ -147,9 +145,10 @@ P00 DEBUG: Archive::Get::File::archiveGetCheck=>: strArchiveFile = 00000001
|
||||
P00 DEBUG: Storage::Local->openRead(): bIgnoreMissing = <false>, rhyFilter = [undef], strCipherPass = <redacted>, xFileExp = <REPO:ARCHIVE>/9.4-1/000000010000000100000001-ceb021d9bb41f220511e413b095d2b0d89fec113
|
||||
P00 DEBUG: Storage::Posix::Driver->new(): bFileSync = <true>, bPathSync = <true>
|
||||
P00 DEBUG: Storage::Local->new(): bAllowTemp = <true>, hRule = [undef], lBufferMax = 4194304, oDriver = [object], strCipherPassUser = [undef], strCipherType = [undef], strDefaultFileMode = <0640>, strDefaultPathMode = <0750>, strPathBase = [TEST_PATH]/db-master/db/base, strTempExtension = pgbackrest.tmp
|
||||
P00 DEBUG: Storage::Local->openWrite(): bAtomic = <false>, bPathCreate = <false>, lTimestamp = [undef], rhyFilter = [undef], strCipherPass = [undef], strGroup = [undef], strMode = <0640>, strUser = [undef], xFileExp = [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
P00 DEBUG: Storage::Local->openWrite(): bAtomic = false, bPathCreate = <false>, lTimestamp = [undef], rhyFilter = [undef], strCipherPass = [undef], strGroup = [undef], strMode = <0640>, strUser = [undef], xFileExp = [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
P00 DEBUG: Storage::Base->copy(): xDestinationFile = [object], xSourceFile = [object]
|
||||
P00 DEBUG: Archive::Get::File::archiveGetFile=>: iResult = 0
|
||||
P00 INFO: got WAL segment 000000010000000100000001
|
||||
P00 DEBUG: Main::mainCleanup(): iExitCode = 0
|
||||
P00 DEBUG: Protocol::Helper::protocolDestroy(): bComplete = true, iRemoteIdx = [undef], strRemoteType = [undef]
|
||||
P00 DEBUG: Protocol::Helper::protocolDestroy=>: iExitStatus = 0
|
||||
@ -171,13 +170,12 @@ P00 INFO: archive-push command end: aborted with exception [044]
|
||||
> [CONTAINER-EXEC] db-master [BACKREST-BIN] --config=[TEST_PATH]/db-master/pgbackrest.conf --stanza=db archive-get 000000010000000100000001 [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
------------------------------------------------------------------------------------------------------------------------------------
|
||||
P00 INFO: archive-get command begin [BACKREST-VERSION]: [000000010000000100000001, [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG] --no-compress --compress-level=3 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/db-master/lock --log-level-console=detail --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --pg1-path=[TEST_PATH]/db-master/db/base --protocol-timeout=60 --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=[TEST_PATH]/db-master/repo --stanza=db
|
||||
P00 INFO: get WAL segment 000000010000000100000001
|
||||
P00 INFO: got WAL segment 000000010000000100000001
|
||||
P00 INFO: archive-get command end: completed successfully
|
||||
|
||||
> [CONTAINER-EXEC] db-master [BACKREST-BIN] --config=[TEST_PATH]/db-master/pgbackrest.conf --stanza=db archive-get 000000010000000100000001 [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
------------------------------------------------------------------------------------------------------------------------------------
|
||||
P00 INFO: archive-get command begin [BACKREST-VERSION]: [000000010000000100000001, [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG] --no-compress --compress-level=3 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/db-master/lock --log-level-console=detail --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --pg1-path=[TEST_PATH]/db-master/db/base --protocol-timeout=60 --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=[TEST_PATH]/db-master/repo --stanza=db
|
||||
P00 INFO: get WAL segment 000000010000000100000001
|
||||
P00 ERROR: [125]: unable to retrieve the archive id for database version '9.4' and system-id '1000000000000000094'
|
||||
P00 INFO: archive-get command end: aborted with exception [125]
|
||||
|
||||
@ -191,7 +189,6 @@ P00 INFO: archive-push command end: aborted with exception [044]
|
||||
> [CONTAINER-EXEC] db-master [BACKREST-BIN] --config=[TEST_PATH]/db-master/pgbackrest.conf --stanza=db archive-get 000000010000000100000001 [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
------------------------------------------------------------------------------------------------------------------------------------
|
||||
P00 INFO: archive-get command begin [BACKREST-VERSION]: [000000010000000100000001, [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG] --no-compress --compress-level=3 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/db-master/lock --log-level-console=detail --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --pg1-path=[TEST_PATH]/db-master/db/base --protocol-timeout=60 --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=[TEST_PATH]/db-master/repo --stanza=db
|
||||
P00 INFO: get WAL segment 000000010000000100000001
|
||||
P00 ERROR: [125]: unable to retrieve the archive id for database version '9.4' and system-id '1000000000000000094'
|
||||
P00 INFO: archive-get command end: aborted with exception [125]
|
||||
|
||||
@ -210,7 +207,6 @@ P00 INFO: archive-push command end: aborted with exception [062]
|
||||
> [CONTAINER-EXEC] db-master [BACKREST-BIN] --config=[TEST_PATH]/db-master/pgbackrest.conf --stanza=db archive-get 000000010000000100000001 [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
------------------------------------------------------------------------------------------------------------------------------------
|
||||
P00 INFO: archive-get command begin [BACKREST-VERSION]: [000000010000000100000001, [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG] --no-compress --compress-level=3 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/db-master/lock --log-level-console=detail --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --pg1-path=[TEST_PATH]/db-master/db/base --protocol-timeout=60 --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=[TEST_PATH]/db-master/repo --stanza=db
|
||||
P00 INFO: get WAL segment 000000010000000100000001
|
||||
P00 ERROR: [062]: stop file exists for stanza db
|
||||
P00 INFO: archive-get command end: aborted with exception [062]
|
||||
|
||||
@ -234,10 +230,10 @@ P00 INFO: archive-push command begin [BACKREST-VERSION]: [[TEST_PATH]/db-maste
|
||||
P00 ERROR: [045]: WAL segment 000000010000000100000002 already exists in the archive
|
||||
P00 INFO: archive-push command end: aborted with exception [045]
|
||||
|
||||
> [CONTAINER-EXEC] db-master [BACKREST-BIN] --config=[TEST_PATH]/db-master/pgbackrest.conf --stanza=db archive-get 000000010000000100000002 [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
> [CONTAINER-EXEC] db-master [BACKREST-BIN] --config=[TEST_PATH]/db-master/pgbackrest.conf --stanza=db archive-get --archive-async --archive-timeout=5 000000010000000100000002 [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
------------------------------------------------------------------------------------------------------------------------------------
|
||||
P00 INFO: archive-get command begin [BACKREST-VERSION]: [000000010000000100000002, [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG] --no-compress --compress-level=3 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/db-master/lock --log-level-console=detail --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --pg1-path=[TEST_PATH]/db-master/db/base --protocol-timeout=60 --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=[TEST_PATH]/db-master/repo --stanza=db
|
||||
P00 INFO: get WAL segment 000000010000000100000002
|
||||
P00 INFO: archive-get command begin [BACKREST-VERSION]: [000000010000000100000002, [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG] --archive-async --archive-timeout=5 --no-compress --compress-level=3 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/db-master/lock --log-level-console=detail --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --pg1-path=[TEST_PATH]/db-master/db/base --protocol-timeout=60 --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=[TEST_PATH]/db-master/repo --spool-path=[TEST_PATH]/db-master/spool --stanza=db
|
||||
P00 INFO: got WAL segment 000000010000000100000002 asynchronously
|
||||
P00 INFO: archive-get command end: completed successfully
|
||||
|
||||
> [CONTAINER-EXEC] db-master [BACKREST-BIN] --config=[TEST_PATH]/db-master/pgbackrest.conf --stanza=db archive-push [TEST_PATH]/db-master/db/base/pg_xlog/000000010000000100000002.partial
|
||||
|
@ -13,7 +13,6 @@ P00 INFO: archive-push command end: aborted with exception [055]
|
||||
> [CONTAINER-EXEC] db-master [BACKREST-BIN] --config=[TEST_PATH]/db-master/pgbackrest.conf --stanza=db archive-get 000000010000000100000001 [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
------------------------------------------------------------------------------------------------------------------------------------
|
||||
P00 INFO: archive-get command begin [BACKREST-VERSION]: [000000010000000100000001, [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG] --no-compress --compress-level=3 --compress-level-network=1 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/db-master/lock --log-level-console=detail --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --pg1-path=[TEST_PATH]/db-master/db/base --protocol-timeout=60 --repo1-host=backup --repo1-host-cmd=[BACKREST-BIN] --repo1-host-config=[TEST_PATH]/backup/pgbackrest.conf --repo1-host-user=[USER-1] --stanza=db
|
||||
P00 INFO: get WAL segment 000000010000000100000001
|
||||
P00 ERROR: [055]: raised from remote process on 'backup': archive.info does not exist but is required to push/get WAL segments
|
||||
HINT: is archive_command configured in postgresql.conf?
|
||||
HINT: has a stanza-create been performed?
|
||||
@ -98,9 +97,8 @@ P00 INFO: archive-push command end: completed successfully
|
||||
> [CONTAINER-EXEC] db-master [BACKREST-BIN] --config=[TEST_PATH]/db-master/pgbackrest.conf --stanza=db archive-get --log-level-console=debug 000000010000000100000001 [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
------------------------------------------------------------------------------------------------------------------------------------
|
||||
P00 INFO: archive-get command begin [BACKREST-VERSION]: [000000010000000100000001, [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG] --no-compress --compress-level=3 --compress-level-network=1 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/db-master/lock --log-level-console=debug --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --pg1-path=[TEST_PATH]/db-master/db/base --protocol-timeout=60 --repo1-host=backup --repo1-host-cmd=[BACKREST-BIN] --repo1-host-config=[TEST_PATH]/backup/pgbackrest.conf --repo1-host-user=[USER-1] --stanza=db
|
||||
P00 DEBUG: Archive::Get::Get->process(): strDestinationFile = [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG, strSourceArchive = 000000010000000100000001
|
||||
P00 INFO: get WAL segment 000000010000000100000001
|
||||
P00 DEBUG: Archive::Get::File::archiveGetFile(): strDestinationFile = [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG, strSourceArchive = 000000010000000100000001
|
||||
P00 DEBUG: Archive::Get::Get->process(): rstryCommandArg = (000000010000000100000001, [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG)
|
||||
P00 DEBUG: Archive::Get::File::archiveGetFile(): bAtomic = false, strDestinationFile = [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG, strSourceArchive = 000000010000000100000001
|
||||
P00 DEBUG: Common::Lock::lockStopTest(): bStanzaStopRequired = <false>
|
||||
P00 DEBUG: Common::Lock::lockStopTest=>: bStopExists = false
|
||||
P00 DEBUG: Protocol::Helper::protocolGet(): bCache = <true>, iProcessIdx = [undef], iRemoteIdx = <1>, strBackRestBin = [undef], strCommand = <archive-get>, strRemoteType = backup
|
||||
@ -122,9 +120,10 @@ P00 DEBUG: Archive::Get::File::archiveGetCheck=>: strArchiveFile = 00000001
|
||||
P00 DEBUG: Protocol::Storage::Remote->openRead(): rhParam = [hash], strFileExp = <REPO:ARCHIVE>/9.4-1/000000010000000100000001-ceb021d9bb41f220511e413b095d2b0d89fec113
|
||||
P00 DEBUG: Storage::Posix::Driver->new(): bFileSync = <true>, bPathSync = <true>
|
||||
P00 DEBUG: Storage::Local->new(): bAllowTemp = <true>, hRule = [undef], lBufferMax = 4194304, oDriver = [object], strCipherPassUser = [undef], strCipherType = [undef], strDefaultFileMode = <0640>, strDefaultPathMode = <0750>, strPathBase = [TEST_PATH]/db-master/db/base, strTempExtension = pgbackrest.tmp
|
||||
P00 DEBUG: Storage::Local->openWrite(): bAtomic = <false>, bPathCreate = <false>, lTimestamp = [undef], rhyFilter = [undef], strCipherPass = [undef], strGroup = [undef], strMode = <0640>, strUser = [undef], xFileExp = [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
P00 DEBUG: Storage::Local->openWrite(): bAtomic = false, bPathCreate = <false>, lTimestamp = [undef], rhyFilter = [undef], strCipherPass = [undef], strGroup = [undef], strMode = <0640>, strUser = [undef], xFileExp = [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
P00 DEBUG: Storage::Base->copy(): xDestinationFile = [object], xSourceFile = [object]
|
||||
P00 DEBUG: Archive::Get::File::archiveGetFile=>: iResult = 0
|
||||
P00 INFO: got WAL segment 000000010000000100000001
|
||||
P00 DEBUG: Main::mainCleanup(): iExitCode = 0
|
||||
P00 DEBUG: Protocol::Helper::protocolDestroy(): bComplete = true, iRemoteIdx = [undef], strRemoteType = [undef]
|
||||
P00 DEBUG: Protocol::Helper::protocolDestroy: found cached protocol: iRemoteIdx = 1, strRemoteType = backup
|
||||
@ -148,13 +147,12 @@ P00 INFO: archive-push command end: aborted with exception [044]
|
||||
> [CONTAINER-EXEC] db-master [BACKREST-BIN] --config=[TEST_PATH]/db-master/pgbackrest.conf --stanza=db archive-get 000000010000000100000001 [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
------------------------------------------------------------------------------------------------------------------------------------
|
||||
P00 INFO: archive-get command begin [BACKREST-VERSION]: [000000010000000100000001, [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG] --no-compress --compress-level=3 --compress-level-network=1 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/db-master/lock --log-level-console=detail --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --pg1-path=[TEST_PATH]/db-master/db/base --protocol-timeout=60 --repo1-host=backup --repo1-host-cmd=[BACKREST-BIN] --repo1-host-config=[TEST_PATH]/backup/pgbackrest.conf --repo1-host-user=[USER-1] --stanza=db
|
||||
P00 INFO: get WAL segment 000000010000000100000001
|
||||
P00 INFO: got WAL segment 000000010000000100000001
|
||||
P00 INFO: archive-get command end: completed successfully
|
||||
|
||||
> [CONTAINER-EXEC] db-master [BACKREST-BIN] --config=[TEST_PATH]/db-master/pgbackrest.conf --stanza=db archive-get 000000010000000100000001 [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
------------------------------------------------------------------------------------------------------------------------------------
|
||||
P00 INFO: archive-get command begin [BACKREST-VERSION]: [000000010000000100000001, [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG] --no-compress --compress-level=3 --compress-level-network=1 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/db-master/lock --log-level-console=detail --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --pg1-path=[TEST_PATH]/db-master/db/base --protocol-timeout=60 --repo1-host=backup --repo1-host-cmd=[BACKREST-BIN] --repo1-host-config=[TEST_PATH]/backup/pgbackrest.conf --repo1-host-user=[USER-1] --stanza=db
|
||||
P00 INFO: get WAL segment 000000010000000100000001
|
||||
P00 ERROR: [125]: raised from remote process on 'backup': unable to retrieve the archive id for database version '9.4' and system-id '1000000000000000094'
|
||||
P00 INFO: archive-get command end: aborted with exception [125]
|
||||
|
||||
@ -168,7 +166,6 @@ P00 INFO: archive-push command end: aborted with exception [044]
|
||||
> [CONTAINER-EXEC] db-master [BACKREST-BIN] --config=[TEST_PATH]/db-master/pgbackrest.conf --stanza=db archive-get 000000010000000100000001 [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
------------------------------------------------------------------------------------------------------------------------------------
|
||||
P00 INFO: archive-get command begin [BACKREST-VERSION]: [000000010000000100000001, [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG] --no-compress --compress-level=3 --compress-level-network=1 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/db-master/lock --log-level-console=detail --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --pg1-path=[TEST_PATH]/db-master/db/base --protocol-timeout=60 --repo1-host=backup --repo1-host-cmd=[BACKREST-BIN] --repo1-host-config=[TEST_PATH]/backup/pgbackrest.conf --repo1-host-user=[USER-1] --stanza=db
|
||||
P00 INFO: get WAL segment 000000010000000100000001
|
||||
P00 ERROR: [125]: raised from remote process on 'backup': unable to retrieve the archive id for database version '9.4' and system-id '1000000000000000094'
|
||||
P00 INFO: archive-get command end: aborted with exception [125]
|
||||
|
||||
@ -187,7 +184,6 @@ P00 INFO: archive-push command end: aborted with exception [062]
|
||||
> [CONTAINER-EXEC] db-master [BACKREST-BIN] --config=[TEST_PATH]/db-master/pgbackrest.conf --stanza=db archive-get 000000010000000100000001 [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
------------------------------------------------------------------------------------------------------------------------------------
|
||||
P00 INFO: archive-get command begin [BACKREST-VERSION]: [000000010000000100000001, [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG] --no-compress --compress-level=3 --compress-level-network=1 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/db-master/lock --log-level-console=detail --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --pg1-path=[TEST_PATH]/db-master/db/base --protocol-timeout=60 --repo1-host=backup --repo1-host-cmd=[BACKREST-BIN] --repo1-host-config=[TEST_PATH]/backup/pgbackrest.conf --repo1-host-user=[USER-1] --stanza=db
|
||||
P00 INFO: get WAL segment 000000010000000100000001
|
||||
P00 ERROR: [062]: stop file exists for stanza db
|
||||
P00 INFO: archive-get command end: aborted with exception [062]
|
||||
|
||||
@ -211,10 +207,10 @@ P00 INFO: archive-push command begin [BACKREST-VERSION]: [[TEST_PATH]/db-maste
|
||||
P00 ERROR: [045]: WAL segment 000000010000000100000002 already exists in the archive
|
||||
P00 INFO: archive-push command end: aborted with exception [045]
|
||||
|
||||
> [CONTAINER-EXEC] db-master [BACKREST-BIN] --config=[TEST_PATH]/db-master/pgbackrest.conf --stanza=db archive-get --cmd-ssh=/usr/bin/ssh 000000010000000100000002 [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
> [CONTAINER-EXEC] db-master [BACKREST-BIN] --config=[TEST_PATH]/db-master/pgbackrest.conf --stanza=db archive-get --cmd-ssh=/usr/bin/ssh --archive-async --archive-timeout=5 000000010000000100000002 [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
------------------------------------------------------------------------------------------------------------------------------------
|
||||
P00 INFO: archive-get command begin [BACKREST-VERSION]: [000000010000000100000002, [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG] --cmd-ssh=/usr/bin/ssh --no-compress --compress-level=3 --compress-level-network=1 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/db-master/lock --log-level-console=detail --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --pg1-path=[TEST_PATH]/db-master/db/base --protocol-timeout=60 --repo1-host=backup --repo1-host-cmd=[BACKREST-BIN] --repo1-host-config=[TEST_PATH]/backup/pgbackrest.conf --repo1-host-user=[USER-1] --stanza=db
|
||||
P00 INFO: get WAL segment 000000010000000100000002
|
||||
P00 INFO: archive-get command begin [BACKREST-VERSION]: [000000010000000100000002, [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG] --archive-async --archive-timeout=5 --cmd-ssh=/usr/bin/ssh --no-compress --compress-level=3 --compress-level-network=1 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/db-master/lock --log-level-console=detail --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --pg1-path=[TEST_PATH]/db-master/db/base --protocol-timeout=60 --repo1-host=backup --repo1-host-cmd=[BACKREST-BIN] --repo1-host-config=[TEST_PATH]/backup/pgbackrest.conf --repo1-host-user=[USER-1] --spool-path=[TEST_PATH]/db-master/spool --stanza=db
|
||||
P00 INFO: got WAL segment 000000010000000100000002 asynchronously
|
||||
P00 INFO: archive-get command end: completed successfully
|
||||
|
||||
> [CONTAINER-EXEC] db-master [BACKREST-BIN] --config=[TEST_PATH]/db-master/pgbackrest.conf --stanza=db archive-push [TEST_PATH]/db-master/db/base/pg_xlog/000000010000000100000002.partial
|
||||
|
@ -108,7 +108,7 @@ P00 WARN: WAL segment 000000010000000100000002 already exists in the archive w
|
||||
------------------------------------------------------------------------------------------------------------------------------------
|
||||
P00 ERROR: [045]: WAL segment 000000010000000100000002 already exists in the archive
|
||||
|
||||
> [CONTAINER-EXEC] db-master [BACKREST-BIN] --config=[TEST_PATH]/db-master/pgbackrest.conf --stanza=db archive-get --cmd-ssh=/usr/bin/ssh 000000010000000100000002 [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
> [CONTAINER-EXEC] db-master [BACKREST-BIN] --config=[TEST_PATH]/db-master/pgbackrest.conf --stanza=db archive-get --cmd-ssh=/usr/bin/ssh --archive-async --archive-timeout=5 000000010000000100000002 [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
------------------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
> [CONTAINER-EXEC] db-master [BACKREST-BIN] --config=[TEST_PATH]/db-master/pgbackrest.conf --stanza=db archive-push [TEST_PATH]/db-master/db/base/pg_xlog/000000010000000100000002.partial
|
||||
|
@ -516,9 +516,8 @@ db-version="9.4"
|
||||
> [CONTAINER-EXEC] db-master [BACKREST-BIN] --config=[TEST_PATH]/db-master/pgbackrest.conf --stanza=db archive-get 000000010000000100000002 [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
------------------------------------------------------------------------------------------------------------------------------------
|
||||
P00 INFO: archive-get command begin [BACKREST-VERSION]: [000000010000000100000002, [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG] --compress-level=3 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/db-master/lock --log-level-console=debug --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --pg1-path=[TEST_PATH]/db-master/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-master/repo --stanza=db
|
||||
P00 DEBUG: Archive::Get::Get->process(): strDestinationFile = [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG, strSourceArchive = 000000010000000100000002
|
||||
P00 INFO: get WAL segment 000000010000000100000002
|
||||
P00 DEBUG: Archive::Get::File::archiveGetFile(): strDestinationFile = [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG, strSourceArchive = 000000010000000100000002
|
||||
P00 DEBUG: Archive::Get::Get->process(): rstryCommandArg = (000000010000000100000002, [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG)
|
||||
P00 DEBUG: Archive::Get::File::archiveGetFile(): bAtomic = false, strDestinationFile = [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG, strSourceArchive = 000000010000000100000002
|
||||
P00 DEBUG: Common::Lock::lockStopTest(): bStanzaStopRequired = <false>
|
||||
P00 DEBUG: Common::Lock::lockStopTest=>: bStopExists = false
|
||||
P00 DEBUG: Storage::Posix::Driver->new(): bFileSync = <true>, bPathSync = <true>
|
||||
@ -549,9 +548,10 @@ P00 DEBUG: Archive::Get::File::archiveGetCheck=>: strArchiveFile = 00000001
|
||||
P00 DEBUG: Storage::Local->openRead(): bIgnoreMissing = <false>, rhyFilter = [undef], strCipherPass = [undef], xFileExp = <REPO:ARCHIVE>/9.3-1/000000010000000100000002-488ba4b8b98acc510bce86b8f16e3c1ed9886a29.gz
|
||||
P00 DEBUG: Storage::Posix::Driver->new(): bFileSync = <true>, bPathSync = <true>
|
||||
P00 DEBUG: Storage::Local->new(): bAllowTemp = <true>, hRule = [undef], lBufferMax = 4194304, oDriver = [object], strCipherPassUser = [undef], strCipherType = [undef], strDefaultFileMode = <0640>, strDefaultPathMode = <0750>, strPathBase = [TEST_PATH]/db-master/db/base, strTempExtension = pgbackrest.tmp
|
||||
P00 DEBUG: Storage::Local->openWrite(): bAtomic = <false>, bPathCreate = <false>, lTimestamp = [undef], rhyFilter = ({rxyParam => ({strCompressType => decompress}), strClass => pgBackRest::Storage::Filter::Gzip}), strCipherPass = [undef], strGroup = [undef], strMode = <0640>, strUser = [undef], xFileExp = [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
P00 DEBUG: Storage::Local->openWrite(): bAtomic = false, bPathCreate = <false>, lTimestamp = [undef], rhyFilter = ({rxyParam => ({strCompressType => decompress}), strClass => pgBackRest::Storage::Filter::Gzip}), strCipherPass = [undef], strGroup = [undef], strMode = <0640>, strUser = [undef], xFileExp = [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
P00 DEBUG: Storage::Base->copy(): xDestinationFile = [object], xSourceFile = [object]
|
||||
P00 DEBUG: Archive::Get::File::archiveGetFile=>: iResult = 0
|
||||
P00 INFO: got WAL segment 000000010000000100000002
|
||||
P00 DEBUG: Main::mainCleanup(): iExitCode = 0
|
||||
P00 DEBUG: Protocol::Helper::protocolDestroy(): bComplete = true, iRemoteIdx = [undef], strRemoteType = [undef]
|
||||
P00 DEBUG: Protocol::Helper::protocolDestroy=>: iExitStatus = 0
|
||||
|
@ -382,9 +382,8 @@ db-version="9.4"
|
||||
> [CONTAINER-EXEC] db-master [BACKREST-BIN] --config=[TEST_PATH]/db-master/pgbackrest.conf --stanza=db archive-get 000000010000000100000002 [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
------------------------------------------------------------------------------------------------------------------------------------
|
||||
P00 INFO: archive-get command begin [BACKREST-VERSION]: [000000010000000100000002, [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG] --compress-level=3 --compress-level-network=1 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/db-master/lock --log-level-console=debug --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --pg1-path=[TEST_PATH]/db-master/db/base --protocol-timeout=60 --repo1-host=backup --repo1-host-cmd=[BACKREST-BIN] --repo1-host-config=[TEST_PATH]/backup/pgbackrest.conf --repo1-host-user=[USER-2] --stanza=db
|
||||
P00 DEBUG: Archive::Get::Get->process(): strDestinationFile = [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG, strSourceArchive = 000000010000000100000002
|
||||
P00 INFO: get WAL segment 000000010000000100000002
|
||||
P00 DEBUG: Archive::Get::File::archiveGetFile(): strDestinationFile = [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG, strSourceArchive = 000000010000000100000002
|
||||
P00 DEBUG: Archive::Get::Get->process(): rstryCommandArg = (000000010000000100000002, [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG)
|
||||
P00 DEBUG: Archive::Get::File::archiveGetFile(): bAtomic = false, strDestinationFile = [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG, strSourceArchive = 000000010000000100000002
|
||||
P00 DEBUG: Common::Lock::lockStopTest(): bStanzaStopRequired = <false>
|
||||
P00 DEBUG: Common::Lock::lockStopTest=>: bStopExists = false
|
||||
P00 DEBUG: Protocol::Helper::protocolGet(): bCache = <true>, iProcessIdx = [undef], iRemoteIdx = <1>, strBackRestBin = [undef], strCommand = <archive-get>, strRemoteType = backup
|
||||
@ -406,9 +405,10 @@ P00 DEBUG: Archive::Get::File::archiveGetCheck=>: strArchiveFile = 00000001
|
||||
P00 DEBUG: Protocol::Storage::Remote->openRead(): rhParam = [hash], strFileExp = <REPO:ARCHIVE>/9.3-1/000000010000000100000002-488ba4b8b98acc510bce86b8f16e3c1ed9886a29.gz
|
||||
P00 DEBUG: Storage::Posix::Driver->new(): bFileSync = <true>, bPathSync = <true>
|
||||
P00 DEBUG: Storage::Local->new(): bAllowTemp = <true>, hRule = [undef], lBufferMax = 4194304, oDriver = [object], strCipherPassUser = [undef], strCipherType = [undef], strDefaultFileMode = <0640>, strDefaultPathMode = <0750>, strPathBase = [TEST_PATH]/db-master/db/base, strTempExtension = pgbackrest.tmp
|
||||
P00 DEBUG: Storage::Local->openWrite(): bAtomic = <false>, bPathCreate = <false>, lTimestamp = [undef], rhyFilter = ({rxyParam => ({strCompressType => decompress}), strClass => pgBackRest::Storage::Filter::Gzip}), strCipherPass = [undef], strGroup = [undef], strMode = <0640>, strUser = [undef], xFileExp = [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
P00 DEBUG: Storage::Local->openWrite(): bAtomic = false, bPathCreate = <false>, lTimestamp = [undef], rhyFilter = ({rxyParam => ({strCompressType => decompress}), strClass => pgBackRest::Storage::Filter::Gzip}), strCipherPass = [undef], strGroup = [undef], strMode = <0640>, strUser = [undef], xFileExp = [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
P00 DEBUG: Storage::Base->copy(): xDestinationFile = [object], xSourceFile = [object]
|
||||
P00 DEBUG: Archive::Get::File::archiveGetFile=>: iResult = 0
|
||||
P00 INFO: got WAL segment 000000010000000100000002
|
||||
P00 DEBUG: Main::mainCleanup(): iExitCode = 0
|
||||
P00 DEBUG: Protocol::Helper::protocolDestroy(): bComplete = true, iRemoteIdx = [undef], strRemoteType = [undef]
|
||||
P00 DEBUG: Protocol::Helper::protocolDestroy: found cached protocol: iRemoteIdx = 1, strRemoteType = backup
|
||||
|
@ -491,9 +491,8 @@ db-version="9.4"
|
||||
> [CONTAINER-EXEC] db-master [BACKREST-BIN] --config=[TEST_PATH]/db-master/pgbackrest.conf --stanza=db archive-get 000000010000000100000002 [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
------------------------------------------------------------------------------------------------------------------------------------
|
||||
P00 INFO: archive-get command begin [BACKREST-VERSION]: [000000010000000100000002, [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG] --compress-level=3 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/db-master/lock --log-level-console=debug --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --pg1-path=[TEST_PATH]/db-master/db/base --protocol-timeout=60 --repo1-path=/ --repo1-s3-bucket=pgbackrest-dev --repo1-s3-endpoint=s3.amazonaws.com --repo1-s3-key=<redacted> --repo1-s3-key-secret=<redacted> --repo1-s3-region=us-east-1 --no-repo1-s3-verify-ssl --repo1-type=s3 --stanza=db
|
||||
P00 DEBUG: Archive::Get::Get->process(): strDestinationFile = [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG, strSourceArchive = 000000010000000100000002
|
||||
P00 INFO: get WAL segment 000000010000000100000002
|
||||
P00 DEBUG: Archive::Get::File::archiveGetFile(): strDestinationFile = [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG, strSourceArchive = 000000010000000100000002
|
||||
P00 DEBUG: Archive::Get::Get->process(): rstryCommandArg = (000000010000000100000002, [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG)
|
||||
P00 DEBUG: Archive::Get::File::archiveGetFile(): bAtomic = false, strDestinationFile = [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG, strSourceArchive = 000000010000000100000002
|
||||
P00 DEBUG: Common::Lock::lockStopTest(): bStanzaStopRequired = <false>
|
||||
P00 DEBUG: Common::Lock::lockStopTest=>: bStopExists = false
|
||||
P00 DEBUG: Storage::S3::Request->new(): bVerifySsl = false, iPort = [undef], lBufferMax = 4194304, strAccessKeyId = <redacted>, strBucket = pgbackrest-dev, strCaFile = [undef], strCaPath = [undef], strEndPoint = s3.amazonaws.com, strHost = [undef], strRegion = us-east-1, strSecretAccessKey = <redacted>
|
||||
@ -524,9 +523,10 @@ P00 DEBUG: Archive::Get::File::archiveGetCheck=>: strArchiveFile = 00000001
|
||||
P00 DEBUG: Storage::Local->openRead(): bIgnoreMissing = <false>, rhyFilter = [undef], strCipherPass = [undef], xFileExp = <REPO:ARCHIVE>/9.3-1/000000010000000100000002-488ba4b8b98acc510bce86b8f16e3c1ed9886a29.gz
|
||||
P00 DEBUG: Storage::Posix::Driver->new(): bFileSync = <true>, bPathSync = <true>
|
||||
P00 DEBUG: Storage::Local->new(): bAllowTemp = <true>, hRule = [undef], lBufferMax = 4194304, oDriver = [object], strCipherPassUser = [undef], strCipherType = [undef], strDefaultFileMode = <0640>, strDefaultPathMode = <0750>, strPathBase = [TEST_PATH]/db-master/db/base, strTempExtension = pgbackrest.tmp
|
||||
P00 DEBUG: Storage::Local->openWrite(): bAtomic = <false>, bPathCreate = <false>, lTimestamp = [undef], rhyFilter = ({rxyParam => ({strCompressType => decompress}), strClass => pgBackRest::Storage::Filter::Gzip}), strCipherPass = [undef], strGroup = [undef], strMode = <0640>, strUser = [undef], xFileExp = [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
P00 DEBUG: Storage::Local->openWrite(): bAtomic = false, bPathCreate = <false>, lTimestamp = [undef], rhyFilter = ({rxyParam => ({strCompressType => decompress}), strClass => pgBackRest::Storage::Filter::Gzip}), strCipherPass = [undef], strGroup = [undef], strMode = <0640>, strUser = [undef], xFileExp = [TEST_PATH]/db-master/db/base/pg_xlog/RECOVERYXLOG
|
||||
P00 DEBUG: Storage::Base->copy(): xDestinationFile = [object], xSourceFile = [object]
|
||||
P00 DEBUG: Archive::Get::File::archiveGetFile=>: iResult = 0
|
||||
P00 INFO: got WAL segment 000000010000000100000002
|
||||
P00 DEBUG: Main::mainCleanup(): iExitCode = 0
|
||||
P00 DEBUG: Protocol::Helper::protocolDestroy(): bComplete = true, iRemoteIdx = [undef], strRemoteType = [undef]
|
||||
P00 DEBUG: Protocol::Helper::protocolDestroy=>: iExitStatus = 0
|
||||
|
@ -457,13 +457,21 @@ sub walGenerate
|
||||
my $iSourceNo = shift;
|
||||
my $strWalSegment = shift;
|
||||
my $bPartial = shift;
|
||||
my $bChecksum = shift;
|
||||
my $bReady = shift;
|
||||
|
||||
my $strWalFile = "${strWalPath}/${strWalSegment}" . (defined($bPartial) && $bPartial ? '.partial' : '');
|
||||
my $rtWalContent = $self->walGenerateContent($strPgVersion, {iSourceNo => $iSourceNo});
|
||||
my $strWalFile =
|
||||
"${strWalPath}/${strWalSegment}" . ($bChecksum ? '-' . sha1_hex($rtWalContent) : '') .
|
||||
(defined($bPartial) && $bPartial ? '.partial' : '');
|
||||
|
||||
# Put the WAL segment and the ready file
|
||||
storageTest()->put($strWalFile, $rtWalContent);
|
||||
storageTest()->put("${strWalPath}/archive_status/${strWalSegment}.ready");
|
||||
|
||||
if (!defined($bReady) || $bReady)
|
||||
{
|
||||
storageTest()->put("${strWalPath}/archive_status/${strWalSegment}.ready");
|
||||
}
|
||||
|
||||
return $strWalFile;
|
||||
}
|
||||
|
@ -15,6 +15,7 @@ use Storable qw(dclone);
use Digest::SHA qw(sha1_hex);

use pgBackRest::Archive::Common;
use pgBackRest::Archive::Get::Async;
use pgBackRest::Archive::Get::File;
use pgBackRest::Archive::Get::Get;
use pgBackRest::Archive::Info;

@ -24,6 +25,7 @@ use pgBackRest::Config::Config;
use pgBackRest::DbVersion;
use pgBackRest::Manifest;
use pgBackRest::Protocol::Storage::Helper;
use pgBackRest::Storage::Helper;

use pgBackRestTest::Env::HostEnvTest;
use pgBackRestTest::Common::ExecuteTest;

@ -37,9 +39,11 @@ sub initModule
my $self = shift;

$self->{strDbPath} = $self->testPath() . '/db';
$self->{strLockPath} = $self->testPath() . '/lock';
$self->{strRepoPath} = $self->testPath() . '/repo';
$self->{strArchivePath} = "$self->{strRepoPath}/archive/" . $self->stanza();
$self->{strBackupPath} = "$self->{strRepoPath}/backup/" . $self->stanza();
$self->{strSpoolPath} = "$self->{strArchivePath}/in";
}

####################################################################################################################################

@ -57,6 +61,8 @@ sub initTest
$self->optionTestSet(CFGOPT_STANZA, $self->stanza());
$self->optionTestSet(CFGOPT_REPO_PATH, $self->testPath() . '/repo');
$self->optionTestSet(CFGOPT_PG_PATH, $self->{strDbPath});
$self->optionTestSet(CFGOPT_LOG_PATH, $self->testPath());
$self->optionTestSet(CFGOPT_LOCK_PATH, $self->{strLockPath});
$self->configTestLoad(CFGCMD_ARCHIVE_GET);

# Create archive info path

@ -65,6 +71,9 @@ sub initTest
# Create backup info path
storageTest()->pathCreate($self->{strBackupPath}, {bIgnoreExists => true, bCreateParent => true});

# Create spool path
storageTest()->pathCreate($self->{strSpoolPath}, {bIgnoreExists => true, bCreateParent => true});

# Create pg_control path
storageTest()->pathCreate($self->{strDbPath} . '/' . DB_PATH_GLOBAL, {bCreateParent => true});

@ -90,7 +99,7 @@ sub run
my $strArchivePath;

################################################################################################################################
if ($self->begin("Archive::Base::getCheck()"))
if ($self->begin("Archive::Common::archiveGetCheck()"))
{
# Create and save archive.info file
my $oArchiveInfo = new pgBackRest::Archive::Info(storageRepo()->pathGet(STORAGE_REPO_ARCHIVE), false,

@ -184,11 +193,11 @@ sub run
}

################################################################################################################################
if ($self->begin("Archive::Get::Get::get()"))
if ($self->begin("Archive::Get::Get::get() sync"))
{
# archive.info missing
#---------------------------------------------------------------------------------------------------------------------------
$self->testException(sub {archiveGetFile($strWalSegment, $strDestinationFile)},
$self->testException(sub {archiveGetFile($strWalSegment, $strDestinationFile, false)},
ERROR_FILE_MISSING,
ARCHIVE_INFO_FILE . " does not exist but is required to push/get WAL segments\n" .
"HINT: is archive_command configured in postgresql.conf?\n" .

@ -205,7 +214,7 @@ sub run

# file not found
#---------------------------------------------------------------------------------------------------------------------------
$self->testResult(sub {archiveGetFile($strWalSegment, $strDestinationFile)}, 1,
$self->testResult(sub {archiveGetFile($strWalSegment, $strDestinationFile, false)}, 1,
"unable to find ${strWalSegment} in the archive");

# file found but is not a WAL segment

@ -219,14 +228,14 @@ sub run
# Create path to copy file
storageRepo()->pathCreate($strDestinationPath);

$self->testResult(sub {archiveGetFile(BOGUS, $strDestinationFile)}, 0,
$self->testResult(sub {archiveGetFile(BOGUS, $strDestinationFile, false)}, 0,
"non-WAL segment copied");

# Confirm the correct file is copied
$self->testResult(sub {sha1_hex(${storageRepo()->get($strDestinationFile)})}, $strBogusHash,
' check correct non-WAL copied from older archiveId');

# create same WAL segment in same DB but different archives and different has values. Confirm latest one copied.
# create same WAL segment in same DB but different archives and different hash values. Confirm latest one copied.
#---------------------------------------------------------------------------------------------------------------------------
my $strWalMajorPath = "${strArchivePath}/" . substr($strWalSegment, 0, 16);
my $strWalSegmentName = "${strWalSegment}-${strFileHash}";

@ -244,7 +253,7 @@ sub run
storageRepo()->pathCreate($strWalMajorPath, {bCreateParent => true});
storageRepo()->put("${strWalMajorPath}/${strWalSegmentName}", $strFileContent);

$self->testResult(sub {archiveGetFile($strWalSegmentName, $strDestinationFile)}, 0,
$self->testResult(sub {archiveGetFile($strWalSegmentName, $strDestinationFile, false)}, 0,
"WAL segment copied");

# Confirm the correct file is copied

@ -271,13 +280,102 @@ sub run
# Overwrite current pg_control file with older version
$self->controlGenerate($self->{strDbPath}, PG_VERSION_93);

$self->testResult(sub {archiveGetFile($strWalSegmentName, $strDestinationFile)}, 0,
$self->testResult(sub {archiveGetFile($strWalSegmentName, $strDestinationFile, false)}, 0,
"WAL segment copied from older db backupset to same version older db");

# Confirm the correct file is copied
$self->testResult(sub {sha1_hex(${storageRepo()->get($strDestinationFile)})}, $strWalHash,
' check correct WAL copied from older db');
}

################################################################################################################################
if ($self->begin("Archive::Get::Get::get() async"))
{
# Test error in local process when stanza has not been created
#---------------------------------------------------------------------------------------------------------------------------
my @stryWal = ('000000010000000A0000000A', '000000010000000A0000000B');

my $oGetAsync = new pgBackRest::Archive::Get::Async(
$self->{strSpoolPath}, $self->backrestExe(), \@stryWal);

$self->optionTestSetBool(CFGOPT_ARCHIVE_ASYNC, true);
$self->optionTestSet(CFGOPT_SPOOL_PATH, $self->{strRepoPath});
$self->configTestLoad(CFGCMD_ARCHIVE_GET);

$oGetAsync->process();

my $strErrorMessage =
"55\n" .
"raised from local-1 process: archive.info does not exist but is required to push/get WAL segments\n" .
"HINT: is archive_command configured in postgresql.conf?\n" .
"HINT: has a stanza-create been performed?\n" .
"HINT: use --no-archive-check to disable archive checks during backup if you have an alternate archiving scheme.";

$self->testResult(
sub {storageSpool()->list(STORAGE_SPOOL_ARCHIVE_IN)},
"(000000010000000A0000000A.error, 000000010000000A0000000B.error)", 'error files created');

$self->testResult(
${storageSpool()->get(STORAGE_SPOOL_ARCHIVE_IN . "/000000010000000A0000000A.error")}, $strErrorMessage,
"check error file contents");
storageSpool()->remove(STORAGE_SPOOL_ARCHIVE_IN . "/000000010000000A0000000A.error");
$self->testResult(
${storageSpool()->get(STORAGE_SPOOL_ARCHIVE_IN . "/000000010000000A0000000B.error")}, $strErrorMessage,
"check error file contents");
storageSpool()->remove(STORAGE_SPOOL_ARCHIVE_IN . "/000000010000000A0000000B.error");

# Create archive info file
#---------------------------------------------------------------------------------------------------------------------------
my $oArchiveInfo = new pgBackRest::Archive::Info($self->{strArchivePath}, false, {bIgnoreMissing => true});
$oArchiveInfo->create(PG_VERSION_94, $self->dbSysId(PG_VERSION_94), true);

my $strArchiveId = $oArchiveInfo->archiveId();

# Transfer first file
#---------------------------------------------------------------------------------------------------------------------------
my $strWalPath = "$self->{strRepoPath}/archive/db/${strArchiveId}/000000010000000A";
storageRepo()->pathCreate($strWalPath, {bCreateParent => true});

$self->walGenerate($strWalPath, PG_VERSION_94, 1, "000000010000000A0000000A", false, true, false);
$oGetAsync->processQueue();

$self->testResult(
sub {storageSpool()->list(STORAGE_SPOOL_ARCHIVE_IN)},
"(000000010000000A0000000A, 000000010000000A0000000B.ok)", 'WAL and OK file');

# Transfer second file
#---------------------------------------------------------------------------------------------------------------------------
@stryWal = ('000000010000000A0000000B');

storageSpool()->remove(STORAGE_SPOOL_ARCHIVE_IN . "/000000010000000A0000000B.ok");

$self->walGenerate($strWalPath, PG_VERSION_94, 1, "000000010000000A0000000B", false, true, false);
$oGetAsync->processQueue();

$self->testResult(
sub {storageSpool()->list(STORAGE_SPOOL_ARCHIVE_IN)},
"(000000010000000A0000000A, 000000010000000A0000000B)", 'WAL files');

# Error on main process
#---------------------------------------------------------------------------------------------------------------------------
@stryWal = ('000000010000000A0000000C');

storageTest()->put(storageTest()->openWrite($self->{strLockPath} . "/db.stop", {bPathCreate => true}), undef);

$oGetAsync->processQueue();

$self->testResult(
sub {storageSpool()->list(STORAGE_SPOOL_ARCHIVE_IN)},
"(000000010000000A0000000A, 000000010000000A0000000B, 000000010000000A0000000C.error)", 'WAL files and error file');

# Set protocol timeout low
#---------------------------------------------------------------------------------------------------------------------------
$self->optionTestSet(CFGOPT_PROTOCOL_TIMEOUT, 30);
$self->optionTestSet(CFGOPT_DB_TIMEOUT, 29);
$self->configTestLoad(CFGCMD_ARCHIVE_GET);

$oGetAsync->process();
}
}

1;

@ -159,12 +159,12 @@ sub run
# Test that the WAL was pushed
$self->archiveCheck($strSourceFile, $strArchiveChecksum, false);

# Remove WAL
storageTest()->remove("${strWalPath}/${strSourceFile}", {bIgnoreMissing => false});

#---------------------------------------------------------------------------------------------------------------------------
&log(INFO, ' get first WAL');

# Remove WAL so it can be recovered
storageTest()->remove("${strWalPath}/${strSourceFile}", {bIgnoreMissing => false});

$oHostDbMaster->executeSimple(
$strCommandGet . " ${strLogDebug} ${strSourceFile} ${strWalPath}/RECOVERYXLOG",
{oLogTest => $self->expect()});

@ -325,6 +325,8 @@ sub run

$oHostDbMaster->start({strStanza => $oHostDbMaster->stanza()});

storageTest->remove("${strWalPath}/RECOVERYXLOG", {bIgnoreMissing => false});

#---------------------------------------------------------------------------------------------------------------------------
&log(INFO, ' WAL duplicate ok');

@ -339,14 +341,15 @@ sub run
$strCommandPush . " ${strWalPath}/${strSourceFile}",
{iExpectedExitStatus => ERROR_ARCHIVE_DUPLICATE, oLogTest => $self->expect()});

#---------------------------------------------------------------------------------------------------------------------------
&log(INFO, ' get second WAL');

# Remove WAL so it can be recovered
# Remove WAL
storageTest()->remove("${strWalPath}/${strSourceFile}", {bIgnoreMissing => false});

#---------------------------------------------------------------------------------------------------------------------------
&log(INFO, " get second WAL (${strSourceFile})");

$oHostDbMaster->executeSimple(
$strCommandGet . ($bRemote ? ' --cmd-ssh=/usr/bin/ssh' : '') . " ${strSourceFile} ${strWalPath}/RECOVERYXLOG",
$strCommandGet . ($bRemote ? ' --cmd-ssh=/usr/bin/ssh' : '') .
" --archive-async --archive-timeout=5 ${strSourceFile} ${strWalPath}/RECOVERYXLOG",
{oLogTest => $self->expect()});

# Check that the destination file exists

@ -16,6 +16,6 @@ void
harnessCfgLoad(unsigned int argListSize, const char *argList[])
{
configParse(argListSize, argList);
logInit(logLevelInfo, logLevelOff, logLevelOff, false);
logInit(logLevelInfo, logLevelOff, logLevelDebug, false);
cfgLoadUpdateOption();
}

@ -26,42 +26,48 @@ testRun()
// -------------------------------------------------------------------------------------------------------------------------
String *segment = strNew("000000010000000100000001");

TEST_RESULT_BOOL(archiveAsyncStatus(segment, false), false, "directory and status file not present");
TEST_RESULT_BOOL(archiveAsyncStatus(archiveModePush, segment, false), false, "directory and status file not present");
TEST_RESULT_BOOL(archiveAsyncStatus(archiveModeGet, segment, false), false, "directory and status file not present");

// -------------------------------------------------------------------------------------------------------------------------
mkdir(strPtr(strNewFmt("%s/archive", testPath())), 0750);
mkdir(strPtr(strNewFmt("%s/archive/db", testPath())), 0750);
mkdir(strPtr(strNewFmt("%s/archive/db/out", testPath())), 0750);

TEST_RESULT_BOOL(archiveAsyncStatus(segment, false), false, "status file not present");
TEST_RESULT_BOOL(archiveAsyncStatus(archiveModePush, segment, false), false, "status file not present");

// -------------------------------------------------------------------------------------------------------------------------
storagePutNP(
storageNewWriteNP(storageSpool(), strNewFmt(STORAGE_SPOOL_ARCHIVE_OUT "/%s.ok", strPtr(segment))),
bufNewStr(strNew(BOGUS_STR)));
TEST_ERROR(
archiveAsyncStatus(segment, false), FormatError, "000000010000000100000001.ok content must have at least two lines");
archiveAsyncStatus(archiveModePush, segment, false), FormatError,
"000000010000000100000001.ok content must have at least two lines");

storagePutNP(
storageNewWriteNP(storageSpool(), strNewFmt(STORAGE_SPOOL_ARCHIVE_OUT "/%s.ok", strPtr(segment))),
bufNewStr(strNew(BOGUS_STR "\n")));
TEST_ERROR(archiveAsyncStatus(segment, false), FormatError, "000000010000000100000001.ok message must be > 0");
TEST_ERROR(
archiveAsyncStatus(archiveModePush, segment, false), FormatError, "000000010000000100000001.ok message must be > 0");

storagePutNP(
storageNewWriteNP(storageSpool(), strNewFmt(STORAGE_SPOOL_ARCHIVE_OUT "/%s.ok", strPtr(segment))),
bufNewStr(strNew(BOGUS_STR "\nmessage")));
TEST_ERROR(archiveAsyncStatus(segment, false), FormatError, "unable to convert str 'BOGUS' to int");
TEST_ERROR(archiveAsyncStatus(archiveModePush, segment, false), FormatError, "unable to convert str 'BOGUS' to int");

storagePutNP(storageNewWriteNP(storageSpool(), strNewFmt(STORAGE_SPOOL_ARCHIVE_OUT "/%s.ok", strPtr(segment))), NULL);
TEST_RESULT_BOOL(archiveAsyncStatus(archiveModePush, segment, false), true, "ok file");

storagePutNP(
storageNewWriteNP(storageSpool(), strNewFmt(STORAGE_SPOOL_ARCHIVE_OUT "/%s.ok", strPtr(segment))),
bufNewStr(strNew("0\nwarning")));
TEST_RESULT_BOOL(archiveAsyncStatus(segment, false), true, "ok file with warning");
TEST_RESULT_BOOL(archiveAsyncStatus(archiveModePush, segment, false), true, "ok file with warning");
testLogResult("P00 WARN: warning");

storagePutNP(
storageNewWriteNP(storageSpool(), strNewFmt(STORAGE_SPOOL_ARCHIVE_OUT "/%s.ok", strPtr(segment))),
bufNewStr(strNew("25\nerror")));
TEST_RESULT_BOOL(archiveAsyncStatus(segment, false), true, "error status renamed to ok");
TEST_RESULT_BOOL(archiveAsyncStatus(archiveModePush, segment, false), true, "error status renamed to ok");
testLogResult(
"P00 WARN: WAL segment '000000010000000100000001' was not pushed due to error [25] and was manually skipped: error");

@ -70,20 +76,22 @@ testRun()
storageNewWriteNP(storageSpool(), strNewFmt(STORAGE_SPOOL_ARCHIVE_OUT "/%s.error", strPtr(segment))),
bufNewStr(strNew("")));
TEST_ERROR(
archiveAsyncStatus(segment, false), AssertError,
archiveAsyncStatus(archiveModePush, segment, false), AssertError,
strPtr(
strNewFmt(
"multiple status files found in '%s/archive/db/out' for WAL segment '000000010000000100000001'", testPath())));

unlink(strPtr(storagePathNP(storageSpool(), strNewFmt(STORAGE_SPOOL_ARCHIVE_OUT "/%s.ok", strPtr(segment)))));
TEST_ERROR(archiveAsyncStatus(segment, true), AssertError, "status file '000000010000000100000001.error' has no content");
TEST_ERROR(
archiveAsyncStatus(archiveModePush, segment, true), AssertError,
"status file '000000010000000100000001.error' has no content");

storagePutNP(
storageNewWriteNP(storageSpool(), strNewFmt(STORAGE_SPOOL_ARCHIVE_OUT "/%s.error", strPtr(segment))),
bufNewStr(strNew("25\nmessage")));
TEST_ERROR(archiveAsyncStatus(segment, true), AssertError, "message");
TEST_ERROR(archiveAsyncStatus(archiveModePush, segment, true), AssertError, "message");

TEST_RESULT_BOOL(archiveAsyncStatus(segment, false), false, "suppress error");
TEST_RESULT_BOOL(archiveAsyncStatus(archiveModePush, segment, false), false, "suppress error");

unlink(strPtr(storagePathNP(storageSpool(), strNewFmt(STORAGE_SPOOL_ARCHIVE_OUT "/%s.error", strPtr(segment)))));
}

262 test/src/module/archive/getTest.c Normal file
@ -0,0 +1,262 @@
/***********************************************************************************************************************************
Test Archive Get Command
***********************************************************************************************************************************/
#include <sys/wait.h>

#include "common/harnessConfig.h"
#include "postgres/type.h"
#include "postgres/version.h"

/***********************************************************************************************************************************
Test Run
***********************************************************************************************************************************/
void
testRun()
{
Storage *storageTest = storageNewP(strNew(testPath()), .write = true);

// *****************************************************************************************************************************
if (testBegin("queueNeed()"))
{
StringList *argList = strLstNew();
strLstAddZ(argList, "pgbackrest");
strLstAddZ(argList, "--stanza=db");
strLstAddZ(argList, "--archive-async");
strLstAdd(argList, strNewFmt("--spool-path=%s/spool", testPath()));
strLstAddZ(argList, "archive-get");
harnessCfgLoad(strLstSize(argList), strLstPtr(argList));

size_t queueSize = WAL_SEGMENT_DEFAULT_SIZE;
size_t walSegmentSize = WAL_SEGMENT_DEFAULT_SIZE;

TEST_ERROR_FMT(
queueNeed(strNew("000000010000000100000001"), false, queueSize, walSegmentSize, PG_VERSION_92),
PathOpenError, "unable to open path '%s/spool/archive/db/in' for read: [2] No such file or directory", testPath());

// -------------------------------------------------------------------------------------------------------------------------
storagePathCreateNP(storageSpool(), strNew(STORAGE_SPOOL_ARCHIVE_IN));

TEST_RESULT_STR(
strPtr(strLstJoin(queueNeed(strNew("000000010000000100000001"), false, queueSize, walSegmentSize, PG_VERSION_92), "|")),
"000000010000000100000001|000000010000000100000002", "queue size smaller than min");

// -------------------------------------------------------------------------------------------------------------------------
queueSize = WAL_SEGMENT_DEFAULT_SIZE * 3;

TEST_RESULT_STR(
strPtr(strLstJoin(queueNeed(strNew("000000010000000100000001"), false, queueSize, walSegmentSize, PG_VERSION_92), "|")),
"000000010000000100000001|000000010000000100000002|000000010000000100000003", "empty queue");

// -------------------------------------------------------------------------------------------------------------------------
storagePutNP(
storageNewWriteNP(
storageSpool(), strNew(STORAGE_SPOOL_ARCHIVE_IN "/0000000100000001000000FE")), bufNew(walSegmentSize));
storagePutNP(
storageNewWriteNP(
storageSpool(), strNew(STORAGE_SPOOL_ARCHIVE_IN "/0000000100000001000000FF")), bufNew(walSegmentSize));

TEST_RESULT_STR(
strPtr(strLstJoin(queueNeed(strNew("0000000100000001000000FE"), false, queueSize, walSegmentSize, PG_VERSION_92), "|")),
"000000010000000200000000|000000010000000200000001", "queue has wal < 9.3");

TEST_RESULT_STR(
strPtr(strLstJoin(storageListNP(storageSpool(), strNew(STORAGE_SPOOL_ARCHIVE_IN)), "|")),
"0000000100000001000000FE", "check queue");

// -------------------------------------------------------------------------------------------------------------------------
walSegmentSize = 1024 * 1024;
queueSize = walSegmentSize * 5;

storagePutNP(storageNewWriteNP(storageSpool(), strNew(STORAGE_SPOOL_ARCHIVE_IN "/junk")), bufNew(16));
storagePutNP(
storageNewWriteNP(
storageSpool(), strNew(STORAGE_SPOOL_ARCHIVE_IN "/000000010000000A00000FFE")), bufNew(walSegmentSize));
storagePutNP(
storageNewWriteNP(
storageSpool(), strNew(STORAGE_SPOOL_ARCHIVE_IN "/000000010000000A00000FFF")), bufNew(walSegmentSize));

TEST_RESULT_STR(
strPtr(strLstJoin(queueNeed(strNew("000000010000000A00000FFD"), true, queueSize, walSegmentSize, PG_VERSION_11), "|")),
"000000010000000B00000000|000000010000000B00000001|000000010000000B00000002", "queue has wal >= 9.3");

TEST_RESULT_STR(
strPtr(strLstJoin(strLstSort(storageListNP(storageSpool(), strNew(STORAGE_SPOOL_ARCHIVE_IN)), sortOrderAsc), "|")),
"000000010000000A00000FFE|000000010000000A00000FFF", "check queue");

storagePathRemoveP(storageSpool(), strNew(STORAGE_SPOOL_ARCHIVE_IN), .recurse = true);
}

// *****************************************************************************************************************************
if (testBegin("cmdArchiveGet()"))
{
StringList *argList = strLstNew();
strLstAddZ(argList, "pgbackrest");
strLstAddZ(argList, "--archive-timeout=1");
strLstAdd(argList, strNewFmt("--log-path=%s", testPath()));
strLstAdd(argList, strNewFmt("--log-level-file=debug"));
strLstAddZ(argList, "--stanza=db");
strLstAddZ(argList, "archive-get");
harnessCfgLoad(strLstSize(argList), strLstPtr(argList));

TEST_ERROR(cmdArchiveGet(), ParamRequiredError, "WAL segment to get required");

// -------------------------------------------------------------------------------------------------------------------------
StringList *argListTemp = strLstDup(argList);
String *walSegment = strNew("000000010000000100000001");
strLstAdd(argListTemp, walSegment);
harnessCfgLoad(strLstSize(argListTemp), strLstPtr(argListTemp));

TEST_ERROR(cmdArchiveGet(), ParamRequiredError, "Path to copy WAL segment required");

// -------------------------------------------------------------------------------------------------------------------------
String *controlFile = strNew("db/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL);
PgControlFile control = {.systemId = 0xFACEFACE, .controlVersion = 1002, .catalogVersion = 201707211};
storagePutNP(storageNewWriteNP(storageTest, controlFile), bufNewC(sizeof(PgControlFile), &control));

storagePathCreateNP(storageTest, strNewFmt("%s/db/pg_wal", testPath()));

String *walFile = strNewFmt("%s/db/pg_wal/RECOVERYXLOG", testPath());
strLstAdd(argListTemp, walFile);
strLstAdd(argListTemp, strNewFmt("--pg1-path=%s/db", testPath()));
harnessCfgLoad(strLstSize(argListTemp), strLstPtr(argListTemp));

int processId = fork();

// Test this in a fork so we can use different Perl options in later tests
if (processId == 0)
{
TEST_ERROR(cmdArchiveGet(), FileMissingError, "!!!EMBEDDEDPERLERROR!!!");
exit(0);
}
else
{
int processStatus;

if (waitpid(processId, &processStatus, 0) != processId) // {uncoverable - fork() does not fail}
THROW_SYS_ERROR(AssertError, "unable to find child process"); // {uncoverable+}

if (WEXITSTATUS(processStatus) != 0)
THROW(AssertError, "perl exited with error %d", WEXITSTATUS(processStatus));
}

// -------------------------------------------------------------------------------------------------------------------------
argListTemp = strLstDup(argList);
strLstAdd(argListTemp, strNewFmt("--pg1-path=%s/db", testPath()));
strLstAddZ(argListTemp, "00000001.history");
strLstAdd(argListTemp, walFile);
strLstAddZ(argListTemp, "--archive-async");
harnessCfgLoad(strLstSize(argListTemp), strLstPtr(argListTemp));
processId = fork();

// Test this in a fork so we can use different Perl options in later tests
if (processId == 0)
{
TEST_ERROR(cmdArchiveGet(), FileMissingError, "!!!EMBEDDEDPERLERROR!!!");
exit(0);
}
else
{
int processStatus;

if (waitpid(processId, &processStatus, 0) != processId) // {uncoverable - fork() does not fail}
THROW_SYS_ERROR(AssertError, "unable to find child process"); // {uncoverable+}

if (WEXITSTATUS(processStatus) != 0)
THROW(AssertError, "perl exited with error %d", WEXITSTATUS(processStatus));
}

// Make sure the process times out when there is nothing to get
// -------------------------------------------------------------------------------------------------------------------------
strLstAdd(argList, strNewFmt("--spool-path=%s/spool", testPath()));
strLstAddZ(argList, "--archive-async");
strLstAdd(argList, walSegment);
strLstAddZ(argList, "pg_wal/RECOVERYXLOG");
harnessCfgLoad(strLstSize(argList), strLstPtr(argList));

TEST_RESULT_INT(cmdArchiveGet(), 1, "timeout getting WAL segment");

// Write out a bogus .error file to make sure it is ignored on the first loop
// -------------------------------------------------------------------------------------------------------------------------
// String *errorFile = storagePathNP(storageSpool(), strNew(STORAGE_SPOOL_ARCHIVE_IN "/000000010000000100000001.error"));
// storagePutNP(storageNewWriteNP(storageSpool(), errorFile), bufNewStr(strNew("25\n" BOGUS_STR)));
//
// TEST_ERROR(cmdArchiveGet(), AssertError, BOGUS_STR);
// unlink(strPtr(errorFile));
//
// // Wait for the lock to be released
// lockAcquire(cfgOptionStr(cfgOptLockPath), cfgOptionStr(cfgOptStanza), cfgLockType(), 30, true);
// lockRelease(true);

// Check for missing WAL
// -------------------------------------------------------------------------------------------------------------------------
strLstAdd(argList, strNewFmt("--pg1-path=%s/db", testPath()));
harnessCfgLoad(strLstSize(argList), strLstPtr(argList));

storagePutNP(
storageNewWriteNP(storageSpool(), strNewFmt(STORAGE_SPOOL_ARCHIVE_IN "/%s.ok", strPtr(walSegment))), NULL);

TEST_RESULT_VOID(cmdArchiveGet(), "successful get of missing WAL");
testLogResult("P00 INFO: unable to find WAL segment 000000010000000100000001");

TEST_RESULT_BOOL(
storageExistsNP(storageSpool(), strNewFmt(STORAGE_SPOOL_ARCHIVE_IN "/%s.ok", strPtr(walSegment))), false,
"check OK file was removed");

// Wait for the lock to be released
lockAcquire(cfgOptionStr(cfgOptLockPath), cfgOptionStr(cfgOptStanza), cfgLockType(), 30, true);
lockRelease(true);

// Write out a WAL segment for success
// -------------------------------------------------------------------------------------------------------------------------
storagePutNP(
storageNewWriteNP(storageSpool(), strNewFmt(STORAGE_SPOOL_ARCHIVE_IN "/%s", strPtr(walSegment))),
bufNewStr(strNew("SHOULD-BE-A-REAL-WAL-FILE")));

TEST_RESULT_VOID(cmdArchiveGet(), "successful get");
testLogResult("P00 INFO: got WAL segment 000000010000000100000001 asynchronously");

TEST_RESULT_BOOL(storageExistsNP(storageTest, walFile), true, "check WAL segment was moved");
storageRemoveP(storageTest, walFile, .errorOnMissing = true);

// Wait for the lock to be released
lockAcquire(cfgOptionStr(cfgOptLockPath), cfgOptionStr(cfgOptStanza), cfgLockType(), 30, true);
lockRelease(true);

// Write more WAL segments (in this case queue should be full)
// -------------------------------------------------------------------------------------------------------------------------
strLstAddZ(argList, "--archive-get-queue-max=48");
harnessCfgLoad(strLstSize(argList), strLstPtr(argList));

String *walSegment2 = strNew("000000010000000100000002");

storagePutNP(
storageNewWriteNP(storageSpool(), strNewFmt(STORAGE_SPOOL_ARCHIVE_IN "/%s", strPtr(walSegment))),
bufNewStr(strNew("SHOULD-BE-A-REAL-WAL-FILE")));
storagePutNP(
storageNewWriteNP(storageSpool(), strNewFmt(STORAGE_SPOOL_ARCHIVE_IN "/%s", strPtr(walSegment2))),
bufNewStr(strNew("SHOULD-BE-A-REAL-WAL-FILE")));

TEST_RESULT_VOID(cmdArchiveGet(), "successful get");
testLogResult("P00 INFO: got WAL segment 000000010000000100000001 asynchronously");

TEST_RESULT_BOOL(storageExistsNP(storageTest, walFile), true, "check WAL segment was moved");

// Wait for the lock to be released
lockAcquire(cfgOptionStr(cfgOptLockPath), cfgOptionStr(cfgOptStanza), cfgLockType(), 30, true);
lockRelease(true);

// Make sure the process times out when it can't get a lock
// -------------------------------------------------------------------------------------------------------------------------
TEST_RESULT_VOID(
lockAcquire(cfgOptionStr(cfgOptLockPath), cfgOptionStr(cfgOptStanza), cfgLockType(), 30, true), "acquire lock");
TEST_RESULT_VOID(lockClear(true), "clear lock");

TEST_RESULT_INT(cmdArchiveGet(), 1, "timeout waiting for lock");

// -------------------------------------------------------------------------------------------------------------------------
strLstAddZ(argList, BOGUS_STR);
harnessCfgLoad(strLstSize(argList), strLstPtr(argList));

TEST_ERROR(cmdArchiveGet(), ParamRequiredError, "extra parameters found");
}
}

@ -105,10 +105,9 @@ testRun()
"\n"
"Command Options:\n"
"\n"
" --archive-async archive WAL segments asynchronously\n"
" --archive-async push/get WAL segments asynchronously\n"
" [default=n]\n"
" --archive-push-queue-max limit size (in bytes) of the PostgreSQL\n"
" archive queue\n"
" --archive-push-queue-max maximum size of the PostgreSQL archive queue\n"
" --archive-timeout archive timeout [default=60]\n"
"\n"
"General Options:\n"

@ -61,7 +61,14 @@ testRun()
"check spool out path");
TEST_RESULT_STR(
strPtr(storagePathNP(storage, strNewFmt("%s/%s", STORAGE_SPOOL_ARCHIVE_OUT, "file.ext"))),
strPtr(strNewFmt("%s/archive/db/out/file.ext", testPath())), "check spool out path");
strPtr(strNewFmt("%s/archive/db/out/file.ext", testPath())), "check spool out file");

TEST_RESULT_STR(
strPtr(storagePathNP(storage, strNew(STORAGE_SPOOL_ARCHIVE_IN))), strPtr(strNewFmt("%s/archive/db/in", testPath())),
"check spool in path");
TEST_RESULT_STR(
strPtr(storagePathNP(storage, strNewFmt("%s/%s", STORAGE_SPOOL_ARCHIVE_IN, "file.ext"))),
strPtr(strNewFmt("%s/archive/db/in/file.ext", testPath())), "check spool in file");

TEST_ERROR(storagePathNP(storage, strNew("<" BOGUS_STR ">")), AssertError, "invalid expression '<BOGUS>'");
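For reference, a minimal sketch of how the asynchronous archive-get path exercised by the tests above might be enabled on a PostgreSQL host. The stanza name, paths, and queue size are illustrative assumptions rather than values required by this commit; only the option names (archive-async, spool-path, archive-get-queue-max) and the spool layout archive/<stanza>/in are taken from the tests.

# pgbackrest.conf (sketch; stanza name and paths are assumptions)
[global]
archive-async=y                      # enable asynchronous archive-get/archive-push
spool-path=/var/spool/pgbackrest     # prefetched WAL is queued under <spool-path>/archive/db/in
archive-get-queue-max=134217728      # queue size in bytes (128MiB here; the unit test above uses 48)

[db]
pg1-path=/var/lib/postgresql/10/main

# postgresql.conf: restore_command invokes archive-get, which is served from the local queue when possible
restore_command = 'pgbackrest --stanza=db archive-get %f "%p"'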