diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
new file mode 100644
index 00000000..ab1a5888
--- /dev/null
+++ b/.github/workflows/build.yml
@@ -0,0 +1,94 @@
+name: Build Probackup
+
+on:
+ push:
+ branches:
+ - "**"
+ # Runs triggered by pull requests are disabled to prevent executing potentially unsafe code from public pull requests
+ # pull_request:
+ # branches:
+ # - main
+
+ # Allows you to run this workflow manually from the Actions tab
+ workflow_dispatch:
+
+jobs:
+
+ build-win2019:
+
+ runs-on:
+ - windows-2019
+
+ env:
+ zlib_dir: C:\dep\zlib
+
+ steps:
+
+ - uses: actions/checkout@v2
+
+ - name: Install pacman packages
+ run: |
+ $env:PATH += ";C:\msys64\usr\bin"
+ pacman -S --noconfirm --needed bison flex
+
+ - name: Make zlib
+ run: |
+ git clone -b v1.2.11 --depth 1 https://github.com/madler/zlib.git
+ cd zlib
+ cmake -DCMAKE_INSTALL_PREFIX:PATH=C:\dep\zlib -G "Visual Studio 16 2019" .
+ cmake --build . --config Release --target ALL_BUILD
+ cmake --build . --config Release --target INSTALL
+ copy C:\dep\zlib\lib\zlibstatic.lib C:\dep\zlib\lib\zdll.lib
+ copy C:\dep\zlib\bin\zlib.dll C:\dep\zlib\lib
+
+ - name: Get Postgres sources
+ run: git clone -b REL_14_STABLE https://github.com/postgres/postgres.git
+
+ # Copy ptrack to contrib to build the ptrack extension
+      # Convert line breaks in the patch file to LF, otherwise the patch doesn't apply
+ - name: Get Ptrack sources
+ run: |
+ git clone -b master --depth 1 https://github.com/postgrespro/ptrack.git
+ Copy-Item -Path ptrack -Destination postgres\contrib -Recurse
+ (Get-Content ptrack\patches\REL_14_STABLE-ptrack-core.diff -Raw).Replace("`r`n","`n") | Set-Content ptrack\patches\REL_14_STABLE-ptrack-core.diff -Force -NoNewline
+ cd postgres
+ git apply -3 ../ptrack/patches/REL_14_STABLE-ptrack-core.diff
+
+ - name: Build Postgres
+ run: |
+ $env:PATH += ";C:\msys64\usr\bin"
+ cd postgres\src\tools\msvc
+ (Get-Content config_default.pl) -Replace "zlib *=>(.*?)(?=,? *#)", "zlib => '${{ env.zlib_dir }}'" | Set-Content config.pl
+ cmd.exe /s /c "`"C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat`" amd64 && .\build.bat"
+
+ - name: Build Probackup
+ run: cmd.exe /s /c "`"C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat`" amd64 && perl .\gen_probackup_project.pl `"${{ github.workspace }}`"\postgres"
+
+ - name: Install Postgres
+ run: |
+ cd postgres
+ src\tools\msvc\install.bat postgres_install
+
+ - name: Install Testgres
+ run: |
+ git clone -b no-port-for --single-branch --depth 1 https://github.com/postgrespro/testgres.git
+ cd testgres
+ python setup.py install
+
+      # Grant the GitHub runner user full control of the workspace for initdb to successfully process the data folder
+ - name: Test Probackup
+ run: |
+ icacls.exe "${{ github.workspace }}" /grant "${env:USERNAME}:(OI)(CI)F"
+ $env:PATH += ";${{ github.workspace }}\postgres\postgres_install\lib;${{ env.zlib_dir }}\lib"
+ $Env:LC_MESSAGES = "English"
+ $Env:PG_CONFIG = "${{ github.workspace }}\postgres\postgres_install\bin\pg_config.exe"
+ $Env:PGPROBACKUPBIN = "${{ github.workspace }}\postgres\Release\pg_probackup\pg_probackup.exe"
+ $Env:PG_PROBACKUP_PTRACK = "ON"
+ If (!$Env:MODE -Or $Env:MODE -Eq "basic") {
+ $Env:PG_PROBACKUP_TEST_BASIC = "ON"
+ python -m unittest -v tests
+ python -m unittest -v tests.init
+ } else {
+ python -m unittest -v tests.$Env:MODE
+ }
+
diff --git a/.gitignore b/.gitignore
index c0b4de33..50247360 100644
--- a/.gitignore
+++ b/.gitignore
@@ -50,7 +50,6 @@
/docker-compose.yml
/Dockerfile
/Dockerfile.in
-/run_tests.sh
/make_dockerfile.sh
/backup_restore.sh
diff --git a/.travis.yml b/.travis.yml
index 66333091..26b2bc4e 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -41,11 +41,13 @@ env:
# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=delta
# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=locking
# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=merge
+# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=option
# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=page
# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=ptrack
# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=replica
# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=retention
# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=restore
+# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=time_consuming
jobs:
allow_failures:
diff --git a/README.md b/README.md
index 060883a2..5da8d199 100644
--- a/README.md
+++ b/README.md
@@ -224,3 +224,17 @@ Postgres Professional, Moscow, Russia.
## Credits
`pg_probackup` utility is based on `pg_arman`, that was originally written by NTT and then developed and maintained by Michael Paquier.
+
+
+### Localization files (*.po)
+
+How to add a new translation language:
+1. Add the --enable-nls flag in configure.
+2. Build postgres.
+3. Add the required files to GETTEXT_FILES in nls.mk in the pg_probackup folder.
+4. Run 'make update-po' in the pg_probackup folder.
+5. As a result, the progname.pot file will be created. Copy its content into the file for the desired language.
+6. Add the required language to AVAIL_LANGUAGES in nls.mk in the pg_probackup folder.
+
+For more information, follow the link below:
+https://postgrespro.ru/docs/postgresql/12/nls-translator
diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml
index 86063b84..cb615fb1 100644
--- a/doc/pgprobackup.xml
+++ b/doc/pgprobackup.xml
@@ -3563,6 +3563,14 @@ pg_probackup catchup -b catchup_mode --source-pgdata=
of threads with the option:
pg_probackup catchup -b catchup_mode --source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir --stream --threads=num_threads
+
+
+
+ Before cloning/synchronising a PostgreSQL instance, you can run the
+ catchup command with the flag
+ to estimate the size of data files to be transferred, but make no changes on disk:
+
+pg_probackup catchup -b catchup_mode --source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir --stream --dry-run
@@ -3576,7 +3584,7 @@ pg_probackup catchup --source-pgdata=/master-pgdata --destination-pgdata=/replic
Another example shows how you can add a new remote standby server with the PostgreSQL data directory /replica-pgdata by running the catchup command in the FULL mode
on four parallel threads:
-
+
pg_probackup catchup --source-pgdata=/master-pgdata --destination-pgdata=/replica-pgdata -p 5432 -d postgres -U remote-postgres-user --stream --backup-mode=FULL --remote-host=remote-hostname --remote-user=remote-unix-username -j 4
@@ -4482,7 +4490,7 @@ pg_probackup archive-get -B backup_dir --instance catchup_mode
--source-pgdata=path_to_pgdata_on_remote_server
--destination-pgdata=path_to_local_dir
-[--help] [-j | --threads=num_threads] [--stream]
+[--help] [-j | --threads=num_threads] [--stream] [--dry-run]
[--temp-slot] [-P | --perm-slot] [-S | --slot=slot_name]
[--exclude-path=PATHNAME]
[-T OLDDIR=NEWDIR]
@@ -4571,6 +4579,19 @@ pg_probackup catchup -b catchup_mode
+
+
+
+
+ Displays the total size of the files to be transferred by catchup.
+ This flag initiates a trial run of catchup, which does
+ not actually create, delete or move files on disk. WAL streaming is skipped with .
+ This flag also allows you to check that
+ all the options are correct and cloning/synchronising is ready to run.
+
+
+
+
=path_prefix
=path_prefix
@@ -4591,17 +4612,6 @@ pg_probackup catchup -b catchup_mode
-
-
-
-
- Copies the instance in STREAM WAL delivery mode,
- including all the necessary WAL files by streaming them from
- the instance server via replication protocol.
-
-
-
-
diff --git a/gen_probackup_project.pl b/gen_probackup_project.pl
index c24db122..8143b7d0 100644
--- a/gen_probackup_project.pl
+++ b/gen_probackup_project.pl
@@ -13,11 +13,11 @@ if (($#ARGV+1)==1)
{
$pgsrc = shift @ARGV;
if($pgsrc eq "--help"){
- print STDERR "Usage $0 pg-source-dir \n";
- print STDERR "Like this: \n";
- print STDERR "$0 C:/PgProject/postgresql.10dev/postgrespro \n";
- print STDERR "May be need input this before: \n";
- print STDERR "CALL \"C:\\Program Files (x86)\\Microsoft Visual Studio 12.0\\VC\\vcvarsall\" amd64\n";
+ print STDERR "Usage $0 pg-source-dir\n";
+ print STDERR "Like this:\n";
+ print STDERR "$0 C:/PgProject/postgresql.10dev/postgrespro\n";
+ print STDERR "May need to run this first:\n";
+ print STDERR "CALL \"C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Community\\VC\\Auxiliary\\Build\\vcvarsall.bat\" amd64\n";
exit 1;
}
}
@@ -133,7 +133,7 @@ sub build_pgprobackup
unless (-d 'src/tools/msvc' && -d 'src');
# my $vsVersion = DetermineVisualStudioVersion();
- my $vsVersion = '12.00';
+ my $vsVersion = '16.00';
$solution = CreateSolution($vsVersion, $config);
diff --git a/nls.mk b/nls.mk
new file mode 100644
index 00000000..981c1c4f
--- /dev/null
+++ b/nls.mk
@@ -0,0 +1,6 @@
+# contrib/pg_probackup/nls.mk
+CATALOG_NAME = pg_probackup
+AVAIL_LANGUAGES = ru
+GETTEXT_FILES = src/help.c
+GETTEXT_TRIGGERS = $(FRONTEND_COMMON_GETTEXT_TRIGGERS)
+GETTEXT_FLAGS = $(FRONTEND_COMMON_GETTEXT_FLAGS)
diff --git a/packaging/pkg/scripts/rpm.sh b/packaging/pkg/scripts/rpm.sh
index d03915c2..2fec4a70 100755
--- a/packaging/pkg/scripts/rpm.sh
+++ b/packaging/pkg/scripts/rpm.sh
@@ -20,7 +20,15 @@ ulimit -n 1024
if [ ${DISTRIB} = 'centos' ] ; then
sed -i 's|^baseurl=http://|baseurl=https://|g' /etc/yum.repos.d/*.repo
+ if [ ${DISTRIB_VERSION} = '8' ]; then
+ sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo
+ sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo
+ fi
yum update -y
+ if [ ${DISTRIB_VERSION} = '8' ]; then
+ sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo
+ sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo
+ fi
fi
# PACKAGES NEEDED
diff --git a/packaging/test/scripts/rpm.sh b/packaging/test/scripts/rpm.sh
index 92804a7f..3b680699 100755
--- a/packaging/test/scripts/rpm.sh
+++ b/packaging/test/scripts/rpm.sh
@@ -15,7 +15,16 @@ PG_TOG=$(echo $PG_VERSION | sed 's|\.||g')
if [ ${DISTRIB} != 'rhel' -o ${DISTRIB_VERSION} != '7' ]; then
# update of rpm package is broken in rhel-7 (26/12/2022)
- yum update -y
+ #yum update -y
+ if [ ${DISTRIB} = 'centos' -a ${DISTRIB_VERSION} = '8' ]; then
+ sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo
+ sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo
+ fi
+ yum update -y
+ if [ ${DISTRIB} = 'centos' -a ${DISTRIB_VERSION} = '8' ]; then
+ sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo
+ sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo
+ fi
fi
# yum upgrade -y || echo 'some packages in docker failed to upgrade'
# yum install -y sudo
diff --git a/packaging/test/scripts/rpm_forks.sh b/packaging/test/scripts/rpm_forks.sh
index 0d72040e..d5771169 100755
--- a/packaging/test/scripts/rpm_forks.sh
+++ b/packaging/test/scripts/rpm_forks.sh
@@ -15,7 +15,15 @@ PG_TOG=$(echo $PG_VERSION | sed 's|\.||g')
if [ ${DISTRIB} != 'rhel' -o ${DISTRIB_VERSION} != '7' ]; then
# update of rpm package is broken in rhel-7 (26/12/2022)
+ if [ ${DISTRIB} = 'centos' -a ${DISTRIB_VERSION} = '8' ]; then
+ sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo
+ sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo
+ fi
yum update -y
+ if [ ${DISTRIB} = 'centos' -a ${DISTRIB_VERSION} = '8' ]; then
+ sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo
+ sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo
+ fi
fi
if [ ${PBK_EDITION} == 'ent' ]; then
@@ -80,11 +88,13 @@ if [ $PBK_EDITION == 'std' ] ; then
# install POSTGRESQL
# rpm -ivh https://download.postgresql.org/pub/repos/yum/reporpms/EL-${DISTRIB_VERSION}-x86_64/pgdg-redhat-repo-latest.noarch.rpm
- if [[ ${PG_VERSION} == '11' ]] || [[ ${PG_VERSION} == '12' ]]; then
- rpm -ivh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/postgrespro-std-${PG_VERSION}.${DISTRIB}.yum-${PG_VERSION}-0.3.noarch.rpm
- else
- rpm -ivh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/postgrespro-std-${PG_VERSION}.${DISTRIB}.yum-${PG_VERSION}-0.3.noarch.rpm
- fi
+ #if [[ ${PG_VERSION} == '11' ]] || [[ ${PG_VERSION} == '12' ]]; then
+ # rpm -ivh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/postgrespro-std-${PG_VERSION}.${DISTRIB}.yum-${PG_VERSION}-0.3.noarch.rpm
+ #else
+ # rpm -ivh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/postgrespro-std-${PG_VERSION}.${DISTRIB}.yum-${PG_VERSION}-0.3.noarch.rpm
+ #fi
+ curl -o pgpro-repo-add.sh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/pgpro-repo-add.sh
+ sh pgpro-repo-add.sh
if [[ ${PG_VERSION} == '9.6' ]]; then
yum install -y postgrespro${PG_TOG}-server.x86_64
diff --git a/po/ru.po b/po/ru.po
new file mode 100644
index 00000000..1263675c
--- /dev/null
+++ b/po/ru.po
@@ -0,0 +1,1880 @@
+# Russian message translation file for pg_probackup
+# Copyright (C) 2022 PostgreSQL Global Development Group
+# This file is distributed under the same license as the pg_probackup (PostgreSQL) package.
+# Vyacheslav Makarov , 2022.
+msgid ""
+msgstr ""
+"Project-Id-Version: pg_probackup (PostgreSQL)\n"
+"Report-Msgid-Bugs-To: bugs@postgrespro.ru\n"
+"POT-Creation-Date: 2022-04-08 11:33+0300\n"
+"PO-Revision-Date: 2022-MO-DA HO:MI+ZONE\n"
+"Last-Translator: Vyacheslav Makarov \n"
+"Language-Team: Russian \n"
+"Language: ru\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n"
+"%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n"
+
+#: src/help.c:84
+#, c-format
+msgid ""
+"\n"
+"%s - utility to manage backup/recovery of PostgreSQL database.\n"
+msgstr ""
+"\n"
+"%s - утилита для управления резервным копированием/восстановлением базы данных PostgreSQL.\n"
+
+#: src/help.c:86
+#, c-format
+msgid ""
+"\n"
+" %s help [COMMAND]\n"
+msgstr ""
+
+#: src/help.c:88
+#, c-format
+msgid ""
+"\n"
+" %s version\n"
+msgstr ""
+
+#: src/help.c:90
+#, c-format
+msgid ""
+"\n"
+" %s init -B backup-path\n"
+msgstr ""
+
+#: src/help.c:92
+#, c-format
+msgid ""
+"\n"
+" %s set-config -B backup-path --instance=instance_name\n"
+msgstr ""
+
+#: src/help.c:93 src/help.c:791
+#, c-format
+msgid " [-D pgdata-path]\n"
+msgstr ""
+
+#: src/help.c:94 src/help.c:130 src/help.c:218
+#, c-format
+msgid " [--external-dirs=external-directories-paths]\n"
+msgstr ""
+
+#: src/help.c:95 src/help.c:132 src/help.c:305 src/help.c:731 src/help.c:794
+#, c-format
+msgid " [--log-level-console=log-level-console]\n"
+msgstr ""
+
+#: src/help.c:96 src/help.c:133 src/help.c:306 src/help.c:732 src/help.c:795
+#, c-format
+msgid " [--log-level-file=log-level-file]\n"
+msgstr ""
+
+#: src/help.c:97 src/help.c:134 src/help.c:307 src/help.c:733 src/help.c:796
+#, c-format
+msgid " [--log-filename=log-filename]\n"
+msgstr ""
+
+#: src/help.c:98 src/help.c:135 src/help.c:308 src/help.c:734 src/help.c:797
+#, c-format
+msgid " [--error-log-filename=error-log-filename]\n"
+msgstr ""
+
+#: src/help.c:99 src/help.c:136 src/help.c:309 src/help.c:735 src/help.c:798
+#, c-format
+msgid " [--log-directory=log-directory]\n"
+msgstr ""
+
+#: src/help.c:100 src/help.c:137 src/help.c:310 src/help.c:736 src/help.c:799
+#, c-format
+msgid " [--log-rotation-size=log-rotation-size]\n"
+msgstr ""
+
+#: src/help.c:101 src/help.c:800
+#, c-format
+msgid " [--log-rotation-age=log-rotation-age]\n"
+msgstr ""
+
+#: src/help.c:102 src/help.c:140 src/help.c:203 src/help.c:313 src/help.c:674
+#: src/help.c:801
+#, c-format
+msgid " [--retention-redundancy=retention-redundancy]\n"
+msgstr ""
+
+#: src/help.c:103 src/help.c:141 src/help.c:204 src/help.c:314 src/help.c:675
+#: src/help.c:802
+#, c-format
+msgid " [--retention-window=retention-window]\n"
+msgstr ""
+
+#: src/help.c:104 src/help.c:142 src/help.c:205 src/help.c:315 src/help.c:676
+#: src/help.c:803
+#, c-format
+msgid " [--wal-depth=wal-depth]\n"
+msgstr ""
+
+#: src/help.c:105 src/help.c:144 src/help.c:235 src/help.c:317 src/help.c:804
+#: src/help.c:948
+#, c-format
+msgid " [--compress-algorithm=compress-algorithm]\n"
+msgstr ""
+
+#: src/help.c:106 src/help.c:145 src/help.c:236 src/help.c:318 src/help.c:805
+#: src/help.c:949
+#, c-format
+msgid " [--compress-level=compress-level]\n"
+msgstr ""
+
+#: src/help.c:107 src/help.c:232 src/help.c:806 src/help.c:945
+#, c-format
+msgid " [--archive-timeout=timeout]\n"
+msgstr ""
+
+#: src/help.c:108 src/help.c:147 src/help.c:259 src/help.c:320 src/help.c:807
+#: src/help.c:1045
+#, c-format
+msgid " [-d dbname] [-h host] [-p port] [-U username]\n"
+msgstr ""
+
+#: src/help.c:109 src/help.c:149 src/help.c:174 src/help.c:219 src/help.c:237
+#: src/help.c:247 src/help.c:261 src/help.c:322 src/help.c:449 src/help.c:808
+#: src/help.c:906 src/help.c:950 src/help.c:994 src/help.c:1047
+#, c-format
+msgid " [--remote-proto] [--remote-host]\n"
+msgstr ""
+
+#: src/help.c:110 src/help.c:150 src/help.c:175 src/help.c:220 src/help.c:238
+#: src/help.c:248 src/help.c:262 src/help.c:323 src/help.c:450 src/help.c:809
+#: src/help.c:907 src/help.c:951 src/help.c:995 src/help.c:1048
+#, c-format
+msgid " [--remote-port] [--remote-path] [--remote-user]\n"
+msgstr ""
+
+#: src/help.c:111 src/help.c:151 src/help.c:176 src/help.c:221 src/help.c:239
+#: src/help.c:249 src/help.c:263 src/help.c:324 src/help.c:451 src/help.c:1049
+#, c-format
+msgid " [--ssh-options]\n"
+msgstr ""
+
+#: src/help.c:112
+#, c-format
+msgid " [--restore-command=cmdline] [--archive-host=destination]\n"
+msgstr ""
+
+#: src/help.c:113 src/help.c:178
+#, c-format
+msgid " [--archive-port=port] [--archive-user=username]\n"
+msgstr ""
+
+#: src/help.c:114 src/help.c:119 src/help.c:123 src/help.c:153 src/help.c:179
+#: src/help.c:188 src/help.c:194 src/help.c:209 src/help.c:214 src/help.c:222
+#: src/help.c:226 src/help.c:240 src/help.c:250 src/help.c:264
+#, c-format
+msgid " [--help]\n"
+msgstr ""
+
+#: src/help.c:116
+#, c-format
+msgid ""
+"\n"
+" %s set-backup -B backup-path --instance=instance_name\n"
+msgstr ""
+
+#: src/help.c:117
+#, c-format
+msgid " -i backup-id [--ttl=interval] [--expire-time=timestamp]\n"
+msgstr ""
+
+#: src/help.c:118
+#, c-format
+msgid " [--note=text]\n"
+msgstr ""
+
+#: src/help.c:121
+#, c-format
+msgid ""
+"\n"
+" %s show-config -B backup-path --instance=instance_name\n"
+msgstr ""
+
+#: src/help.c:122
+#, c-format
+msgid " [--format=format]\n"
+msgstr ""
+
+#: src/help.c:125
+#, c-format
+msgid ""
+"\n"
+" %s backup -B backup-path -b backup-mode --instance=instance_name\n"
+msgstr ""
+
+#: src/help.c:126 src/help.c:299
+#, c-format
+msgid " [-D pgdata-path] [-C]\n"
+msgstr ""
+
+#: src/help.c:127 src/help.c:300
+#, c-format
+msgid " [--stream [-S slot-name] [--temp-slot]]\n"
+msgstr ""
+
+#: src/help.c:128 src/help.c:301
+#, c-format
+msgid " [--backup-pg-log] [-j num-threads] [--progress]\n"
+msgstr ""
+
+#: src/help.c:129 src/help.c:168 src/help.c:302 src/help.c:433
+#, c-format
+msgid " [--no-validate] [--skip-block-validation]\n"
+msgstr ""
+
+#: src/help.c:131 src/help.c:304
+#, c-format
+msgid " [--no-sync]\n"
+msgstr ""
+
+#: src/help.c:138 src/help.c:311
+#, c-format
+msgid " [--log-rotation-age=log-rotation-age] [--no-color]\n"
+msgstr ""
+
+#: src/help.c:139 src/help.c:312
+#, c-format
+msgid " [--delete-expired] [--delete-wal] [--merge-expired]\n"
+msgstr ""
+
+#: src/help.c:143 src/help.c:316
+#, c-format
+msgid " [--compress]\n"
+msgstr ""
+
+#: src/help.c:146 src/help.c:319
+#, c-format
+msgid " [--archive-timeout=archive-timeout]\n"
+msgstr ""
+
+#: src/help.c:148 src/help.c:260 src/help.c:321 src/help.c:1046
+#, c-format
+msgid " [-w --no-password] [-W --password]\n"
+msgstr ""
+
+#: src/help.c:152
+#, c-format
+msgid " [--ttl=interval] [--expire-time=timestamp] [--note=text]\n"
+msgstr ""
+
+#: src/help.c:156
+#, c-format
+msgid ""
+"\n"
+" %s restore -B backup-path --instance=instance_name\n"
+msgstr ""
+
+#: src/help.c:157 src/help.c:431
+#, c-format
+msgid " [-D pgdata-path] [-i backup-id] [-j num-threads]\n"
+msgstr ""
+
+#: src/help.c:158 src/help.c:183 src/help.c:439 src/help.c:552
+#, c-format
+msgid " [--recovery-target-time=time|--recovery-target-xid=xid\n"
+msgstr ""
+
+#: src/help.c:159 src/help.c:184 src/help.c:440 src/help.c:553
+#, c-format
+msgid " |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]]\n"
+msgstr ""
+
+#: src/help.c:160 src/help.c:185 src/help.c:441 src/help.c:554
+#, c-format
+msgid " [--recovery-target-timeline=timeline]\n"
+msgstr ""
+
+#: src/help.c:161 src/help.c:442
+#, c-format
+msgid " [--recovery-target=immediate|latest]\n"
+msgstr ""
+
+#: src/help.c:162 src/help.c:186 src/help.c:443 src/help.c:555
+#, c-format
+msgid " [--recovery-target-name=target-name]\n"
+msgstr ""
+
+#: src/help.c:163 src/help.c:444
+#, c-format
+msgid " [--recovery-target-action=pause|promote|shutdown]\n"
+msgstr ""
+
+#: src/help.c:164 src/help.c:445 src/help.c:793
+#, c-format
+msgid " [--restore-command=cmdline]\n"
+msgstr ""
+
+#: src/help.c:165
+#, c-format
+msgid " [-R | --restore-as-replica] [--force]\n"
+msgstr ""
+
+#: src/help.c:166 src/help.c:447
+#, c-format
+msgid " [--primary-conninfo=primary_conninfo]\n"
+msgstr ""
+
+#: src/help.c:167 src/help.c:448
+#, c-format
+msgid " [-S | --primary-slot-name=slotname]\n"
+msgstr ""
+
+#: src/help.c:169
+#, c-format
+msgid " [-T OLDDIR=NEWDIR] [--progress]\n"
+msgstr ""
+
+#: src/help.c:170 src/help.c:435
+#, c-format
+msgid " [--external-mapping=OLDDIR=NEWDIR]\n"
+msgstr ""
+
+#: src/help.c:171
+#, c-format
+msgid " [--skip-external-dirs] [--no-sync]\n"
+msgstr ""
+
+#: src/help.c:172 src/help.c:437
+#, c-format
+msgid " [-I | --incremental-mode=none|checksum|lsn]\n"
+msgstr ""
+
+#: src/help.c:173
+#, c-format
+msgid " [--db-include | --db-exclude]\n"
+msgstr ""
+
+#: src/help.c:177
+#, c-format
+msgid " [--archive-host=hostname]\n"
+msgstr ""
+
+#: src/help.c:181
+#, c-format
+msgid ""
+"\n"
+" %s validate -B backup-path [--instance=instance_name]\n"
+msgstr ""
+
+#: src/help.c:182 src/help.c:551
+#, c-format
+msgid " [-i backup-id] [--progress] [-j num-threads]\n"
+msgstr ""
+
+#: src/help.c:187
+#, c-format
+msgid " [--skip-block-validation]\n"
+msgstr ""
+
+#: src/help.c:190
+#, c-format
+msgid ""
+"\n"
+" %s checkdb [-B backup-path] [--instance=instance_name]\n"
+msgstr ""
+
+#: src/help.c:191
+#, c-format
+msgid " [-D pgdata-path] [--progress] [-j num-threads]\n"
+msgstr ""
+
+#: src/help.c:192 src/help.c:603
+#, c-format
+msgid " [--amcheck] [--skip-block-validation]\n"
+msgstr ""
+
+#: src/help.c:193
+#, c-format
+msgid " [--heapallindexed] [--checkunique]\n"
+msgstr ""
+
+#: src/help.c:196
+#, c-format
+msgid ""
+"\n"
+" %s show -B backup-path\n"
+msgstr ""
+
+#: src/help.c:197 src/help.c:657
+#, c-format
+msgid " [--instance=instance_name [-i backup-id]]\n"
+msgstr ""
+
+#: src/help.c:198
+#, c-format
+msgid " [--format=format] [--archive]\n"
+msgstr ""
+
+#: src/help.c:199
+#, c-format
+msgid " [--no-color] [--help]\n"
+msgstr ""
+
+#: src/help.c:201
+#, c-format
+msgid ""
+"\n"
+" %s delete -B backup-path --instance=instance_name\n"
+msgstr ""
+
+#: src/help.c:202 src/help.c:673
+#, c-format
+msgid " [-j num-threads] [--progress]\n"
+msgstr ""
+
+#: src/help.c:206
+#, c-format
+msgid " [-i backup-id | --delete-expired | --merge-expired | --status=backup_status]\n"
+msgstr ""
+
+#: src/help.c:207
+#, c-format
+msgid " [--delete-wal]\n"
+msgstr ""
+
+#: src/help.c:208
+#, c-format
+msgid " [--dry-run] [--no-validate] [--no-sync]\n"
+msgstr ""
+
+#: src/help.c:211
+#, c-format
+msgid ""
+"\n"
+" %s merge -B backup-path --instance=instance_name\n"
+msgstr ""
+
+#: src/help.c:212
+#, c-format
+msgid " -i backup-id [--progress] [-j num-threads]\n"
+msgstr ""
+
+#: src/help.c:213 src/help.c:730
+#, c-format
+msgid " [--no-validate] [--no-sync]\n"
+msgstr ""
+
+#: src/help.c:216
+#, c-format
+msgid ""
+"\n"
+" %s add-instance -B backup-path -D pgdata-path\n"
+msgstr ""
+
+#: src/help.c:217 src/help.c:225 src/help.c:904
+#, c-format
+msgid " --instance=instance_name\n"
+msgstr ""
+
+#: src/help.c:224
+#, c-format
+msgid ""
+"\n"
+" %s del-instance -B backup-path\n"
+msgstr ""
+
+#: src/help.c:228
+#, c-format
+msgid ""
+"\n"
+" %s archive-push -B backup-path --instance=instance_name\n"
+msgstr ""
+
+#: src/help.c:229 src/help.c:244 src/help.c:942 src/help.c:990
+#, c-format
+msgid " --wal-file-name=wal-file-name\n"
+msgstr ""
+
+#: src/help.c:230 src/help.c:943 src/help.c:991
+#, c-format
+msgid " [--wal-file-path=wal-file-path]\n"
+msgstr ""
+
+#: src/help.c:231 src/help.c:245 src/help.c:944 src/help.c:992
+#, c-format
+msgid " [-j num-threads] [--batch-size=batch_size]\n"
+msgstr ""
+
+#: src/help.c:233 src/help.c:946
+#, c-format
+msgid " [--no-ready-rename] [--no-sync]\n"
+msgstr ""
+
+#: src/help.c:234 src/help.c:947
+#, c-format
+msgid " [--overwrite] [--compress]\n"
+msgstr ""
+
+#: src/help.c:242
+#, c-format
+msgid ""
+"\n"
+" %s archive-get -B backup-path --instance=instance_name\n"
+msgstr ""
+
+#: src/help.c:243
+#, c-format
+msgid " --wal-file-path=wal-file-path\n"
+msgstr ""
+
+#: src/help.c:246 src/help.c:993
+#, c-format
+msgid " [--no-validate-wal]\n"
+msgstr ""
+
+#: src/help.c:252
+#, c-format
+msgid ""
+"\n"
+" %s catchup -b catchup-mode\n"
+msgstr ""
+
+#: src/help.c:253 src/help.c:1039
+#, c-format
+msgid " --source-pgdata=path_to_pgdata_on_remote_server\n"
+msgstr ""
+
+#: src/help.c:254 src/help.c:1040
+#, c-format
+msgid " --destination-pgdata=path_to_local_dir\n"
+msgstr ""
+
+#: src/help.c:255
+#, c-format
+msgid " [--stream [-S slot-name] [--temp-slot | --perm-slot]]\n"
+msgstr ""
+
+#: src/help.c:256 src/help.c:1042
+#, c-format
+msgid " [-j num-threads]\n"
+msgstr ""
+
+#: src/help.c:257 src/help.c:434 src/help.c:1043
+#, c-format
+msgid " [-T OLDDIR=NEWDIR]\n"
+msgstr ""
+
+#: src/help.c:258 src/help.c:1044
+#, c-format
+msgid " [--exclude-path=path_prefix]\n"
+msgstr ""
+
+#: src/help.c:270
+#, c-format
+msgid "Read the website for details <%s>.\n"
+msgstr "Подробнее читайте на сайте <%s>.\n"
+
+#: src/help.c:272
+#, c-format
+msgid "Report bugs to <%s>.\n"
+msgstr "Сообщайте об ошибках в <%s>.\n"
+
+#: src/help.c:279
+#, c-format
+msgid ""
+"\n"
+"Unknown command. Try pg_probackup help\n"
+"\n"
+msgstr ""
+"\n"
+"Неизвестная команда. Попробуйте pg_probackup help\n"
+"\n"
+
+#: src/help.c:285
+#, c-format
+msgid ""
+"\n"
+"This command is intended for internal use\n"
+"\n"
+msgstr ""
+
+#: src/help.c:291
+#, c-format
+msgid ""
+"\n"
+"%s init -B backup-path\n"
+"\n"
+msgstr ""
+
+#: src/help.c:292
+#, c-format
+msgid ""
+" -B, --backup-path=backup-path location of the backup storage area\n"
+"\n"
+msgstr ""
+
+#: src/help.c:298
+#, c-format
+msgid ""
+"\n"
+"%s backup -B backup-path -b backup-mode --instance=instance_name\n"
+msgstr ""
+
+#: src/help.c:303 src/help.c:792
+#, c-format
+msgid " [-E external-directories-paths]\n"
+msgstr ""
+
+#: src/help.c:325
+#, c-format
+msgid ""
+" [--ttl=interval] [--expire-time=timestamp] [--note=text]\n"
+"\n"
+msgstr ""
+
+#: src/help.c:327 src/help.c:455 src/help.c:558 src/help.c:606 src/help.c:660
+#: src/help.c:679 src/help.c:739 src/help.c:812 src/help.c:895 src/help.c:910
+#: src/help.c:934 src/help.c:954 src/help.c:998
+#, c-format
+msgid " -B, --backup-path=backup-path location of the backup storage area\n"
+msgstr ""
+
+#: src/help.c:328
+#, c-format
+msgid " -b, --backup-mode=backup-mode backup mode=FULL|PAGE|DELTA|PTRACK\n"
+msgstr ""
+
+#: src/help.c:329 src/help.c:456 src/help.c:559 src/help.c:607 src/help.c:680
+#: src/help.c:740 src/help.c:813 src/help.c:896
+#, c-format
+msgid " --instance=instance_name name of the instance\n"
+msgstr ""
+
+#: src/help.c:330 src/help.c:458 src/help.c:608 src/help.c:814 src/help.c:911
+#, c-format
+msgid " -D, --pgdata=pgdata-path location of the database storage area\n"
+msgstr ""
+
+#: src/help.c:331
+#, c-format
+msgid " -C, --smooth-checkpoint do smooth checkpoint before backup\n"
+msgstr ""
+
+#: src/help.c:332
+#, c-format
+msgid " --stream stream the transaction log and include it in the backup\n"
+msgstr ""
+
+#: src/help.c:333 src/help.c:1054
+#, c-format
+msgid " -S, --slot=SLOTNAME replication slot to use\n"
+msgstr ""
+
+#: src/help.c:334 src/help.c:1055
+#, c-format
+msgid " --temp-slot use temporary replication slot\n"
+msgstr ""
+
+#: src/help.c:335
+#, c-format
+msgid " --backup-pg-log backup of '%s' directory\n"
+msgstr ""
+
+#: src/help.c:336 src/help.c:460 src/help.c:563 src/help.c:611 src/help.c:682
+#: src/help.c:743 src/help.c:960 src/help.c:1004 src/help.c:1058
+#, c-format
+msgid " -j, --threads=NUM number of parallel threads\n"
+msgstr ""
+
+#: src/help.c:337 src/help.c:462 src/help.c:562 src/help.c:610 src/help.c:683
+#: src/help.c:744
+#, c-format
+msgid " --progress show progress\n"
+msgstr ""
+
+#: src/help.c:338
+#, c-format
+msgid " --no-validate disable validation after backup\n"
+msgstr ""
+
+#: src/help.c:339 src/help.c:466 src/help.c:573
+#, c-format
+msgid " --skip-block-validation set to validate only file-level checksum\n"
+msgstr ""
+
+#: src/help.c:340 src/help.c:815 src/help.c:914
+#, c-format
+msgid " -E --external-dirs=external-directories-paths\n"
+msgstr ""
+
+#: src/help.c:341 src/help.c:816 src/help.c:915
+#, c-format
+msgid " backup some directories not from pgdata \n"
+msgstr ""
+
+#: src/help.c:342 src/help.c:817 src/help.c:916
+#, c-format
+msgid " (example: --external-dirs=/tmp/dir1:/tmp/dir2)\n"
+msgstr ""
+
+#: src/help.c:343
+#, c-format
+msgid " --no-sync do not sync backed up files to disk\n"
+msgstr ""
+
+#: src/help.c:344
+#, c-format
+msgid " --note=text add note to backup\n"
+msgstr ""
+
+#: src/help.c:345 src/help.c:784
+#, c-format
+msgid " (example: --note='backup before app update to v13.1')\n"
+msgstr ""
+
+#: src/help.c:347 src/help.c:508 src/help.c:575 src/help.c:622 src/help.c:702
+#: src/help.c:748 src/help.c:820
+#, c-format
+msgid ""
+"\n"
+" Logging options:\n"
+msgstr ""
+
+#: src/help.c:348 src/help.c:509 src/help.c:576 src/help.c:623 src/help.c:703
+#: src/help.c:749 src/help.c:821
+#, c-format
+msgid " --log-level-console=log-level-console\n"
+msgstr ""
+
+#: src/help.c:349 src/help.c:510 src/help.c:577 src/help.c:624 src/help.c:704
+#: src/help.c:750 src/help.c:822
+#, c-format
+msgid " level for console logging (default: info)\n"
+msgstr ""
+
+#: src/help.c:350 src/help.c:353 src/help.c:511 src/help.c:514 src/help.c:578
+#: src/help.c:581 src/help.c:625 src/help.c:628 src/help.c:705 src/help.c:708
+#: src/help.c:751 src/help.c:754 src/help.c:823 src/help.c:826
+#, c-format
+msgid " available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n"
+msgstr ""
+
+#: src/help.c:351 src/help.c:512 src/help.c:579 src/help.c:626 src/help.c:706
+#: src/help.c:752 src/help.c:824
+#, c-format
+msgid " --log-level-file=log-level-file\n"
+msgstr ""
+
+#: src/help.c:352 src/help.c:513 src/help.c:580 src/help.c:627 src/help.c:707
+#: src/help.c:753 src/help.c:825
+#, c-format
+msgid " level for file logging (default: off)\n"
+msgstr ""
+
+#: src/help.c:354 src/help.c:515 src/help.c:582 src/help.c:629 src/help.c:709
+#: src/help.c:755 src/help.c:827
+#, c-format
+msgid " --log-filename=log-filename\n"
+msgstr ""
+
+#: src/help.c:355 src/help.c:516 src/help.c:583 src/help.c:630 src/help.c:710
+#: src/help.c:756 src/help.c:828
+#, c-format
+msgid " filename for file logging (default: 'pg_probackup.log')\n"
+msgstr ""
+
+#: src/help.c:356 src/help.c:517 src/help.c:584 src/help.c:711 src/help.c:757
+#: src/help.c:829
+#, c-format
+msgid " support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n"
+msgstr ""
+
+#: src/help.c:357 src/help.c:518 src/help.c:585 src/help.c:632 src/help.c:712
+#: src/help.c:758 src/help.c:830
+#, c-format
+msgid " --error-log-filename=error-log-filename\n"
+msgstr ""
+
+#: src/help.c:358 src/help.c:519 src/help.c:586 src/help.c:633 src/help.c:713
+#: src/help.c:759 src/help.c:831
+#, c-format
+msgid " filename for error logging (default: none)\n"
+msgstr ""
+
+#: src/help.c:359 src/help.c:520 src/help.c:587 src/help.c:634 src/help.c:714
+#: src/help.c:760 src/help.c:832
+#, c-format
+msgid " --log-directory=log-directory\n"
+msgstr ""
+
+#: src/help.c:360 src/help.c:521 src/help.c:588 src/help.c:635 src/help.c:715
+#: src/help.c:761 src/help.c:833
+#, c-format
+msgid " directory for file logging (default: BACKUP_PATH/log)\n"
+msgstr ""
+
+#: src/help.c:361 src/help.c:522 src/help.c:589 src/help.c:636 src/help.c:716
+#: src/help.c:762 src/help.c:834
+#, c-format
+msgid " --log-rotation-size=log-rotation-size\n"
+msgstr ""
+
+#: src/help.c:362 src/help.c:523 src/help.c:590 src/help.c:637 src/help.c:717
+#: src/help.c:763 src/help.c:835
+#, c-format
+msgid " rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n"
+msgstr ""
+
+#: src/help.c:363 src/help.c:524 src/help.c:591 src/help.c:638 src/help.c:718
+#: src/help.c:764 src/help.c:836
+#, c-format
+msgid " available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n"
+msgstr ""
+
+#: src/help.c:364 src/help.c:525 src/help.c:592 src/help.c:639 src/help.c:719
+#: src/help.c:765 src/help.c:837
+#, c-format
+msgid " --log-rotation-age=log-rotation-age\n"
+msgstr ""
+
+#: src/help.c:365 src/help.c:526 src/help.c:593 src/help.c:640 src/help.c:720
+#: src/help.c:766 src/help.c:838
+#, c-format
+msgid " rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n"
+msgstr ""
+
+#: src/help.c:366 src/help.c:527 src/help.c:594 src/help.c:641 src/help.c:721
+#: src/help.c:767 src/help.c:839
+#, c-format
+msgid " available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n"
+msgstr ""
+
+#: src/help.c:367 src/help.c:528 src/help.c:642
+#, c-format
+msgid " --no-color disable the coloring of error and warning console messages\n"
+msgstr ""
+
+#: src/help.c:369 src/help.c:687 src/help.c:841
+#, c-format
+msgid ""
+"\n"
+" Retention options:\n"
+msgstr ""
+
+#: src/help.c:370 src/help.c:688
+#, c-format
+msgid " --delete-expired delete backups expired according to current\n"
+msgstr ""
+
+#: src/help.c:371 src/help.c:373
+#, c-format
+msgid " retention policy after successful backup completion\n"
+msgstr ""
+
+#: src/help.c:372 src/help.c:690
+#, c-format
+msgid " --merge-expired merge backups expired according to current\n"
+msgstr ""
+
+#: src/help.c:374 src/help.c:692
+#, c-format
+msgid " --delete-wal remove redundant files in WAL archive\n"
+msgstr ""
+
+#: src/help.c:375 src/help.c:693 src/help.c:842
+#, c-format
+msgid " --retention-redundancy=retention-redundancy\n"
+msgstr ""
+
+#: src/help.c:376 src/help.c:694 src/help.c:843
+#, c-format
+msgid " number of full backups to keep; 0 disables; (default: 0)\n"
+msgstr ""
+
+#: src/help.c:377 src/help.c:695 src/help.c:844
+#, c-format
+msgid " --retention-window=retention-window\n"
+msgstr ""
+
+#: src/help.c:378 src/help.c:696 src/help.c:845
+#, c-format
+msgid " number of days of recoverability; 0 disables; (default: 0)\n"
+msgstr ""
+
+#: src/help.c:379 src/help.c:697
+#, c-format
+msgid " --wal-depth=wal-depth number of latest valid backups per timeline that must\n"
+msgstr ""
+
+#: src/help.c:380 src/help.c:698
+#, c-format
+msgid " retain the ability to perform PITR; 0 disables; (default: 0)\n"
+msgstr ""
+
+#: src/help.c:381 src/help.c:699
+#, c-format
+msgid " --dry-run perform a trial run without any changes\n"
+msgstr ""
+
+#: src/help.c:383
+#, c-format
+msgid ""
+"\n"
+" Pinning options:\n"
+msgstr ""
+
+#: src/help.c:384 src/help.c:778
+#, c-format
+msgid " --ttl=interval pin backup for specified amount of time; 0 unpin\n"
+msgstr ""
+
+#: src/help.c:385 src/help.c:779
+#, c-format
+msgid " available units: 'ms', 's', 'min', 'h', 'd' (default: s)\n"
+msgstr ""
+
+#: src/help.c:386 src/help.c:780
+#, c-format
+msgid " (example: --ttl=20d)\n"
+msgstr ""
+
+#: src/help.c:387 src/help.c:781
+#, c-format
+msgid " --expire-time=time pin backup until specified time stamp\n"
+msgstr ""
+
+#: src/help.c:388 src/help.c:782
+#, c-format
+msgid " (example: --expire-time='2024-01-01 00:00:00+03')\n"
+msgstr ""
+
+#: src/help.c:390 src/help.c:849 src/help.c:967
+#, c-format
+msgid ""
+"\n"
+" Compression options:\n"
+msgstr ""
+
+#: src/help.c:391 src/help.c:850 src/help.c:968
+#, c-format
+msgid " --compress alias for --compress-algorithm='zlib' and --compress-level=1\n"
+msgstr ""
+
+#: src/help.c:392 src/help.c:851 src/help.c:969
+#, c-format
+msgid " --compress-algorithm=compress-algorithm\n"
+msgstr ""
+
+#: src/help.c:393
+#, c-format
+msgid " available options: 'zlib', 'pglz', 'none' (default: none)\n"
+msgstr ""
+
+#: src/help.c:394 src/help.c:853 src/help.c:971
+#, c-format
+msgid " --compress-level=compress-level\n"
+msgstr ""
+
+#: src/help.c:395 src/help.c:854 src/help.c:972
+#, c-format
+msgid " level of compression [0-9] (default: 1)\n"
+msgstr ""
+
+#: src/help.c:397 src/help.c:856
+#, c-format
+msgid ""
+"\n"
+" Archive options:\n"
+msgstr ""
+
+#: src/help.c:398 src/help.c:857
+#, c-format
+msgid " --archive-timeout=timeout wait timeout for WAL segment archiving (default: 5min)\n"
+msgstr ""
+
+#: src/help.c:400 src/help.c:644 src/help.c:859 src/help.c:1066
+#, c-format
+msgid ""
+"\n"
+" Connection options:\n"
+msgstr ""
+
+#: src/help.c:401 src/help.c:645 src/help.c:860 src/help.c:1067
+#, c-format
+msgid " -U, --pguser=USERNAME user name to connect as (default: current local user)\n"
+msgstr ""
+
+#: src/help.c:402 src/help.c:646 src/help.c:861 src/help.c:1068
+#, c-format
+msgid " -d, --pgdatabase=DBNAME database to connect (default: username)\n"
+msgstr ""
+
+#: src/help.c:403 src/help.c:647 src/help.c:862 src/help.c:1069
+#, c-format
+msgid " -h, --pghost=HOSTNAME database server host or socket directory(default: 'local socket')\n"
+msgstr ""
+
+#: src/help.c:404 src/help.c:648 src/help.c:863 src/help.c:1070
+#, c-format
+msgid " -p, --pgport=PORT database server port (default: 5432)\n"
+msgstr ""
+
+#: src/help.c:405 src/help.c:649 src/help.c:1071
+#, c-format
+msgid " -w, --no-password never prompt for password\n"
+msgstr ""
+
+#: src/help.c:406
+#, c-format
+msgid " -W, --password force password prompt\n"
+msgstr ""
+
+#: src/help.c:408 src/help.c:530 src/help.c:865 src/help.c:917 src/help.c:974
+#: src/help.c:1009 src/help.c:1074
+#, c-format
+msgid ""
+"\n"
+" Remote options:\n"
+msgstr ""
+
+#: src/help.c:409 src/help.c:531 src/help.c:866 src/help.c:918 src/help.c:975
+#: src/help.c:1010 src/help.c:1075
+#, c-format
+msgid " --remote-proto=protocol remote protocol to use\n"
+msgstr ""
+
+#: src/help.c:410 src/help.c:532 src/help.c:867 src/help.c:919 src/help.c:976
+#: src/help.c:1011 src/help.c:1076
+#, c-format
+msgid " available options: 'ssh', 'none' (default: ssh)\n"
+msgstr ""
+
+#: src/help.c:411 src/help.c:533 src/help.c:868 src/help.c:920
+#, c-format
+msgid " --remote-host=destination remote host address or hostname\n"
+msgstr ""
+
+#: src/help.c:412 src/help.c:534 src/help.c:869 src/help.c:921 src/help.c:978
+#: src/help.c:1013 src/help.c:1078
+#, c-format
+msgid " --remote-port=port remote host port (default: 22)\n"
+msgstr ""
+
+#: src/help.c:413 src/help.c:535 src/help.c:870 src/help.c:922 src/help.c:979
+#: src/help.c:1014 src/help.c:1079
+#, c-format
+msgid " --remote-path=path path to directory with pg_probackup binary on remote host\n"
+msgstr ""
+
+#: src/help.c:414 src/help.c:536 src/help.c:871 src/help.c:923 src/help.c:980
+#: src/help.c:1015 src/help.c:1080
+#, c-format
+msgid " (default: current binary path)\n"
+msgstr ""
+
+#: src/help.c:415 src/help.c:537 src/help.c:872 src/help.c:924 src/help.c:981
+#: src/help.c:1016 src/help.c:1081
+#, c-format
+msgid " --remote-user=username user name for ssh connection (default: current user)\n"
+msgstr ""
+
+#: src/help.c:416 src/help.c:538 src/help.c:873 src/help.c:925 src/help.c:982
+#: src/help.c:1017 src/help.c:1082
+#, c-format
+msgid " --ssh-options=ssh_options additional ssh options (default: none)\n"
+msgstr ""
+
+#: src/help.c:417 src/help.c:539 src/help.c:874
+#, c-format
+msgid " (example: --ssh-options='-c cipher_spec -F configfile')\n"
+msgstr ""
+
+#: src/help.c:419 src/help.c:881
+#, c-format
+msgid ""
+"\n"
+" Replica options:\n"
+msgstr ""
+
+#: src/help.c:420 src/help.c:882
+#, c-format
+msgid " --master-user=user_name user name to connect to master (deprecated)\n"
+msgstr ""
+
+#: src/help.c:421 src/help.c:883
+#, c-format
+msgid " --master-db=db_name database to connect to master (deprecated)\n"
+msgstr ""
+
+#: src/help.c:422 src/help.c:884
+#, c-format
+msgid " --master-host=host_name database server host of master (deprecated)\n"
+msgstr ""
+
+#: src/help.c:423 src/help.c:885
+#, c-format
+msgid " --master-port=port database server port of master (deprecated)\n"
+msgstr ""
+
+#: src/help.c:424 src/help.c:886
+#, c-format
+msgid ""
+" --replica-timeout=timeout wait timeout for WAL segment streaming through replication (deprecated)\n"
+"\n"
+msgstr ""
+
+#: src/help.c:430
+#, c-format
+msgid ""
+"\n"
+"%s restore -B backup-path --instance=instance_name\n"
+msgstr ""
+
+#: src/help.c:432
+#, c-format
+msgid " [--progress] [--force] [--no-sync]\n"
+msgstr ""
+
+#: src/help.c:436
+#, c-format
+msgid " [--skip-external-dirs]\n"
+msgstr ""
+
+#: src/help.c:438
+#, c-format
+msgid " [--db-include dbname | --db-exclude dbname]\n"
+msgstr ""
+
+#: src/help.c:446
+#, c-format
+msgid " [-R | --restore-as-replica]\n"
+msgstr ""
+
+#: src/help.c:452
+#, c-format
+msgid " [--archive-host=hostname] [--archive-port=port]\n"
+msgstr ""
+
+#: src/help.c:453
+#, c-format
+msgid ""
+" [--archive-user=username]\n"
+"\n"
+msgstr ""
+
+#: src/help.c:459
+#, c-format
+msgid " -i, --backup-id=backup-id backup to restore\n"
+msgstr ""
+
+#: src/help.c:463
+#, c-format
+msgid " --force ignore invalid status of the restored backup\n"
+msgstr ""
+
+#: src/help.c:464
+#, c-format
+msgid " --no-sync do not sync restored files to disk\n"
+msgstr ""
+
+#: src/help.c:465
+#, c-format
+msgid " --no-validate disable backup validation during restore\n"
+msgstr ""
+
+#: src/help.c:468 src/help.c:1060
+#, c-format
+msgid " -T, --tablespace-mapping=OLDDIR=NEWDIR\n"
+msgstr ""
+
+#: src/help.c:469 src/help.c:1061
+#, c-format
+msgid " relocate the tablespace from directory OLDDIR to NEWDIR\n"
+msgstr ""
+
+#: src/help.c:470
+#, c-format
+msgid " --external-mapping=OLDDIR=NEWDIR\n"
+msgstr ""
+
+#: src/help.c:471
+#, c-format
+msgid " relocate the external directory from OLDDIR to NEWDIR\n"
+msgstr ""
+
+#: src/help.c:472
+#, c-format
+msgid " --skip-external-dirs do not restore all external directories\n"
+msgstr ""
+
+#: src/help.c:474
+#, c-format
+msgid ""
+"\n"
+" Incremental restore options:\n"
+msgstr ""
+
+#: src/help.c:475
+#, c-format
+msgid " -I, --incremental-mode=none|checksum|lsn\n"
+msgstr ""
+
+#: src/help.c:476
+#, c-format
+msgid " reuse valid pages available in PGDATA if they have not changed\n"
+msgstr ""
+
+#: src/help.c:477
+#, c-format
+msgid " (default: none)\n"
+msgstr ""
+
+#: src/help.c:479
+#, c-format
+msgid ""
+"\n"
+" Partial restore options:\n"
+msgstr ""
+
+#: src/help.c:480
+#, c-format
+msgid " --db-include dbname restore only specified databases\n"
+msgstr ""
+
+#: src/help.c:481
+#, c-format
+msgid " --db-exclude dbname do not restore specified databases\n"
+msgstr ""
+
+#: src/help.c:483
+#, c-format
+msgid ""
+"\n"
+" Recovery options:\n"
+msgstr ""
+
+#: src/help.c:484 src/help.c:564
+#, c-format
+msgid " --recovery-target-time=time time stamp up to which recovery will proceed\n"
+msgstr ""
+
+#: src/help.c:485 src/help.c:565
+#, c-format
+msgid " --recovery-target-xid=xid transaction ID up to which recovery will proceed\n"
+msgstr ""
+
+#: src/help.c:486 src/help.c:566
+#, c-format
+msgid " --recovery-target-lsn=lsn LSN of the write-ahead log location up to which recovery will proceed\n"
+msgstr ""
+
+#: src/help.c:487 src/help.c:567
+#, c-format
+msgid " --recovery-target-inclusive=boolean\n"
+msgstr ""
+
+#: src/help.c:488 src/help.c:568
+#, c-format
+msgid " whether we stop just after the recovery target\n"
+msgstr ""
+
+#: src/help.c:489 src/help.c:569
+#, c-format
+msgid " --recovery-target-timeline=timeline\n"
+msgstr ""
+
+#: src/help.c:490 src/help.c:570
+#, c-format
+msgid " recovering into a particular timeline\n"
+msgstr ""
+
+#: src/help.c:491
+#, c-format
+msgid " --recovery-target=immediate|latest\n"
+msgstr ""
+
+#: src/help.c:492
+#, c-format
+msgid " end recovery as soon as a consistent state is reached or as late as possible\n"
+msgstr ""
+
+#: src/help.c:493 src/help.c:571
+#, c-format
+msgid " --recovery-target-name=target-name\n"
+msgstr ""
+
+#: src/help.c:494 src/help.c:572
+#, c-format
+msgid " the named restore point to which recovery will proceed\n"
+msgstr ""
+
+#: src/help.c:495
+#, c-format
+msgid " --recovery-target-action=pause|promote|shutdown\n"
+msgstr ""
+
+#: src/help.c:496
+#, c-format
+msgid " action the server should take once the recovery target is reached\n"
+msgstr ""
+
+#: src/help.c:497
+#, c-format
+msgid " (default: pause)\n"
+msgstr ""
+
+#: src/help.c:498 src/help.c:818
+#, c-format
+msgid " --restore-command=cmdline command to use as 'restore_command' in recovery.conf; 'none' disables\n"
+msgstr ""
+
+#: src/help.c:500
+#, c-format
+msgid ""
+"\n"
+" Standby options:\n"
+msgstr ""
+
+#: src/help.c:501
+#, c-format
+msgid " -R, --restore-as-replica write a minimal recovery.conf in the output directory\n"
+msgstr ""
+
+#: src/help.c:502
+#, c-format
+msgid " to ease setting up a standby server\n"
+msgstr ""
+
+#: src/help.c:503
+#, c-format
+msgid " --primary-conninfo=primary_conninfo\n"
+msgstr ""
+
+#: src/help.c:504
+#, c-format
+msgid " connection string to be used for establishing connection\n"
+msgstr ""
+
+#: src/help.c:505
+#, c-format
+msgid " with the primary server\n"
+msgstr ""
+
+#: src/help.c:506
+#, c-format
+msgid " -S, --primary-slot-name=slotname replication slot to be used for WAL streaming from the primary server\n"
+msgstr ""
+
+#: src/help.c:541 src/help.c:876
+#, c-format
+msgid ""
+"\n"
+" Remote WAL archive options:\n"
+msgstr ""
+
+#: src/help.c:542 src/help.c:877
+#, c-format
+msgid " --archive-host=destination address or hostname for ssh connection to archive host\n"
+msgstr ""
+
+#: src/help.c:543 src/help.c:878
+#, c-format
+msgid " --archive-port=port port for ssh connection to archive host (default: 22)\n"
+msgstr ""
+
+#: src/help.c:544
+#, c-format
+msgid ""
+" --archive-user=username user name for ssh connection to archive host (default: PostgreSQL user)\n"
+"\n"
+msgstr ""
+
+#: src/help.c:550
+#, c-format
+msgid ""
+"\n"
+"%s validate -B backup-path [--instance=instance_name]\n"
+msgstr ""
+
+#: src/help.c:556
+#, c-format
+msgid ""
+" [--skip-block-validation]\n"
+"\n"
+msgstr ""
+
+#: src/help.c:560
+#, c-format
+msgid " -i, --backup-id=backup-id backup to validate\n"
+msgstr ""
+
+#: src/help.c:595 src/help.c:722 src/help.c:768
+#, c-format
+msgid ""
+" --no-color disable the coloring of error and warning console messages\n"
+"\n"
+msgstr ""
+
+#: src/help.c:601
+#, c-format
+msgid ""
+"\n"
+"%s checkdb [-B backup-path] [--instance=instance_name]\n"
+msgstr ""
+
+#: src/help.c:602
+#, c-format
+msgid " [-D pgdata-path] [-j num-threads] [--progress]\n"
+msgstr ""
+
+#: src/help.c:604
+#, c-format
+msgid ""
+" [--heapallindexed] [--checkunique]\n"
+"\n"
+msgstr ""
+
+#: src/help.c:612
+#, c-format
+msgid " --skip-block-validation skip file-level checking\n"
+msgstr ""
+
+#: src/help.c:613 src/help.c:618 src/help.c:620
+#, c-format
+msgid " can be used only with '--amcheck' option\n"
+msgstr ""
+
+#: src/help.c:614
+#, c-format
+msgid " --amcheck in addition to file-level block checking\n"
+msgstr ""
+
+#: src/help.c:615
+#, c-format
+msgid " check btree indexes via function 'bt_index_check()'\n"
+msgstr ""
+
+#: src/help.c:616
+#, c-format
+msgid " using 'amcheck' or 'amcheck_next' extensions\n"
+msgstr ""
+
+#: src/help.c:617
+#, c-format
+msgid " --heapallindexed also check that heap is indexed\n"
+msgstr ""
+
+#: src/help.c:619
+#, c-format
+msgid " --checkunique also check unique constraints\n"
+msgstr ""
+
+#: src/help.c:631
+#, c-format
+msgid " support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log\n"
+msgstr ""
+
+#: src/help.c:650 src/help.c:1072
+#, c-format
+msgid ""
+" -W, --password force password prompt\n"
+"\n"
+msgstr ""
+
+#: src/help.c:656
+#, c-format
+msgid ""
+"\n"
+"%s show -B backup-path\n"
+msgstr ""
+
+#: src/help.c:658
+#, c-format
+msgid ""
+" [--format=format] [--archive]\n"
+"\n"
+msgstr ""
+
+#: src/help.c:661
+#, c-format
+msgid " --instance=instance_name show info about specific instance\n"
+msgstr ""
+
+#: src/help.c:662
+#, c-format
+msgid " -i, --backup-id=backup-id show info about specific backups\n"
+msgstr ""
+
+#: src/help.c:663
+#, c-format
+msgid " --archive show WAL archive information\n"
+msgstr ""
+
+#: src/help.c:664
+#, c-format
+msgid " --format=format show format=PLAIN|JSON\n"
+msgstr ""
+
+#: src/help.c:665
+#, c-format
+msgid ""
+" --no-color disable the coloring for plain format\n"
+"\n"
+msgstr ""
+
+#: src/help.c:671
+#, c-format
+msgid ""
+"\n"
+"%s delete -B backup-path --instance=instance_name\n"
+msgstr ""
+
+#: src/help.c:672
+#, c-format
+msgid " [-i backup-id | --delete-expired | --merge-expired] [--delete-wal]\n"
+msgstr ""
+
+#: src/help.c:677
+#, c-format
+msgid ""
+" [--no-validate] [--no-sync]\n"
+"\n"
+msgstr ""
+
+#: src/help.c:681
+#, c-format
+msgid " -i, --backup-id=backup-id backup to delete\n"
+msgstr ""
+
+#: src/help.c:684 src/help.c:745
+#, c-format
+msgid " --no-validate disable validation during retention merge\n"
+msgstr ""
+
+#: src/help.c:685 src/help.c:746
+#, c-format
+msgid " --no-sync do not sync merged files to disk\n"
+msgstr ""
+
+#: src/help.c:689 src/help.c:691
+#, c-format
+msgid " retention policy\n"
+msgstr ""
+
+#: src/help.c:700
+#, c-format
+msgid " --status=backup_status delete all backups with specified status\n"
+msgstr ""
+
+#: src/help.c:728
+#, c-format
+msgid ""
+"\n"
+"%s merge -B backup-path --instance=instance_name\n"
+msgstr ""
+
+#: src/help.c:729
+#, c-format
+msgid " -i backup-id [-j num-threads] [--progress]\n"
+msgstr ""
+
+#: src/help.c:737
+#, c-format
+msgid ""
+" [--log-rotation-age=log-rotation-age]\n"
+"\n"
+msgstr ""
+
+#: src/help.c:741
+#, c-format
+msgid " -i, --backup-id=backup-id backup to merge\n"
+msgstr ""
+
+#: src/help.c:774
+#, c-format
+msgid ""
+"\n"
+"%s set-backup -B backup-path --instance=instance_name\n"
+msgstr ""
+
+#: src/help.c:775
+#, c-format
+msgid " -i backup-id\n"
+msgstr ""
+
+#: src/help.c:776
+#, c-format
+msgid ""
+" [--ttl=interval] [--expire-time=time] [--note=text]\n"
+"\n"
+msgstr ""
+
+#: src/help.c:783
+#, c-format
+msgid " --note=text add note to backup; 'none' to remove note\n"
+msgstr ""
+
+#: src/help.c:790
+#, c-format
+msgid ""
+"\n"
+"%s set-config -B backup-path --instance=instance_name\n"
+msgstr ""
+
+#: src/help.c:810 src/help.c:908 src/help.c:952 src/help.c:996
+#, c-format
+msgid ""
+" [--ssh-options]\n"
+"\n"
+msgstr ""
+
+#: src/help.c:846
+#, c-format
+msgid " --wal-depth=wal-depth number of latest valid backups with ability to perform\n"
+msgstr ""
+
+#: src/help.c:847
+#, c-format
+msgid " the point in time recovery; disables; (default: 0)\n"
+msgstr ""
+
+#: src/help.c:852 src/help.c:970
+#, c-format
+msgid " available options: 'zlib','pglz','none' (default: 'none')\n"
+msgstr ""
+
+#: src/help.c:879
+#, c-format
+msgid " --archive-user=username user name for ssh connection to archive host (default: PostgreSQL user)\n"
+msgstr ""
+
+#: src/help.c:892
+#, c-format
+msgid ""
+"\n"
+"%s show-config -B backup-path --instance=instance_name\n"
+msgstr ""
+
+#: src/help.c:893
+#, c-format
+msgid ""
+" [--format=format]\n"
+"\n"
+msgstr ""
+
+#: src/help.c:897
+#, c-format
+msgid ""
+" --format=format show format=PLAIN|JSON\n"
+"\n"
+msgstr ""
+
+#: src/help.c:903
+#, c-format
+msgid ""
+"\n"
+"%s add-instance -B backup-path -D pgdata-path\n"
+msgstr ""
+
+#: src/help.c:905
+#, c-format
+msgid " [-E external-directory-path]\n"
+msgstr ""
+
+#: src/help.c:912
+#, c-format
+msgid " --instance=instance_name name of the new instance\n"
+msgstr ""
+
+#: src/help.c:926 src/help.c:983 src/help.c:1018 src/help.c:1083
+#, c-format
+msgid ""
+" (example: --ssh-options='-c cipher_spec -F configfile')\n"
+"\n"
+msgstr ""
+
+#: src/help.c:932
+#, c-format
+msgid ""
+"\n"
+"%s del-instance -B backup-path --instance=instance_name\n"
+msgstr ""
+
+#: src/help.c:935
+#, c-format
+msgid ""
+" --instance=instance_name name of the instance to delete\n"
+"\n"
+msgstr ""
+
+#: src/help.c:941
+#, c-format
+msgid ""
+"\n"
+"%s archive-push -B backup-path --instance=instance_name\n"
+msgstr ""
+
+#: src/help.c:955 src/help.c:999
+#, c-format
+msgid " --instance=instance_name name of the instance to delete\n"
+msgstr ""
+
+#: src/help.c:956 src/help.c:1002
+#, c-format
+msgid " --wal-file-name=wal-file-name\n"
+msgstr ""
+
+#: src/help.c:957
+#, c-format
+msgid " name of the file to copy into WAL archive\n"
+msgstr ""
+
+#: src/help.c:958 src/help.c:1000
+#, c-format
+msgid " --wal-file-path=wal-file-path\n"
+msgstr ""
+
+#: src/help.c:959
+#, c-format
+msgid " relative destination path of the WAL archive\n"
+msgstr ""
+
+#: src/help.c:961
+#, c-format
+msgid " --batch-size=NUM number of files to be copied\n"
+msgstr ""
+
+#: src/help.c:962
+#, c-format
+msgid " --archive-timeout=timeout wait timeout before discarding stale temp file(default: 5min)\n"
+msgstr ""
+
+#: src/help.c:963
+#, c-format
+msgid " --no-ready-rename do not rename '.ready' files in 'archive_status' directory\n"
+msgstr ""
+
+#: src/help.c:964
+#, c-format
+msgid " --no-sync do not sync WAL file to disk\n"
+msgstr ""
+
+#: src/help.c:965
+#, c-format
+msgid " --overwrite overwrite archived WAL file\n"
+msgstr ""
+
+#: src/help.c:977 src/help.c:1012 src/help.c:1077
+#, c-format
+msgid " --remote-host=hostname remote host address or hostname\n"
+msgstr ""
+
+#: src/help.c:989
+#, c-format
+msgid ""
+"\n"
+"%s archive-get -B backup-path --instance=instance_name\n"
+msgstr ""
+
+#: src/help.c:1001
+#, c-format
+msgid " relative destination path name of the WAL file on the server\n"
+msgstr ""
+
+#: src/help.c:1003
+#, c-format
+msgid " name of the WAL file to retrieve from the archive\n"
+msgstr ""
+
+#: src/help.c:1005
+#, c-format
+msgid " --batch-size=NUM number of files to be prefetched\n"
+msgstr ""
+
+#: src/help.c:1006
+#, c-format
+msgid " --prefetch-dir=path location of the store area for prefetched WAL files\n"
+msgstr ""
+
+#: src/help.c:1007
+#, c-format
+msgid " --no-validate-wal skip validation of prefetched WAL file before using it\n"
+msgstr ""
+
+#: src/help.c:1024
+#, c-format
+msgid ""
+"\n"
+"%s help [command]\n"
+msgstr ""
+
+#: src/help.c:1025
+#, c-format
+msgid ""
+"%s command --help\n"
+"\n"
+msgstr ""
+
+#: src/help.c:1031
+#, c-format
+msgid ""
+"\n"
+"%s version\n"
+msgstr ""
+
+#: src/help.c:1032
+#, c-format
+msgid ""
+"%s --version\n"
+"\n"
+msgstr ""
+
+#: src/help.c:1038
+#, c-format
+msgid ""
+"\n"
+"%s catchup -b catchup-mode\n"
+msgstr ""
+
+#: src/help.c:1041
+#, c-format
+msgid " [--stream [-S slot-name]] [--temp-slot | --perm-slot]\n"
+msgstr ""
+
+#: src/help.c:1050
+#, c-format
+msgid ""
+" [--help]\n"
+"\n"
+msgstr ""
+
+#: src/help.c:1052
+#, c-format
+msgid " -b, --backup-mode=catchup-mode catchup mode=FULL|DELTA|PTRACK\n"
+msgstr ""
+
+#: src/help.c:1053
+#, c-format
+msgid " --stream stream the transaction log (only supported mode)\n"
+msgstr ""
+
+#: src/help.c:1056
+#, c-format
+msgid " -P --perm-slot create permanent replication slot\n"
+msgstr ""
+
+#: src/help.c:1062
+#, c-format
+msgid " -x, --exclude-path=path_prefix files with path_prefix (relative to pgdata) will be\n"
+msgstr ""
+
+#: src/help.c:1063
+#, c-format
+msgid " excluded from catchup (can be used multiple times)\n"
+msgstr ""
+
+#: src/help.c:1064
+#, c-format
+msgid " Dangerous option! Use at your own risk!\n"
+msgstr ""
diff --git a/src/catalog.c b/src/catalog.c
index b4ed8c18..9d817913 100644
--- a/src/catalog.c
+++ b/src/catalog.c
@@ -1084,15 +1084,15 @@ get_backup_filelist(pgBackup *backup, bool strict)
COMP_FILE_CRC32(true, content_crc, buf, strlen(buf));
- get_control_value(buf, "path", path, NULL, true);
- get_control_value(buf, "size", NULL, &write_size, true);
- get_control_value(buf, "mode", NULL, &mode, true);
- get_control_value(buf, "is_datafile", NULL, &is_datafile, true);
- get_control_value(buf, "is_cfs", NULL, &is_cfs, false);
- get_control_value(buf, "crc", NULL, &crc, true);
- get_control_value(buf, "compress_alg", compress_alg_string, NULL, false);
- get_control_value(buf, "external_dir_num", NULL, &external_dir_num, false);
- get_control_value(buf, "dbOid", NULL, &dbOid, false);
+ get_control_value_str(buf, "path", path, sizeof(path),true);
+ get_control_value_int64(buf, "size", &write_size, true);
+ get_control_value_int64(buf, "mode", &mode, true);
+ get_control_value_int64(buf, "is_datafile", &is_datafile, true);
+ get_control_value_int64(buf, "is_cfs", &is_cfs, false);
+ get_control_value_int64(buf, "crc", &crc, true);
+ get_control_value_str(buf, "compress_alg", compress_alg_string, sizeof(compress_alg_string), false);
+ get_control_value_int64(buf, "external_dir_num", &external_dir_num, false);
+ get_control_value_int64(buf, "dbOid", &dbOid, false);
file = pgFileInit(path);
file->write_size = (int64) write_size;
@@ -1107,28 +1107,28 @@ get_backup_filelist(pgBackup *backup, bool strict)
/*
* Optional fields
*/
- if (get_control_value(buf, "linked", linked, NULL, false) && linked[0])
+ if (get_control_value_str(buf, "linked", linked, sizeof(linked), false) && linked[0])
{
file->linked = pgut_strdup(linked);
canonicalize_path(file->linked);
}
- if (get_control_value(buf, "segno", NULL, &segno, false))
+ if (get_control_value_int64(buf, "segno", &segno, false))
file->segno = (int) segno;
- if (get_control_value(buf, "n_blocks", NULL, &n_blocks, false))
+ if (get_control_value_int64(buf, "n_blocks", &n_blocks, false))
file->n_blocks = (int) n_blocks;
- if (get_control_value(buf, "n_headers", NULL, &n_headers, false))
+ if (get_control_value_int64(buf, "n_headers", &n_headers, false))
file->n_headers = (int) n_headers;
- if (get_control_value(buf, "hdr_crc", NULL, &hdr_crc, false))
+ if (get_control_value_int64(buf, "hdr_crc", &hdr_crc, false))
file->hdr_crc = (pg_crc32) hdr_crc;
- if (get_control_value(buf, "hdr_off", NULL, &hdr_off, false))
+ if (get_control_value_int64(buf, "hdr_off", &hdr_off, false))
file->hdr_off = hdr_off;
- if (get_control_value(buf, "hdr_size", NULL, &hdr_size, false))
+ if (get_control_value_int64(buf, "hdr_size", &hdr_size, false))
file->hdr_size = (int) hdr_size;
parray_append(files, file);
diff --git a/src/catchup.c b/src/catchup.c
index 1b8f8084..3c522afb 100644
--- a/src/catchup.c
+++ b/src/catchup.c
@@ -2,7 +2,7 @@
*
* catchup.c: sync DB cluster
*
- * Copyright (c) 2021, Postgres Professional
+ * Copyright (c) 2022, Postgres Professional
*
*-------------------------------------------------------------------------
*/
@@ -507,16 +507,20 @@ catchup_multithreaded_copy(int num_threads,
/* Run threads */
thread_interrupted = false;
threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads);
- for (i = 0; i < num_threads; i++)
+ if (!dry_run)
{
- elog(VERBOSE, "Start thread num: %i", i);
- pthread_create(&threads[i], NULL, &catchup_thread_runner, &(threads_args[i]));
+ for (i = 0; i < num_threads; i++)
+ {
+ elog(VERBOSE, "Start thread num: %i", i);
+ pthread_create(&threads[i], NULL, &catchup_thread_runner, &(threads_args[i]));
+ }
}
/* Wait threads */
for (i = 0; i < num_threads; i++)
{
- pthread_join(threads[i], NULL);
+ if (!dry_run)
+ pthread_join(threads[i], NULL);
all_threads_successful &= threads_args[i].completed;
transfered_bytes_result += threads_args[i].transfered_bytes;
}
@@ -706,9 +710,14 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
/* Start stream replication */
join_path_components(dest_xlog_path, dest_pgdata, PG_XLOG_DIR);
- fio_mkdir(dest_xlog_path, DIR_PERMISSION, FIO_LOCAL_HOST);
- start_WAL_streaming(source_conn, dest_xlog_path, &instance_config.conn_opt,
- current.start_lsn, current.tli, false);
+ if (!dry_run)
+ {
+ fio_mkdir(dest_xlog_path, DIR_PERMISSION, FIO_LOCAL_HOST);
+ start_WAL_streaming(source_conn, dest_xlog_path, &instance_config.conn_opt,
+ current.start_lsn, current.tli, false);
+ }
+ else
+ elog(INFO, "WAL streaming skipping with --dry-run option");
source_filelist = parray_new();
@@ -779,9 +788,9 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
/* Build the page map from ptrack information */
make_pagemap_from_ptrack_2(source_filelist, source_conn,
- source_node_info.ptrack_schema,
- source_node_info.ptrack_version_num,
- dest_redo.lsn);
+ source_node_info.ptrack_schema,
+ source_node_info.ptrack_version_num,
+ dest_redo.lsn);
time(&end_time);
elog(INFO, "Pagemap successfully extracted, time elapsed: %.0f sec",
difftime(end_time, start_time));
@@ -820,9 +829,9 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
char dirpath[MAXPGPATH];
join_path_components(dirpath, dest_pgdata, file->rel_path);
-
elog(VERBOSE, "Create directory '%s'", dirpath);
- fio_mkdir(dirpath, DIR_PERMISSION, FIO_LOCAL_HOST);
+ if (!dry_run)
+ fio_mkdir(dirpath, DIR_PERMISSION, FIO_LOCAL_HOST);
}
else
{
@@ -853,15 +862,18 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
elog(VERBOSE, "Create directory \"%s\" and symbolic link \"%s\"",
linked_path, to_path);
- /* create tablespace directory */
- if (fio_mkdir(linked_path, file->mode, FIO_LOCAL_HOST) != 0)
- elog(ERROR, "Could not create tablespace directory \"%s\": %s",
- linked_path, strerror(errno));
+ if (!dry_run)
+ {
+ /* create tablespace directory */
+ if (fio_mkdir(linked_path, file->mode, FIO_LOCAL_HOST) != 0)
+ elog(ERROR, "Could not create tablespace directory \"%s\": %s",
+ linked_path, strerror(errno));
- /* create link to linked_path */
- if (fio_symlink(linked_path, to_path, true, FIO_LOCAL_HOST) < 0)
- elog(ERROR, "Could not create symbolic link \"%s\" -> \"%s\": %s",
- linked_path, to_path, strerror(errno));
+ /* create link to linked_path */
+ if (fio_symlink(linked_path, to_path, true, FIO_LOCAL_HOST) < 0)
+ elog(ERROR, "Could not create symbolic link \"%s\" -> \"%s\": %s",
+ linked_path, to_path, strerror(errno));
+ }
}
}
@@ -930,7 +942,10 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
char fullpath[MAXPGPATH];
join_path_components(fullpath, dest_pgdata, file->rel_path);
- fio_delete(file->mode, fullpath, FIO_LOCAL_HOST);
+ if (!dry_run)
+ {
+ fio_delete(file->mode, fullpath, FIO_LOCAL_HOST);
+ }
elog(VERBOSE, "Deleted file \"%s\"", fullpath);
/* shrink dest pgdata list */
@@ -961,7 +976,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
catchup_isok = transfered_datafiles_bytes != -1;
/* at last copy control file */
- if (catchup_isok)
+ if (catchup_isok && !dry_run)
{
char from_fullpath[MAXPGPATH];
char to_fullpath[MAXPGPATH];
@@ -972,7 +987,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
transfered_datafiles_bytes += source_pg_control_file->size;
}
- if (!catchup_isok)
+ if (!catchup_isok && !dry_run)
{
char pretty_time[20];
char pretty_transfered_data_bytes[20];
@@ -1010,14 +1025,18 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
pg_free(stop_backup_query_text);
}
- wait_wal_and_calculate_stop_lsn(dest_xlog_path, stop_backup_result.lsn, &current);
+ if (!dry_run)
+ wait_wal_and_calculate_stop_lsn(dest_xlog_path, stop_backup_result.lsn, &current);
#if PG_VERSION_NUM >= 90600
/* Write backup_label */
Assert(stop_backup_result.backup_label_content != NULL);
- pg_stop_backup_write_file_helper(dest_pgdata, PG_BACKUP_LABEL_FILE, "backup label",
- stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len,
- NULL);
+ if (!dry_run)
+ {
+ pg_stop_backup_write_file_helper(dest_pgdata, PG_BACKUP_LABEL_FILE, "backup label",
+ stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len,
+ NULL);
+ }
free(stop_backup_result.backup_label_content);
stop_backup_result.backup_label_content = NULL;
stop_backup_result.backup_label_content_len = 0;
@@ -1040,6 +1059,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
#endif
/* wait for end of wal streaming and calculate wal size transfered */
+ if (!dry_run)
{
parray *wal_files_list = NULL;
wal_files_list = parray_new();
@@ -1091,17 +1111,17 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
}
/* Sync all copied files unless '--no-sync' flag is used */
- if (sync_dest_files)
+ if (sync_dest_files && !dry_run)
catchup_sync_destination_files(dest_pgdata, FIO_LOCAL_HOST, source_filelist, source_pg_control_file);
else
elog(WARNING, "Files are not synced to disk");
/* Cleanup */
- if (dest_filelist)
+ if (dest_filelist && !dry_run)
{
parray_walk(dest_filelist, pgFileFree);
- parray_free(dest_filelist);
}
+ parray_free(dest_filelist);
parray_walk(source_filelist, pgFileFree);
parray_free(source_filelist);
pgFileFree(source_pg_control_file);
diff --git a/src/data.c b/src/data.c
index f02e3fd1..e5a55112 100644
--- a/src/data.c
+++ b/src/data.c
@@ -349,6 +349,8 @@ prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn,
Assert(false);
}
}
+ /* avoid re-reading once buffered data, flushing on further attempts, see PBCKP-150 */
+ fflush(in);
}
/*
@@ -2030,10 +2032,10 @@ get_page_header(FILE *in, const char *fullpath, BackupPageHeader* bph,
return false; /* EOF found */
else if (read_len != 0 && feof(in))
elog(ERROR,
- "Odd size page found at offset %lu of \"%s\"",
+ "Odd size page found at offset %ld of \"%s\"",
ftello(in), fullpath);
else
- elog(ERROR, "Cannot read header at offset %lu of \"%s\": %s",
+ elog(ERROR, "Cannot read header at offset %ld of \"%s\": %s",
ftello(in), fullpath, strerror(errno));
}
@@ -2321,7 +2323,7 @@ copy_pages(const char *to_fullpath, const char *from_fullpath,
elog(ERROR, "Cannot seek to end of file position in destination file \"%s\": %s",
to_fullpath, strerror(errno));
{
- size_t pos = ftell(out);
+ long pos = ftell(out);
if (pos < 0)
elog(ERROR, "Cannot get position in destination file \"%s\": %s",
diff --git a/src/delete.c b/src/delete.c
index 6c70ff81..b86ed43e 100644
--- a/src/delete.c
+++ b/src/delete.c
@@ -36,7 +36,7 @@ do_delete(InstanceState *instanceState, time_t backup_id)
parray *backup_list,
*delete_list;
pgBackup *target_backup = NULL;
- size_t size_to_delete = 0;
+ int64 size_to_delete = 0;
char size_to_delete_pretty[20];
/* Get complete list of backups */
@@ -682,12 +682,11 @@ do_retention_wal(InstanceState *instanceState, bool dry_run)
* at least one backup and no file should be removed.
* Unless wal-depth is enabled.
*/
- if ((tlinfo->closest_backup) && instance_config.wal_depth <= 0)
+ if ((tlinfo->closest_backup) && instance_config.wal_depth == 0)
continue;
/* WAL retention keeps this timeline from purge */
- if (instance_config.wal_depth >= 0 && tlinfo->anchor_tli > 0 &&
- tlinfo->anchor_tli != tlinfo->tli)
+ if (tlinfo->anchor_tli > 0 && tlinfo->anchor_tli != tlinfo->tli)
continue;
/*
@@ -701,7 +700,7 @@ do_retention_wal(InstanceState *instanceState, bool dry_run)
*/
if (tlinfo->oldest_backup)
{
- if (instance_config.wal_depth >= 0 && !(XLogRecPtrIsInvalid(tlinfo->anchor_lsn)))
+ if (!(XLogRecPtrIsInvalid(tlinfo->anchor_lsn)))
{
delete_walfiles_in_tli(instanceState, tlinfo->anchor_lsn,
tlinfo, instance_config.xlog_seg_size, dry_run);
@@ -714,7 +713,7 @@ do_retention_wal(InstanceState *instanceState, bool dry_run)
}
else
{
- if (instance_config.wal_depth >= 0 && !(XLogRecPtrIsInvalid(tlinfo->anchor_lsn)))
+ if (!(XLogRecPtrIsInvalid(tlinfo->anchor_lsn)))
delete_walfiles_in_tli(instanceState, tlinfo->anchor_lsn,
tlinfo, instance_config.xlog_seg_size, dry_run);
else
@@ -942,7 +941,7 @@ delete_walfiles_in_tli(InstanceState *instanceState, XLogRecPtr keep_lsn, timeli
join_path_components(wal_fullpath, instanceState->instance_wal_subdir_path, wal_file->file.name);
/* save segment from purging */
- if (instance_config.wal_depth >= 0 && wal_file->keep)
+ if (wal_file->keep)
{
elog(VERBOSE, "Retain WAL segment \"%s\"", wal_fullpath);
continue;
@@ -1027,7 +1026,7 @@ do_delete_status(InstanceState *instanceState, InstanceConfig *instance_config,
parray *backup_list, *delete_list;
const char *pretty_status;
int n_deleted = 0, n_found = 0;
- size_t size_to_delete = 0;
+ int64 size_to_delete = 0;
char size_to_delete_pretty[20];
pgBackup *backup;
diff --git a/src/dir.c b/src/dir.c
index 4ebe0939..0b2d1877 100644
--- a/src/dir.c
+++ b/src/dir.c
@@ -8,6 +8,7 @@
*-------------------------------------------------------------------------
*/
+#include <assert.h>
#include "pg_probackup.h"
#include "utils/file.h"
@@ -130,6 +131,9 @@ static void opt_path_map(ConfigOption *opt, const char *arg,
TablespaceList *list, const char *type);
static void cleanup_tablespace(const char *path);
+static void control_string_bad_format(const char* str);
+
+
/* Tablespace mapping */
static TablespaceList tablespace_dirs = {NULL, NULL};
/* Extra directories mapping */
@@ -1036,13 +1040,20 @@ opt_externaldir_map(ConfigOption *opt, const char *arg)
*/
void
create_data_directories(parray *dest_files, const char *data_dir, const char *backup_dir,
- bool extract_tablespaces, bool incremental, fio_location location)
+ bool extract_tablespaces, bool incremental, fio_location location,
+ const char* waldir_path)
{
int i;
parray *links = NULL;
mode_t pg_tablespace_mode = DIR_PERMISSION;
char to_path[MAXPGPATH];
+ if (waldir_path && !dir_is_empty(waldir_path, location))
+ {
+ elog(ERROR, "WAL directory location is not empty: \"%s\"", waldir_path);
+ }
+
+
/* get tablespace map */
if (extract_tablespaces)
{
@@ -1107,6 +1118,27 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba
/* skip external directory content */
if (dir->external_dir_num != 0)
continue;
+ /* Create WAL directory and symlink if waldir_path is setting */
+ if (waldir_path && strcmp(dir->rel_path, PG_XLOG_DIR) == 0) {
+ /* get full path to PG_XLOG_DIR */
+
+ join_path_components(to_path, data_dir, PG_XLOG_DIR);
+
+ elog(VERBOSE, "Create directory \"%s\" and symbolic link \"%s\"",
+ waldir_path, to_path);
+
+ /* create tablespace directory from waldir_path*/
+ fio_mkdir(waldir_path, pg_tablespace_mode, location);
+
+ /* create link to linked_path */
+ if (fio_symlink(waldir_path, to_path, incremental, location) < 0)
+ elog(ERROR, "Could not create symbolic link \"%s\": %s",
+ to_path, strerror(errno));
+
+ continue;
+
+
+ }
/* tablespace_map exists */
if (links)
@@ -1467,7 +1499,7 @@ get_external_remap(char *current_dir)
return current_dir;
}
-/* Parsing states for get_control_value() */
+/* Parsing states for get_control_value_str() */
#define CONTROL_WAIT_NAME 1
#define CONTROL_INNAME 2
#define CONTROL_WAIT_COLON 3
@@ -1481,26 +1513,62 @@ get_external_remap(char *current_dir)
* The line has the following format:
* {"name1":"value1", "name2":"value2"}
*
- * The value will be returned to "value_str" as string if it is not NULL. If it
- * is NULL the value will be returned to "value_int64" as int64.
+ * The value will be returned in "value_int64" as int64.
+ *
+ * Returns true if the value was found in the line and parsed.
+ */
+bool
+get_control_value_int64(const char *str, const char *name, int64 *value_int64, bool is_mandatory)
+{
+
+ char buf_int64[32];
+
+ assert(value_int64);
+
+ /* Set default value */
+ *value_int64 = 0;
+
+ if (!get_control_value_str(str, name, buf_int64, sizeof(buf_int64), is_mandatory))
+ return false;
+
+ if (!parse_int64(buf_int64, value_int64, 0))
+ {
+ /* We assume that too big value is -1 */
+ if (errno == ERANGE)
+ *value_int64 = BYTES_INVALID;
+ else
+ control_string_bad_format(str);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Get value from json-like line "str" of backup_content.control file.
+ *
+ * The line has the following format:
+ * {"name1":"value1", "name2":"value2"}
+ *
+ * The value will be returned to "value_str" as string.
*
* Returns true if the value was found in the line.
*/
+
bool
-get_control_value(const char *str, const char *name,
- char *value_str, int64 *value_int64, bool is_mandatory)
+get_control_value_str(const char *str, const char *name,
+ char *value_str, size_t value_str_size, bool is_mandatory)
{
int state = CONTROL_WAIT_NAME;
char *name_ptr = (char *) name;
char *buf = (char *) str;
- char buf_int64[32], /* Buffer for "value_int64" */
- *buf_int64_ptr = buf_int64;
+ char *const value_str_start = value_str;
- /* Set default values */
- if (value_str)
- *value_str = '\0';
- else if (value_int64)
- *value_int64 = 0;
+ assert(value_str);
+ assert(value_str_size > 0);
+
+ /* Set default value */
+ *value_str = '\0';
while (*buf)
{
@@ -1510,7 +1578,7 @@ get_control_value(const char *str, const char *name,
if (*buf == '"')
state = CONTROL_INNAME;
else if (IsAlpha(*buf))
- goto bad_format;
+ control_string_bad_format(str);
break;
case CONTROL_INNAME:
/* Found target field. Parse value. */
@@ -1529,57 +1597,32 @@ get_control_value(const char *str, const char *name,
if (*buf == ':')
state = CONTROL_WAIT_VALUE;
else if (!IsSpace(*buf))
- goto bad_format;
+ control_string_bad_format(str);
break;
case CONTROL_WAIT_VALUE:
if (*buf == '"')
{
state = CONTROL_INVALUE;
- buf_int64_ptr = buf_int64;
}
else if (IsAlpha(*buf))
- goto bad_format;
+ control_string_bad_format(str);
break;
case CONTROL_INVALUE:
/* Value was parsed, exit */
if (*buf == '"')
{
- if (value_str)
- {
- *value_str = '\0';
- }
- else if (value_int64)
- {
- /* Length of buf_uint64 should not be greater than 31 */
- if (buf_int64_ptr - buf_int64 >= 32)
- elog(ERROR, "field \"%s\" is out of range in the line %s of the file %s",
- name, str, DATABASE_FILE_LIST);
-
- *buf_int64_ptr = '\0';
- if (!parse_int64(buf_int64, value_int64, 0))
- {
- /* We assume that too big value is -1 */
- if (errno == ERANGE)
- *value_int64 = BYTES_INVALID;
- else
- goto bad_format;
- }
- }
-
+ *value_str = '\0';
return true;
}
else
{
- if (value_str)
- {
- *value_str = *buf;
- value_str++;
- }
- else
- {
- *buf_int64_ptr = *buf;
- buf_int64_ptr++;
+ /* verify if value_str not exceeds value_str_size limits */
+ if (value_str - value_str_start >= value_str_size - 1) {
+ elog(ERROR, "field \"%s\" is out of range in the line %s of the file %s",
+ name, str, DATABASE_FILE_LIST);
}
+ *value_str = *buf;
+ value_str++;
}
break;
case CONTROL_WAIT_NEXT_NAME:
@@ -1596,18 +1639,20 @@ get_control_value(const char *str, const char *name,
/* There is no close quotes */
if (state == CONTROL_INNAME || state == CONTROL_INVALUE)
- goto bad_format;
+ control_string_bad_format(str);
/* Did not find target field */
if (is_mandatory)
elog(ERROR, "field \"%s\" is not found in the line %s of the file %s",
name, str, DATABASE_FILE_LIST);
return false;
+}
-bad_format:
- elog(ERROR, "%s file has invalid format in line %s",
- DATABASE_FILE_LIST, str);
- return false; /* Make compiler happy */
+static void
+control_string_bad_format(const char* str)
+{
+ elog(ERROR, "%s file has invalid format in line %s",
+ DATABASE_FILE_LIST, str);
}
/*
@@ -1841,8 +1886,8 @@ read_database_map(pgBackup *backup)
db_map_entry *db_entry = (db_map_entry *) pgut_malloc(sizeof(db_map_entry));
- get_control_value(buf, "dbOid", NULL, &dbOid, true);
- get_control_value(buf, "datname", datname, NULL, true);
+ get_control_value_int64(buf, "dbOid", &dbOid, true);
+ get_control_value_str(buf, "datname", datname, sizeof(datname), true);
db_entry->dbOid = dbOid;
db_entry->datname = pgut_strdup(datname);
diff --git a/src/help.c b/src/help.c
index a494ab20..85894759 100644
--- a/src/help.c
+++ b/src/help.c
@@ -169,6 +169,7 @@ help_pg_probackup(void)
printf(_(" [-T OLDDIR=NEWDIR] [--progress]\n"));
printf(_(" [--external-mapping=OLDDIR=NEWDIR]\n"));
printf(_(" [--skip-external-dirs] [--no-sync]\n"));
+ printf(_(" [-X WALDIR | --waldir=WALDIR]\n"));
printf(_(" [-I | --incremental-mode=none|checksum|lsn]\n"));
printf(_(" [--db-include | --db-exclude]\n"));
printf(_(" [--remote-proto] [--remote-host]\n"));
@@ -261,15 +262,16 @@ help_pg_probackup(void)
printf(_(" [--remote-proto] [--remote-host]\n"));
printf(_(" [--remote-port] [--remote-path] [--remote-user]\n"));
printf(_(" [--ssh-options]\n"));
+ printf(_(" [--dry-run]\n"));
printf(_(" [--help]\n"));
if ((PROGRAM_URL || PROGRAM_EMAIL))
{
printf("\n");
if (PROGRAM_URL)
- printf("Read the website for details. <%s>\n", PROGRAM_URL);
+ printf(_("Read the website for details <%s>.\n"), PROGRAM_URL);
if (PROGRAM_EMAIL)
- printf("Report bugs to <%s>.\n", PROGRAM_EMAIL);
+ printf(_("Report bugs to <%s>.\n"), PROGRAM_EMAIL);
}
}
@@ -434,6 +436,7 @@ help_restore(void)
printf(_(" [-T OLDDIR=NEWDIR]\n"));
printf(_(" [--external-mapping=OLDDIR=NEWDIR]\n"));
printf(_(" [--skip-external-dirs]\n"));
+ printf(_(" [-X WALDIR | --waldir=WALDIR]\n"));
printf(_(" [-I | --incremental-mode=none|checksum|lsn]\n"));
printf(_(" [--db-include dbname | --db-exclude dbname]\n"));
printf(_(" [--recovery-target-time=time|--recovery-target-xid=xid\n"));
@@ -471,6 +474,10 @@ help_restore(void)
printf(_(" relocate the external directory from OLDDIR to NEWDIR\n"));
printf(_(" --skip-external-dirs do not restore all external directories\n"));
+
+ printf(_(" -X, --waldir=WALDIR location for the write-ahead log directory\n"));
+
+
printf(_("\n Incremental restore options:\n"));
printf(_(" -I, --incremental-mode=none|checksum|lsn\n"));
printf(_(" reuse valid pages available in PGDATA if they have not changed\n"));
@@ -1047,6 +1054,7 @@ help_catchup(void)
printf(_(" [--remote-proto] [--remote-host]\n"));
printf(_(" [--remote-port] [--remote-path] [--remote-user]\n"));
printf(_(" [--ssh-options]\n"));
+ printf(_(" [--dry-run]\n"));
printf(_(" [--help]\n\n"));
printf(_(" -b, --backup-mode=catchup-mode catchup mode=FULL|DELTA|PTRACK\n"));
@@ -1081,4 +1089,6 @@ help_catchup(void)
printf(_(" --remote-user=username user name for ssh connection (default: current user)\n"));
printf(_(" --ssh-options=ssh_options additional ssh options (default: none)\n"));
printf(_(" (example: --ssh-options='-c cipher_spec -F configfile')\n\n"));
+
+ printf(_(" --dry-run perform a trial run without any changes\n\n"));
}
diff --git a/src/merge.c b/src/merge.c
index ff39c251..1ce92bb4 100644
--- a/src/merge.c
+++ b/src/merge.c
@@ -614,7 +614,7 @@ merge_chain(InstanceState *instanceState,
/* Create directories */
create_data_directories(dest_backup->files, full_database_dir,
- dest_backup->root_dir, false, false, FIO_BACKUP_HOST);
+ dest_backup->root_dir, false, false, FIO_BACKUP_HOST, NULL);
/* External directories stuff */
if (dest_backup->external_dir_str)
diff --git a/src/pg_probackup.c b/src/pg_probackup.c
index c5ed1317..193cd9c3 100644
--- a/src/pg_probackup.c
+++ b/src/pg_probackup.c
@@ -122,6 +122,7 @@ static parray *datname_include_list = NULL;
/* arrays for --exclude-path's */
static parray *exclude_absolute_paths_list = NULL;
static parray *exclude_relative_paths_list = NULL;
+static char* gl_waldir_path = NULL;
/* checkdb options */
bool need_amcheck = false;
@@ -238,6 +239,7 @@ static ConfigOption cmd_options[] =
{ 's', 160, "primary-conninfo", &primary_conninfo, SOURCE_CMD_STRICT },
{ 's', 'S', "primary-slot-name",&replication_slot, SOURCE_CMD_STRICT },
{ 'f', 'I', "incremental-mode", opt_incr_restore_mode, SOURCE_CMD_STRICT },
+ { 's', 'X', "waldir", &gl_waldir_path, SOURCE_CMD_STRICT },
/* checkdb options */
{ 'b', 195, "amcheck", &need_amcheck, SOURCE_CMD_STRICT },
{ 'b', 196, "heapallindexed", &heapallindexed, SOURCE_CMD_STRICT },
@@ -308,6 +310,7 @@ main(int argc, char *argv[])
init_config(&instance_config, instance_name);
PROGRAM_NAME = get_progname(argv[0]);
+ set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_probackup"));
PROGRAM_FULL_PATH = palloc0(MAXPGPATH);
/* Get current time */
@@ -753,6 +756,21 @@ main(int argc, char *argv[])
restore_params->partial_restore_type = INCLUDE;
restore_params->partial_db_list = datname_include_list;
}
+
+ if (gl_waldir_path)
+ {
+ /* clean up xlog directory name, check it's absolute */
+ canonicalize_path(gl_waldir_path);
+ if (!is_absolute_path(gl_waldir_path))
+ {
+ elog(ERROR, "WAL directory location must be an absolute path");
+ }
+ if (strlen(gl_waldir_path) > MAXPGPATH)
+ elog(ERROR, "Value specified to --waldir is too long");
+
+ }
+ restore_params->waldir = gl_waldir_path;
+
}
/*
diff --git a/src/pg_probackup.h b/src/pg_probackup.h
index 4cd65980..2a96af0f 100644
--- a/src/pg_probackup.h
+++ b/src/pg_probackup.h
@@ -338,7 +338,7 @@ typedef enum ShowFormat
#define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */
#define FILE_NOT_FOUND (-2) /* file disappeared during backup */
#define BLOCKNUM_INVALID (-1)
-#define PROGRAM_VERSION "2.5.5"
+#define PROGRAM_VERSION "2.5.6"
/* update when remote agent API or behaviour changes */
#define AGENT_PROTOCOL_VERSION 20501
@@ -566,6 +566,8 @@ typedef struct pgRestoreParams
/* options for partial restore */
PartialRestoreType partial_restore_type;
parray *partial_db_list;
+
+ char* waldir;
} pgRestoreParams;
/* Options needed for set-backup command */
@@ -1010,8 +1012,9 @@ extern CompressAlg parse_compress_alg(const char *arg);
extern const char* deparse_compress_alg(int alg);
/* in dir.c */
-extern bool get_control_value(const char *str, const char *name,
- char *value_str, int64 *value_int64, bool is_mandatory);
+extern bool get_control_value_int64(const char *str, const char *name, int64 *value_int64, bool is_mandatory);
+extern bool get_control_value_str(const char *str, const char *name,
+ char *value_str, size_t value_str_size, bool is_mandatory);
extern void dir_list_file(parray *files, const char *root, bool exclude,
bool follow_symlink, bool add_root, bool backup_logs,
bool skip_hidden, int external_dir_num, fio_location location);
@@ -1022,7 +1025,8 @@ extern void create_data_directories(parray *dest_files,
const char *backup_dir,
bool extract_tablespaces,
bool incremental,
- fio_location location);
+ fio_location location,
+ const char *waldir_path);
extern void read_tablespace_map(parray *links, const char *backup_dir);
extern void opt_tablespace_map(ConfigOption *opt, const char *arg);
diff --git a/src/restore.c b/src/restore.c
index d8d808a4..fbf0c039 100644
--- a/src/restore.c
+++ b/src/restore.c
@@ -801,7 +801,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain,
create_data_directories(dest_files, instance_config.pgdata,
dest_backup->root_dir, backup_has_tblspc,
params->incremental_mode != INCR_NONE,
- FIO_DB_HOST);
+ FIO_DB_HOST, params->waldir);
/*
* Restore dest_backup external directories.
diff --git a/src/utils/file.c b/src/utils/file.c
index 7d1df554..7103c8f1 100644
--- a/src/utils/file.c
+++ b/src/utils/file.c
@@ -489,8 +489,10 @@ fio_disconnect(void)
Assert(hdr.cop == FIO_DISCONNECTED);
SYS_CHECK(close(fio_stdin));
SYS_CHECK(close(fio_stdout));
+ SYS_CHECK(close(fio_stderr));
fio_stdin = 0;
fio_stdout = 0;
+ fio_stderr = 0;
wait_ssh();
}
}
@@ -3403,7 +3405,8 @@ fio_communicate(int in, int out)
case FIO_DISCONNECT:
hdr.cop = FIO_DISCONNECTED;
IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr));
- break;
+ free(buf);
+ return;
case FIO_GET_ASYNC_ERROR:
fio_get_async_error_impl(out);
break;
diff --git a/src/utils/remote.c b/src/utils/remote.c
index 2bfd24d1..046ebd81 100644
--- a/src/utils/remote.c
+++ b/src/utils/remote.c
@@ -147,6 +147,9 @@ bool launch_agent(void)
ssh_argv[ssh_argc++] = "-o";
ssh_argv[ssh_argc++] = "Compression=no";
+ ssh_argv[ssh_argc++] = "-o";
+ ssh_argv[ssh_argc++] = "ControlMaster=no";
+
ssh_argv[ssh_argc++] = "-o";
ssh_argv[ssh_argc++] = "LogLevel=error";
diff --git a/tests/Readme.md b/tests/Readme.md
index 668552c9..11c5272f 100644
--- a/tests/Readme.md
+++ b/tests/Readme.md
@@ -1,4 +1,4 @@
-[see wiki](https://confluence.postgrespro.ru/display/DEV/pg_probackup)
+[see wiki](https://confluence.postgrespro.ru/display/DEV/pg_probackup)
```
Note: For now these tests work on Linux and "kinda" work on Windows
@@ -31,7 +31,7 @@ Remote backup depends on key authentication to local machine via ssh as current
export PGPROBACKUP_SSH_REMOTE=ON
Run tests that are relied on advanced debugging features. For this mode, pg_probackup should be compiled without optimizations. For example:
-CFLAGS="-O0" ./configure --prefix=/path/to/prefix --enable-debug --enable-cassert --enable-depend --enable-tap-tests
+CFLAGS="-O0" ./configure --prefix=/path/to/prefix --enable-debug --enable-cassert --enable-depend --enable-tap-tests --enable-nls
export PGPROBACKUP_GDB=ON
@@ -41,6 +41,8 @@ Run suit of basic simple tests:
Run ptrack tests:
export PG_PROBACKUP_PTRACK=ON
+Run long (time consuming) tests:
+ export PG_PROBACKUP_LONG=ON
Usage:
sudo echo 0 > /proc/sys/kernel/yama/ptrace_scope
@@ -48,3 +50,20 @@ Usage:
export PG_CONFIG=/path/to/pg_config
python -m unittest [-v] tests[.specific_module][.class.test]
```
+
+# Troubleshooting FAQ
+
+## Python tests failure
+### 1. Could not open extension "..."
+```
+testgres.exceptions.QueryException ERROR: could not open extension control file "/share/extension/amcheck.control": No such file or directory
+```
+
+#### Solution:
+
+You have no `/contrib/...` extension installed, please do
+
+```commandline
+cd <postgres_build_dir>
+make install-world
+```
diff --git a/tests/__init__.py b/tests/__init__.py
index 55d6ea9b..79537ad7 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -7,7 +7,7 @@ from . import init, merge, option, show, compatibility, \
compression, page, ptrack, archive, exclude, cfs_backup, cfs_restore, \
cfs_validate_backup, auth_test, time_stamp, logging, \
locking, remote, external, config, checkdb, set_backup, incr_restore, \
- catchup, CVE_2018_1058
+ catchup, CVE_2018_1058, time_consuming
def load_tests(loader, tests, pattern):
@@ -21,6 +21,12 @@ def load_tests(loader, tests, pattern):
if os.environ['PG_PROBACKUP_PTRACK'] == 'ON':
suite.addTests(loader.loadTestsFromModule(ptrack))
+ # PG_PROBACKUP_LONG section for tests that are long
+ # by design e.g. they contain loops, sleeps and so on
+ if 'PG_PROBACKUP_LONG' in os.environ:
+ if os.environ['PG_PROBACKUP_LONG'] == 'ON':
+ suite.addTests(loader.loadTestsFromModule(time_consuming))
+
# suite.addTests(loader.loadTestsFromModule(auth_test))
suite.addTests(loader.loadTestsFromModule(archive))
suite.addTests(loader.loadTestsFromModule(backup))
diff --git a/tests/archive.py b/tests/archive.py
index 22b9d869..52fb225e 100644
--- a/tests/archive.py
+++ b/tests/archive.py
@@ -228,6 +228,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
Check pg_stop_backup_timeout, needed backup_timeout
Fixed in commit d84d79668b0c139 and assert fixed by ptrack 1.7
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -290,6 +292,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
Check pg_stop_backup_timeout, libpq-timeout requested.
Fixed in commit d84d79668b0c139 and assert fixed by ptrack 1.7
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -313,7 +317,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
gdb.set_breakpoint('pg_stop_backup')
gdb.run_until_break()
- self.set_auto_conf(node, {'archive_command': "'exit 1'"})
+ self.set_auto_conf(node, {'archive_command': 'exit 1'})
node.reload()
os.environ["PGAPPNAME"] = "foo"
diff --git a/tests/backup.py b/tests/backup.py
index b7fc4a92..ee9ddf91 100644
--- a/tests/backup.py
+++ b/tests/backup.py
@@ -1095,6 +1095,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_drop_rel_during_full_backup(self):
""""""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -1244,6 +1246,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_drop_rel_during_backup_delta(self):
""""""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -1313,6 +1317,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_drop_rel_during_backup_page(self):
""""""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -1445,6 +1451,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_backup_concurrent_drop_table(self):
""""""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -1579,6 +1587,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_sigint_handling(self):
""""""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -1618,6 +1628,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_sigterm_handling(self):
""""""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -1656,6 +1668,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_sigquit_handling(self):
""""""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -2905,6 +2919,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_missing_wal_segment(self):
""""""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -3295,6 +3311,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_backup_atexit(self):
""""""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
diff --git a/tests/catchup.py b/tests/catchup.py
index 8441deaa..a83755c5 100644
--- a/tests/catchup.py
+++ b/tests/catchup.py
@@ -1455,3 +1455,157 @@ class CatchupTest(ProbackupTest, unittest.TestCase):
dst_pg.stop()
#self.assertEqual(1, 0, 'Stop test')
self.del_test_dir(module_name, self.fname)
+
+#########################################
+# --dry-run
+#########################################
+ def test_dry_run_catchup_full(self):
+ """
+ Test dry-run option for full catchup
+ """
+ # preparation 1: source
+ src_pg = self.make_simple_node(
+ base_dir = os.path.join(module_name, self.fname, 'src'),
+ set_replication = True
+ )
+ src_pg.slow_start()
+
+ # preparation 2: make a cleanly shut down replica lagging behind the source
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
+
+ src_pg.pgbench_init(scale = 10)
+ pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum'])
+ pgbench.wait()
+
+ # save the condition before dry-run
+ content_before = self.pgdata_content(dst_pg.data_dir)
+
+ # do full catchup
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--dry-run']
+ )
+
+ # compare data dirs before and after catchup
+ self.compare_pgdata(
+ content_before,
+ self.pgdata_content(dst_pg.data_dir)
+ )
+
+ # Cleanup
+ src_pg.stop()
+ self.del_test_dir(module_name, self.fname)
+
+ def test_dry_run_catchup_ptrack(self):
+ """
+ Test dry-run option for catchup in incremental ptrack mode
+ """
+ if not self.ptrack:
+ return unittest.skip('Skipped because ptrack support is disabled')
+
+ # preparation 1: source
+ src_pg = self.make_simple_node(
+ base_dir = os.path.join(module_name, self.fname, 'src'),
+ set_replication = True,
+ ptrack_enable = True,
+ initdb_params = ['--data-checksums']
+ )
+ src_pg.slow_start()
+ src_pg.safe_psql("postgres", "CREATE EXTENSION ptrack")
+
+ src_pg.pgbench_init(scale = 10)
+ pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum'])
+ pgbench.wait()
+
+ # preparation 2: make a cleanly shut down replica lagging behind the source
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+ self.set_replica(src_pg, dst_pg)
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start(replica = True)
+ dst_pg.stop()
+
+ # save the condition before dry-run
+ content_before = self.pgdata_content(dst_pg.data_dir)
+
+ # do incremental catchup
+ self.catchup_node(
+ backup_mode = 'PTRACK',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--dry-run']
+ )
+
+ # compare data dirs before and after catchup
+ self.compare_pgdata(
+ content_before,
+ self.pgdata_content(dst_pg.data_dir)
+ )
+
+ # Cleanup
+ src_pg.stop()
+ self.del_test_dir(module_name, self.fname)
+
+ def test_dry_run_catchup_delta(self):
+ """
+ Test dry-run option for catchup in incremental delta mode
+ """
+
+ # preparation 1: source
+ src_pg = self.make_simple_node(
+ base_dir = os.path.join(module_name, self.fname, 'src'),
+ set_replication = True,
+ initdb_params = ['--data-checksums'],
+ pg_options = { 'wal_log_hints': 'on' }
+ )
+ src_pg.slow_start()
+
+ src_pg.pgbench_init(scale = 10)
+ pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum'])
+ pgbench.wait()
+
+ # preparation 2: make a cleanly shut down replica lagging behind the source
+ dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
+ self.catchup_node(
+ backup_mode = 'FULL',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+ )
+ self.set_replica(src_pg, dst_pg)
+ dst_options = {}
+ dst_options['port'] = str(dst_pg.port)
+ self.set_auto_conf(dst_pg, dst_options)
+ dst_pg.slow_start(replica = True)
+ dst_pg.stop()
+
+ # save the condition before dry-run
+ content_before = self.pgdata_content(dst_pg.data_dir)
+
+ # do delta catchup
+ self.catchup_node(
+ backup_mode = 'DELTA',
+ source_pgdata = src_pg.data_dir,
+ destination_node = dst_pg,
+ options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', "--dry-run"]
+ )
+
+ # compare data dirs before and after catchup
+ self.compare_pgdata(
+ content_before,
+ self.pgdata_content(dst_pg.data_dir)
+ )
+
+ # Cleanup
+ src_pg.stop()
+ self.del_test_dir(module_name, self.fname)
+
diff --git a/tests/checkdb.py b/tests/checkdb.py
index fec2e792..2df946cf 100644
--- a/tests/checkdb.py
+++ b/tests/checkdb.py
@@ -17,6 +17,8 @@ class CheckdbTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_checkdb_amcheck_only_sanity(self):
""""""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -544,11 +546,8 @@ class CheckdbTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_checkdb_sigint_handling(self):
""""""
- if not self.gdb:
- self.skipTest(
- "Specify PGPROBACKUP_GDB and build without "
- "optimizations for run this test"
- )
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
diff --git a/tests/delta.py b/tests/delta.py
index f365b6f9..82fb714f 100644
--- a/tests/delta.py
+++ b/tests/delta.py
@@ -472,11 +472,8 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
make node, make full and delta stream backups,
restore them and check data correctness
"""
- if not self.gdb:
- self.skipTest(
- "Specify PGPROBACKUP_GDB and build without "
- "optimizations for run this test"
- )
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
diff --git a/tests/exclude.py b/tests/exclude.py
index b98a483d..2c492588 100644
--- a/tests/exclude.py
+++ b/tests/exclude.py
@@ -203,8 +203,10 @@ class ExcludeTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_exclude_unlogged_tables_2(self):
"""
- make node, create unlogged, take FULL, check
- that unlogged was not backed up
+ 1. make node, create unlogged, take FULL, DELTA, PAGE,
+ check that unlogged table files was not backed up
+ 2. restore FULL, DELTA, PAGE to empty db,
+ ensure unlogged table exists and is empty
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@@ -220,6 +222,8 @@ class ExcludeTest(ProbackupTest, unittest.TestCase):
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
+ backup_ids = []
+
for backup_type in ['full', 'delta', 'page']:
if backup_type == 'full':
@@ -231,14 +235,16 @@ class ExcludeTest(ProbackupTest, unittest.TestCase):
'postgres',
'insert into test select generate_series(0,20050000)::text')
- rel_path = node.safe_psql(
+ rel_path = node.execute(
'postgres',
- "select pg_relation_filepath('test')").decode('utf-8').rstrip()
+ "select pg_relation_filepath('test')")[0][0]
backup_id = self.backup_node(
backup_dir, 'node', node,
backup_type=backup_type, options=['--stream'])
+ backup_ids.append(backup_id)
+
filelist = self.get_backup_filelist(
backup_dir, 'node', backup_id)
@@ -258,9 +264,25 @@ class ExcludeTest(ProbackupTest, unittest.TestCase):
rel_path + '.3', filelist,
"Unlogged table was not excluded")
+ # ensure restoring retrieves back only empty unlogged table
+ for backup_id in backup_ids:
+ node.stop()
+ node.cleanup()
+
+ self.restore_node(backup_dir, 'node', node, backup_id=backup_id)
+
+ node.slow_start()
+
+ self.assertEqual(
+ node.execute(
+ 'postgres',
+ 'select count(*) from test')[0][0],
+ 0)
+
# Clean after yourself
self.del_test_dir(module_name, fname)
+
# @unittest.skip("skip")
def test_exclude_log_dir(self):
"""
diff --git a/tests/expected/option_help.out b/tests/expected/option_help.out
index a8b4a64b..65916425 100644
--- a/tests/expected/option_help.out
+++ b/tests/expected/option_help.out
@@ -86,6 +86,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
[-T OLDDIR=NEWDIR] [--progress]
[--external-mapping=OLDDIR=NEWDIR]
[--skip-external-dirs] [--no-sync]
+ [-X WALDIR | --waldir=WALDIR]
[-I | --incremental-mode=none|checksum|lsn]
[--db-include | --db-exclude]
[--remote-proto] [--remote-host]
@@ -178,7 +179,8 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
[--remote-proto] [--remote-host]
[--remote-port] [--remote-path] [--remote-user]
[--ssh-options]
+ [--dry-run]
[--help]
-Read the website for details.
+Read the website for details .
Report bugs to .
diff --git a/tests/expected/option_help_ru.out b/tests/expected/option_help_ru.out
new file mode 100644
index 00000000..2e90eb29
--- /dev/null
+++ b/tests/expected/option_help_ru.out
@@ -0,0 +1,186 @@
+
+pg_probackup - утилита для управления резервным копированием/восстановлением базы данных PostgreSQL.
+
+ pg_probackup help [COMMAND]
+
+ pg_probackup version
+
+ pg_probackup init -B backup-path
+
+ pg_probackup set-config -B backup-path --instance=instance_name
+ [-D pgdata-path]
+ [--external-dirs=external-directories-paths]
+ [--log-level-console=log-level-console]
+ [--log-level-file=log-level-file]
+ [--log-filename=log-filename]
+ [--error-log-filename=error-log-filename]
+ [--log-directory=log-directory]
+ [--log-rotation-size=log-rotation-size]
+ [--log-rotation-age=log-rotation-age]
+ [--retention-redundancy=retention-redundancy]
+ [--retention-window=retention-window]
+ [--wal-depth=wal-depth]
+ [--compress-algorithm=compress-algorithm]
+ [--compress-level=compress-level]
+ [--archive-timeout=timeout]
+ [-d dbname] [-h host] [-p port] [-U username]
+ [--remote-proto] [--remote-host]
+ [--remote-port] [--remote-path] [--remote-user]
+ [--ssh-options]
+ [--restore-command=cmdline] [--archive-host=destination]
+ [--archive-port=port] [--archive-user=username]
+ [--help]
+
+ pg_probackup set-backup -B backup-path --instance=instance_name
+ -i backup-id [--ttl=interval] [--expire-time=timestamp]
+ [--note=text]
+ [--help]
+
+ pg_probackup show-config -B backup-path --instance=instance_name
+ [--format=format]
+ [--help]
+
+ pg_probackup backup -B backup-path -b backup-mode --instance=instance_name
+ [-D pgdata-path] [-C]
+ [--stream [-S slot-name] [--temp-slot]]
+ [--backup-pg-log] [-j num-threads] [--progress]
+ [--no-validate] [--skip-block-validation]
+ [--external-dirs=external-directories-paths]
+ [--no-sync]
+ [--log-level-console=log-level-console]
+ [--log-level-file=log-level-file]
+ [--log-filename=log-filename]
+ [--error-log-filename=error-log-filename]
+ [--log-directory=log-directory]
+ [--log-rotation-size=log-rotation-size]
+ [--log-rotation-age=log-rotation-age] [--no-color]
+ [--delete-expired] [--delete-wal] [--merge-expired]
+ [--retention-redundancy=retention-redundancy]
+ [--retention-window=retention-window]
+ [--wal-depth=wal-depth]
+ [--compress]
+ [--compress-algorithm=compress-algorithm]
+ [--compress-level=compress-level]
+ [--archive-timeout=archive-timeout]
+ [-d dbname] [-h host] [-p port] [-U username]
+ [-w --no-password] [-W --password]
+ [--remote-proto] [--remote-host]
+ [--remote-port] [--remote-path] [--remote-user]
+ [--ssh-options]
+ [--ttl=interval] [--expire-time=timestamp] [--note=text]
+ [--help]
+
+ pg_probackup restore -B backup-path --instance=instance_name
+ [-D pgdata-path] [-i backup-id] [-j num-threads]
+ [--recovery-target-time=time|--recovery-target-xid=xid
+ |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]]
+ [--recovery-target-timeline=timeline]
+ [--recovery-target=immediate|latest]
+ [--recovery-target-name=target-name]
+ [--recovery-target-action=pause|promote|shutdown]
+ [--restore-command=cmdline]
+ [-R | --restore-as-replica] [--force]
+ [--primary-conninfo=primary_conninfo]
+ [-S | --primary-slot-name=slotname]
+ [--no-validate] [--skip-block-validation]
+ [-T OLDDIR=NEWDIR] [--progress]
+ [--external-mapping=OLDDIR=NEWDIR]
+ [--skip-external-dirs] [--no-sync]
+ [-X WALDIR | --waldir=WALDIR]
+ [-I | --incremental-mode=none|checksum|lsn]
+ [--db-include | --db-exclude]
+ [--remote-proto] [--remote-host]
+ [--remote-port] [--remote-path] [--remote-user]
+ [--ssh-options]
+ [--archive-host=hostname]
+ [--archive-port=port] [--archive-user=username]
+ [--help]
+
+ pg_probackup validate -B backup-path [--instance=instance_name]
+ [-i backup-id] [--progress] [-j num-threads]
+ [--recovery-target-time=time|--recovery-target-xid=xid
+ |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]]
+ [--recovery-target-timeline=timeline]
+ [--recovery-target-name=target-name]
+ [--skip-block-validation]
+ [--help]
+
+ pg_probackup checkdb [-B backup-path] [--instance=instance_name]
+ [-D pgdata-path] [--progress] [-j num-threads]
+ [--amcheck] [--skip-block-validation]
+ [--heapallindexed] [--checkunique]
+ [--help]
+
+ pg_probackup show -B backup-path
+ [--instance=instance_name [-i backup-id]]
+ [--format=format] [--archive]
+ [--no-color] [--help]
+
+ pg_probackup delete -B backup-path --instance=instance_name
+ [-j num-threads] [--progress]
+ [--retention-redundancy=retention-redundancy]
+ [--retention-window=retention-window]
+ [--wal-depth=wal-depth]
+ [-i backup-id | --delete-expired | --merge-expired | --status=backup_status]
+ [--delete-wal]
+ [--dry-run] [--no-validate] [--no-sync]
+ [--help]
+
+ pg_probackup merge -B backup-path --instance=instance_name
+ -i backup-id [--progress] [-j num-threads]
+ [--no-validate] [--no-sync]
+ [--help]
+
+ pg_probackup add-instance -B backup-path -D pgdata-path
+ --instance=instance_name
+ [--external-dirs=external-directories-paths]
+ [--remote-proto] [--remote-host]
+ [--remote-port] [--remote-path] [--remote-user]
+ [--ssh-options]
+ [--help]
+
+ pg_probackup del-instance -B backup-path
+ --instance=instance_name
+ [--help]
+
+ pg_probackup archive-push -B backup-path --instance=instance_name
+ --wal-file-name=wal-file-name
+ [--wal-file-path=wal-file-path]
+ [-j num-threads] [--batch-size=batch_size]
+ [--archive-timeout=timeout]
+ [--no-ready-rename] [--no-sync]
+ [--overwrite] [--compress]
+ [--compress-algorithm=compress-algorithm]
+ [--compress-level=compress-level]
+ [--remote-proto] [--remote-host]
+ [--remote-port] [--remote-path] [--remote-user]
+ [--ssh-options]
+ [--help]
+
+ pg_probackup archive-get -B backup-path --instance=instance_name
+ --wal-file-path=wal-file-path
+ --wal-file-name=wal-file-name
+ [-j num-threads] [--batch-size=batch_size]
+ [--no-validate-wal]
+ [--remote-proto] [--remote-host]
+ [--remote-port] [--remote-path] [--remote-user]
+ [--ssh-options]
+ [--help]
+
+ pg_probackup catchup -b catchup-mode
+ --source-pgdata=path_to_pgdata_on_remote_server
+ --destination-pgdata=path_to_local_dir
+ [--stream [-S slot-name] [--temp-slot | --perm-slot]]
+ [-j num-threads]
+ [-T OLDDIR=NEWDIR]
+ [--exclude-path=path_prefix]
+ [-d dbname] [-h host] [-p port] [-U username]
+ [-w --no-password] [-W --password]
+ [--remote-proto] [--remote-host]
+ [--remote-port] [--remote-path] [--remote-user]
+ [--ssh-options]
+ [--dry-run]
+ [--help]
+
+Подробнее читайте на сайте .
+Сообщайте об ошибках в .
diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out
index 29cd93f4..96f0f344 100644
--- a/tests/expected/option_version.out
+++ b/tests/expected/option_version.out
@@ -1 +1 @@
-pg_probackup 2.5.5
\ No newline at end of file
+pg_probackup 2.5.6
diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py
index 8da80219..e840e115 100644
--- a/tests/helpers/ptrack_helpers.py
+++ b/tests/helpers/ptrack_helpers.py
@@ -89,11 +89,7 @@ def dir_files(base_dir):
def is_enterprise():
# pg_config --help
- if os.name == 'posix':
- cmd = [os.environ['PG_CONFIG'], '--pgpro-edition']
-
- elif os.name == 'nt':
- cmd = [[os.environ['PG_CONFIG']], ['--pgpro-edition']]
+ cmd = [os.environ['PG_CONFIG'], '--help']
p = subprocess.Popen(
cmd,
@@ -102,6 +98,18 @@ def is_enterprise():
)
return b'postgrespro.ru' in p.communicate()[0]
+
+def is_nls_enabled():
+ cmd = [os.environ['PG_CONFIG'], '--configure']
+
+ p = subprocess.Popen(
+ cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
+ return b'enable-nls' in p.communicate()[0]
+
+
class ProbackupException(Exception):
def __init__(self, message, cmd):
self.message = message
@@ -147,6 +155,7 @@ def slow_start(self, replica=False):
class ProbackupTest(object):
# Class attributes
enterprise = is_enterprise()
+ enable_nls = is_nls_enabled()
def __init__(self, *args, **kwargs):
super(ProbackupTest, self).__init__(*args, **kwargs)
@@ -180,8 +189,8 @@ class ProbackupTest(object):
self.test_env['LC_MESSAGES'] = 'C'
self.test_env['LC_TIME'] = 'C'
- self.gdb = 'PGPROBACKUP_GDB' in os.environ and \
- os.environ['PGPROBACKUP_GDB'] == 'ON'
+ self.gdb = 'PGPROBACKUP_GDB' in self.test_env and \
+ self.test_env['PGPROBACKUP_GDB'] == 'ON'
self.paranoia = 'PG_PROBACKUP_PARANOIA' in self.test_env and \
self.test_env['PG_PROBACKUP_PARANOIA'] == 'ON'
@@ -810,7 +819,7 @@ class ProbackupTest(object):
if self.verbose:
print(self.cmd)
if gdb:
- return GDBobj([binary_path] + command, self.verbose)
+ return GDBobj([binary_path] + command, self)
if asynchronous:
return subprocess.Popen(
[binary_path] + command,
@@ -1861,22 +1870,34 @@ class ProbackupTest(object):
self.assertFalse(fail, error_message)
def gdb_attach(self, pid):
- return GDBobj([str(pid)], self.verbose, attach=True)
+ return GDBobj([str(pid)], self, attach=True)
+
+ def _check_gdb_flag_or_skip_test(self):
+ if not self.gdb:
+ self.skipTest(
+ "Specify PGPROBACKUP_GDB and build without "
+ "optimizations for run this test"
+ )
class GdbException(Exception):
- def __init__(self, message=False):
+ def __init__(self, message="False"):
self.message = message
def __str__(self):
return '\n ERROR: {0}\n'.format(repr(self.message))
-class GDBobj(ProbackupTest):
- def __init__(self, cmd, verbose, attach=False):
- self.verbose = verbose
+class GDBobj:
+ def __init__(self, cmd, env, attach=False):
+ self.verbose = env.verbose
self.output = ''
+ # Check gdb flag is set up
+ if not env.gdb:
+ raise GdbException("No `PGPROBACKUP_GDB=on` is set, "
+ "test should call ProbackupTest::_check_gdb_flag_or_skip_test() on its start "
+ "and be skipped")
# Check gdb presense
try:
gdb_version, _ = subprocess.Popen(
diff --git a/tests/locking.py b/tests/locking.py
index ef7aa1f2..0fe954ca 100644
--- a/tests/locking.py
+++ b/tests/locking.py
@@ -17,6 +17,8 @@ class LockingTest(ProbackupTest, unittest.TestCase):
run validate, expect it to successfully executed,
concurrent RUNNING backup with pid file and active process is legal
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@@ -72,6 +74,8 @@ class LockingTest(ProbackupTest, unittest.TestCase):
RUNNING backup with pid file AND without active pid is legal,
but his status must be changed to ERROR and pid file is deleted
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@@ -142,6 +146,8 @@ class LockingTest(ProbackupTest, unittest.TestCase):
RUNNING backup with pid file AND without active pid is legal,
but his status must be changed to ERROR and pid file is deleted
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@@ -240,6 +246,8 @@ class LockingTest(ProbackupTest, unittest.TestCase):
RUNNING backup without pid file AND without active pid is legal,
his status must be changed to ERROR
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@@ -310,6 +318,8 @@ class LockingTest(ProbackupTest, unittest.TestCase):
Expect restore to sucseed because read-only locks
do not conflict
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@@ -352,6 +362,8 @@ class LockingTest(ProbackupTest, unittest.TestCase):
Expect restore to fail because validation of
intermediate backup is impossible
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@@ -443,6 +455,8 @@ class LockingTest(ProbackupTest, unittest.TestCase):
and stop it in the middle, delete full backup.
Expect it to fail.
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@@ -585,6 +599,8 @@ class LockingTest(ProbackupTest, unittest.TestCase):
"""
Make sure that shared lock leaves no files with pids
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
diff --git a/tests/logging.py b/tests/logging.py
index 47143cfb..70ebcf6d 100644
--- a/tests/logging.py
+++ b/tests/logging.py
@@ -12,6 +12,10 @@ class LogTest(ProbackupTest, unittest.TestCase):
# @unittest.expectedFailure
# PGPRO-2154
def test_log_rotation(self):
+ """
+ """
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
diff --git a/tests/merge.py b/tests/merge.py
index fe0927f4..5f092543 100644
--- a/tests/merge.py
+++ b/tests/merge.py
@@ -975,6 +975,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
"""
Check that failed MERGE can be continued
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -1051,6 +1053,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
"""
Fail merge via gdb, corrupt DELTA backup, try to continue merge
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -1148,6 +1152,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
"""
Check that failed MERGE on delete can be continued
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -1219,6 +1225,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
Check that failed MERGE cannot be continued if intermediate
backup is missing.
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -1409,6 +1417,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
check that crashing after opening backup.control
for writing will not result in losing backup metadata
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -1461,6 +1471,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
for writing will not result in losing metadata about backup files
TODO: rewrite
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -1552,6 +1564,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
for writing will not result in losing metadata about backup files
TODO: rewrite
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -1639,6 +1653,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
def test_failed_merge_after_delete(self):
"""
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -1720,6 +1736,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
def test_failed_merge_after_delete_1(self):
"""
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -1796,6 +1814,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
def test_failed_merge_after_delete_2(self):
"""
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -1858,6 +1878,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
def test_failed_merge_after_delete_3(self):
"""
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -2281,6 +2303,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
def test_idempotent_merge(self):
"""
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -2580,6 +2604,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
page header map cannot be trusted when
running retry
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -2626,6 +2652,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
def test_missing_data_file(self):
"""
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -2684,6 +2712,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
def test_missing_non_data_file(self):
"""
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -2741,6 +2771,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
def test_merge_remote_mode(self):
"""
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
diff --git a/tests/option.py b/tests/option.py
index 023a0c2c..88e72ffd 100644
--- a/tests/option.py
+++ b/tests/option.py
@@ -1,6 +1,7 @@
import unittest
import os
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
+import locale
module_name = 'option'
@@ -23,7 +24,7 @@ class OptionTest(ProbackupTest, unittest.TestCase):
"""help options"""
with open(os.path.join(self.dir_path, "expected/option_version.out"), "rb") as version_out:
self.assertIn(
- version_out.read().decode("utf-8"),
+ version_out.read().decode("utf-8").strip(),
self.run_pb(["--version"])
)
@@ -226,3 +227,17 @@ class OptionTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
+
+ # @unittest.skip("skip")
+ def test_help_6(self):
+ """help options"""
+ if ProbackupTest.enable_nls:
+ self.test_env['LC_ALL'] = 'ru_RU.utf-8'
+ with open(os.path.join(self.dir_path, "expected/option_help_ru.out"), "rb") as help_out:
+ self.assertEqual(
+ self.run_pb(["--help"]),
+ help_out.read().decode("utf-8")
+ )
+ else:
+ return unittest.skip(
+ 'You need to configure PostgreSQL with the --enable-nls option for this test')
diff --git a/tests/pgpro2068.py b/tests/pgpro2068.py
index a80d317d..3baa0ba0 100644
--- a/tests/pgpro2068.py
+++ b/tests/pgpro2068.py
@@ -18,11 +18,8 @@ class BugTest(ProbackupTest, unittest.TestCase):
"""
https://jira.postgrespro.ru/browse/PGPRO-2068
"""
- if not self.gdb:
- self.skipTest(
- "Specify PGPROBACKUP_GDB and build without "
- "optimizations for run this test"
- )
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
diff --git a/tests/ptrack.py b/tests/ptrack.py
index 5ecc669b..9741c956 100644
--- a/tests/ptrack.py
+++ b/tests/ptrack.py
@@ -822,6 +822,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
def test_ptrack_vacuum_full(self):
"""make node, make full and ptrack stream backups,
restore them and check data correctness"""
+ self._check_gdb_flag_or_skip_test()
+
backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, self.fname, 'node'),
diff --git a/tests/replica.py b/tests/replica.py
index 45eed3fb..acf655aa 100644
--- a/tests/replica.py
+++ b/tests/replica.py
@@ -539,11 +539,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
start backup from replica, during backup promote replica
check that backup is failed
"""
- if not self.gdb:
- self.skipTest(
- "Specify PGPROBACKUP_GDB and build without "
- "optimizations for run this test"
- )
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
@@ -634,11 +631,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
def test_replica_stop_lsn_null_offset(self):
"""
"""
- if not self.gdb:
- self.skipTest(
- "Specify PGPROBACKUP_GDB and build without "
- "optimizations for run this test"
- )
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
@@ -722,11 +716,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
def test_replica_stop_lsn_null_offset_next_record(self):
"""
"""
- if not self.gdb:
- self.skipTest(
- "Specify PGPROBACKUP_GDB and build without "
- "optimizations for run this test"
- )
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
@@ -749,7 +740,6 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
# freeze bgwriter to get rid of RUNNING XACTS records
bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0]
- gdb_checkpointer = self.gdb_attach(bgwriter_pid)
self.backup_node(backup_dir, 'master', master)
@@ -828,6 +818,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
def test_archive_replica_null_offset(self):
"""
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
@@ -998,11 +990,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
make archive master, take full and page archive backups from master,
set replica, make archive backup from replica
"""
- if not self.gdb:
- self.skipTest(
- "Specify PGPROBACKUP_GDB and build without "
- "optimizations for run this test"
- )
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
@@ -1104,11 +1093,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
def test_start_stop_lsn_in_the_same_segno(self):
"""
"""
- if not self.gdb:
- self.skipTest(
- "Specify PGPROBACKUP_GDB and build without "
- "optimizations for run this test"
- )
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
@@ -1131,7 +1116,6 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
# freeze bgwriter to get rid of RUNNING XACTS records
bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0]
- gdb_checkpointer = self.gdb_attach(bgwriter_pid)
self.backup_node(backup_dir, 'master', master, options=['--stream'])
diff --git a/tests/restore.py b/tests/restore.py
index a9fe869e..37f13357 100644
--- a/tests/restore.py
+++ b/tests/restore.py
@@ -2379,6 +2379,8 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_restore_concurrent_drop_table(self):
""""""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -3796,6 +3798,8 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_concurrent_restore(self):
""""""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -3915,3 +3919,59 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
+
+ # @unittest.skip("skip")
+ def test_restore_with_waldir(self):
+ """recovery from a full backup using the -X/--waldir restore option"""
+ fname = self.id().split('.')[3]
+ node = self.make_simple_node(
+ base_dir=os.path.join(module_name, fname, 'node'),
+ initdb_params=['--data-checksums'])
+
+ backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ self.init_pb(backup_dir)
+ self.add_instance(backup_dir, 'node', node)
+ self.set_archiving(backup_dir, 'node', node)
+ node.slow_start()
+
+
+ with node.connect("postgres") as con:
+ con.execute(
+ "CREATE TABLE tbl AS SELECT * "
+ "FROM generate_series(0,3) AS integer")
+ con.commit()
+
+ # Full backup
+ backup_id = self.backup_node(backup_dir, 'node', node)
+
+ node.stop()
+ node.cleanup()
+
+ # Create waldir
+ waldir_path = os.path.join(node.base_dir, "waldir")
+ os.makedirs(waldir_path)
+
+ # Test recovery from latest
+ self.assertIn(
+ "INFO: Restore of backup {0} completed.".format(backup_id),
+ self.restore_node(
+ backup_dir, 'node', node,
+ options=[
+ "-X", "%s" % (waldir_path)]),
+ '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+ repr(self.output), self.cmd))
+ node.slow_start()
+
+ count = node.execute("postgres", "SELECT count(*) FROM tbl")
+ self.assertEqual(count[0][0], 4)
+
+ # check pg_wal is symlink
+ if node.major_version >= 10:
+ wal_path=os.path.join(node.data_dir, "pg_wal")
+ else:
+ wal_path=os.path.join(node.data_dir, "pg_xlog")
+
+ self.assertEqual(os.path.islink(wal_path), True)
+
+ # Clean after yourself
+ self.del_test_dir(module_name, fname)
diff --git a/tests/retention.py b/tests/retention.py
index 19204807..b0399a23 100644
--- a/tests/retention.py
+++ b/tests/retention.py
@@ -1499,6 +1499,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
FULL
-------window
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@@ -1546,6 +1548,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
FULL
-------window
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@@ -1588,6 +1592,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
def test_retention_redundancy_overlapping_chains(self):
""""""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@@ -1636,6 +1642,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
def test_retention_redundancy_overlapping_chains_1(self):
""""""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@@ -1744,6 +1752,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
"""
Check that retention purge works correctly with MERGING backups
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -2536,6 +2546,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
"""
https://github.com/postgrespro/pg_probackup/issues/328
"""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
diff --git a/tests/time_consuming.py b/tests/time_consuming.py
new file mode 100644
index 00000000..396ab716
--- /dev/null
+++ b/tests/time_consuming.py
@@ -0,0 +1,76 @@
+import os
+import unittest
+from .helpers.ptrack_helpers import ProbackupTest
+import subprocess
+from time import sleep
+
+module_name = 'time_consuming'
+
+class TimeConsumingTests(ProbackupTest, unittest.TestCase):
+ def test_pbckp150(self):
+ """
+ https://jira.postgrespro.ru/browse/PBCKP-150
+ create a node filled with pgbench
+ create FULL backup followed by PTRACK backup
+ run pgbench, vacuum VERBOSE FULL and ptrack backups in parallel
+ """
+ # init node
+ fname = self.id().split('.')[3]
+ node = self.make_simple_node(
+ base_dir=os.path.join(module_name, fname, 'node'),
+ set_replication=True,
+ initdb_params=['--data-checksums'])
+ node.append_conf('postgresql.conf',
+ """
+ max_connections = 100
+ wal_keep_size = 16000
+ ptrack.map_size = 1
+ shared_preload_libraries='ptrack'
+ log_statement = 'none'
+ fsync = off
+ log_checkpoints = on
+ autovacuum = off
+ """)
+
+ # init probackup and add an instance
+ backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ self.init_pb(backup_dir)
+ self.add_instance(backup_dir, 'node', node)
+
+ # run the node and init ptrack
+ node.slow_start()
+ node.safe_psql("postgres", "CREATE EXTENSION ptrack")
+ # populate it with pgbench
+ node.pgbench_init(scale=5)
+
+ # FULL backup followed by PTRACK backup
+ self.backup_node(backup_dir, 'node', node, options=['--stream'])
+ self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream'])
+
+ # run ordinary pgbench scenario to imitate some activity and another pgbench for vacuuming in parallel
+ nBenchDuration = 30
+ pgbench = node.pgbench(options=['-c', '20', '-j', '8', '-T', str(nBenchDuration)])
+ with open('/tmp/pbckp150vacuum.sql', 'w') as f:
+ f.write('VACUUM (FULL) pgbench_accounts, pgbench_tellers, pgbench_history; SELECT pg_sleep(1);\n')
+ pgbenchval = node.pgbench(options=['-c', '1', '-f', '/tmp/pbckp150vacuum.sql', '-T', str(nBenchDuration)])
+
+ # several PTRACK backups
+ for i in range(nBenchDuration):
+ print("[{}] backing up PTRACK diff...".format(i+1))
+ self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream', '--log-level-console', 'VERBOSE'])
+ sleep(0.1)
+ # if the activity pgbench has finished, stop backing up
+ if pgbench.poll() is not None:
+ break
+
+ pgbench.kill()
+ pgbenchval.kill()
+ pgbench.wait()
+ pgbenchval.wait()
+
+ backups = self.show_pb(backup_dir, 'node')
+ for b in backups:
+ self.assertEqual("OK", b['status'])
+
+ # Clean after yourself
+ self.del_test_dir(module_name, fname)
diff --git a/tests/validate.py b/tests/validate.py
index 0b04d92f..22a03c3b 100644
--- a/tests/validate.py
+++ b/tests/validate.py
@@ -2,6 +2,7 @@ import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
from datetime import datetime, timedelta
+from pathlib import Path
import subprocess
from sys import exit
import time
@@ -58,7 +59,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
with open(log_file_path) as f:
log_content = f.read()
self.assertIn(
- 'File: "{0}" blknum 1, empty page'.format(file),
+ 'File: "{0}" blknum 1, empty page'.format(Path(file).as_posix()),
log_content,
'Failed to detect nullified block')
@@ -1088,11 +1089,8 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
"""
check that interrupt during validation is handled correctly
"""
- if not self.gdb:
- self.skipTest(
- "Specify PGPROBACKUP_GDB and build without "
- "optimizations for run this test"
- )
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@@ -3564,6 +3562,8 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_validation_after_backup(self):
""""""
+ self._check_gdb_flag_or_skip_test()
+
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@@ -4247,4 +4247,4 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
# 715 MAXALIGN(header.compressed_size), in);
# 716 if (read_len != MAXALIGN(header.compressed_size))
# -> 717 elog(ERROR, "cannot read block %u of \"%s\" read %lu of %d",
-# 718 blknum, file->path, read_len, header.compressed_size);
\ No newline at end of file
+# 718 blknum, file->path, read_len, header.compressed_size);
diff --git a/travis/run_tests.sh b/travis/run_tests.sh
index 52b05105..37614f97 100755
--- a/travis/run_tests.sh
+++ b/travis/run_tests.sh
@@ -47,7 +47,7 @@ cd postgres # Go to postgres dir
if [ "$PG_PROBACKUP_PTRACK" = "ON" ]; then
git apply -3 ../ptrack/patches/${PTRACK_PATCH_PG_BRANCH}-ptrack-core.diff
fi
-CFLAGS="-O0" ./configure --prefix=$PGHOME --enable-debug --enable-cassert --enable-depend --enable-tap-tests
+CFLAGS="-O0" ./configure --prefix=$PGHOME --enable-debug --enable-cassert --enable-depend --enable-tap-tests --enable-nls
make -s -j$(nproc) install
#make -s -j$(nproc) -C 'src/common' install
#make -s -j$(nproc) -C 'src/port' install
@@ -100,11 +100,20 @@ source pyenv/bin/activate
pip3 install testgres
echo "############### Testing:"
+echo PG_PROBACKUP_PARANOIA=${PG_PROBACKUP_PARANOIA}
+echo ARCHIVE_COMPRESSION=${ARCHIVE_COMPRESSION}
+echo PGPROBACKUPBIN_OLD=${PGPROBACKUPBIN_OLD}
+echo PGPROBACKUPBIN=${PGPROBACKUPBIN}
+echo PGPROBACKUP_SSH_REMOTE=${PGPROBACKUP_SSH_REMOTE}
+echo PGPROBACKUP_GDB=${PGPROBACKUP_GDB}
+echo PG_PROBACKUP_PTRACK=${PG_PROBACKUP_PTRACK}
if [ "$MODE" = "basic" ]; then
export PG_PROBACKUP_TEST_BASIC=ON
+ echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC}
python3 -m unittest -v tests
python3 -m unittest -v tests.init
else
+ echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC}
python3 -m unittest -v tests.$MODE
fi