From 22c808312f67a060cda3bb36e5a032784a5810f9 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Thu, 17 Mar 2022 11:33:18 +0300 Subject: [PATCH 01/28] [ci skip] [packaging] Fix CentOS-8 packaging, fix pgpro-std tests --- packaging/pkg/scripts/rpm.sh | 8 ++++++++ packaging/test/scripts/rpm.sh | 11 ++++++++++- packaging/test/scripts/rpm_forks.sh | 20 +++++++++++++++----- 3 files changed, 33 insertions(+), 6 deletions(-) diff --git a/packaging/pkg/scripts/rpm.sh b/packaging/pkg/scripts/rpm.sh index d03915c2..2fec4a70 100755 --- a/packaging/pkg/scripts/rpm.sh +++ b/packaging/pkg/scripts/rpm.sh @@ -20,7 +20,15 @@ ulimit -n 1024 if [ ${DISTRIB} = 'centos' ] ; then sed -i 's|^baseurl=http://|baseurl=https://|g' /etc/yum.repos.d/*.repo + if [ ${DISTRIB_VERSION} = '8' ]; then + sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo + sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo + fi yum update -y + if [ ${DISTRIB_VERSION} = '8' ]; then + sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo + sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo + fi fi # PACKAGES NEEDED diff --git a/packaging/test/scripts/rpm.sh b/packaging/test/scripts/rpm.sh index 92804a7f..3b680699 100755 --- a/packaging/test/scripts/rpm.sh +++ b/packaging/test/scripts/rpm.sh @@ -15,7 +15,16 @@ PG_TOG=$(echo $PG_VERSION | sed 's|\.||g') if [ ${DISTRIB} != 'rhel' -o ${DISTRIB_VERSION} != '7' ]; then # update of rpm package is broken in rhel-7 (26/12/2022) - yum update -y + #yum update -y + if [ ${DISTRIB} = 'centos' -a ${DISTRIB_VERSION} = '8' ]; then + sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo + sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo + fi + yum update -y + if [ ${DISTRIB} = 'centos' -a ${DISTRIB_VERSION} = '8' ]; then + sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo + sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo + fi fi # yum upgrade -y || echo 'some packages in docker failed to upgrade' # yum install -y sudo diff --git a/packaging/test/scripts/rpm_forks.sh b/packaging/test/scripts/rpm_forks.sh index 0d72040e..d5771169 100755 --- a/packaging/test/scripts/rpm_forks.sh +++ b/packaging/test/scripts/rpm_forks.sh @@ -15,7 +15,15 @@ PG_TOG=$(echo $PG_VERSION | sed 's|\.||g') if [ ${DISTRIB} != 'rhel' -o ${DISTRIB_VERSION} != '7' ]; then # update of rpm package is broken in rhel-7 (26/12/2022) + if [ ${DISTRIB} = 'centos' -a ${DISTRIB_VERSION} = '8' ]; then + sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo + sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo + fi yum update -y + if [ ${DISTRIB} = 'centos' -a ${DISTRIB_VERSION} = '8' ]; then + sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo + sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo + fi fi if [ ${PBK_EDITION} == 'ent' ]; then @@ -80,11 +88,13 @@ if [ $PBK_EDITION == 'std' ] ; then # install POSTGRESQL # rpm -ivh https://download.postgresql.org/pub/repos/yum/reporpms/EL-${DISTRIB_VERSION}-x86_64/pgdg-redhat-repo-latest.noarch.rpm - if [[ ${PG_VERSION} == '11' ]] || [[ ${PG_VERSION} == '12' ]]; then - rpm -ivh 
https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/postgrespro-std-${PG_VERSION}.${DISTRIB}.yum-${PG_VERSION}-0.3.noarch.rpm - else - rpm -ivh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/postgrespro-std-${PG_VERSION}.${DISTRIB}.yum-${PG_VERSION}-0.3.noarch.rpm - fi + #if [[ ${PG_VERSION} == '11' ]] || [[ ${PG_VERSION} == '12' ]]; then + # rpm -ivh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/postgrespro-std-${PG_VERSION}.${DISTRIB}.yum-${PG_VERSION}-0.3.noarch.rpm + #else + # rpm -ivh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/postgrespro-std-${PG_VERSION}.${DISTRIB}.yum-${PG_VERSION}-0.3.noarch.rpm + #fi + curl -o pgpro-repo-add.sh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/pgpro-repo-add.sh + sh pgpro-repo-add.sh if [[ ${PG_VERSION} == '9.6' ]]; then yum install -y postgrespro${PG_TOG}-server.x86_64 From bdbc8265d45649e803e4dd9ad733250758e33e19 Mon Sep 17 00:00:00 2001 From: japinli Date: Tue, 19 Apr 2022 19:02:20 +0800 Subject: [PATCH 02/28] Fix comparison unsigned expression --- src/data.c | 2 +- src/delete.c | 15 +++++++-------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/src/data.c b/src/data.c index f02e3fd1..ec42813a 100644 --- a/src/data.c +++ b/src/data.c @@ -2321,7 +2321,7 @@ copy_pages(const char *to_fullpath, const char *from_fullpath, elog(ERROR, "Cannot seek to end of file position in destination file \"%s\": %s", to_fullpath, strerror(errno)); { - size_t pos = ftell(out); + long pos = ftell(out); if (pos < 0) elog(ERROR, "Cannot get position in destination file \"%s\": %s", diff --git a/src/delete.c b/src/delete.c index 6c70ff81..b86ed43e 100644 --- a/src/delete.c +++ b/src/delete.c @@ -36,7 +36,7 @@ do_delete(InstanceState *instanceState, time_t backup_id) parray *backup_list, *delete_list; pgBackup *target_backup = NULL; - size_t size_to_delete = 0; + int64 size_to_delete = 0; char size_to_delete_pretty[20]; /* Get complete list of backups */ @@ -682,12 +682,11 @@ do_retention_wal(InstanceState *instanceState, bool dry_run) * at least one backup and no file should be removed. * Unless wal-depth is enabled. 
*/ - if ((tlinfo->closest_backup) && instance_config.wal_depth <= 0) + if ((tlinfo->closest_backup) && instance_config.wal_depth == 0) continue; /* WAL retention keeps this timeline from purge */ - if (instance_config.wal_depth >= 0 && tlinfo->anchor_tli > 0 && - tlinfo->anchor_tli != tlinfo->tli) + if (tlinfo->anchor_tli > 0 && tlinfo->anchor_tli != tlinfo->tli) continue; /* @@ -701,7 +700,7 @@ do_retention_wal(InstanceState *instanceState, bool dry_run) */ if (tlinfo->oldest_backup) { - if (instance_config.wal_depth >= 0 && !(XLogRecPtrIsInvalid(tlinfo->anchor_lsn))) + if (!(XLogRecPtrIsInvalid(tlinfo->anchor_lsn))) { delete_walfiles_in_tli(instanceState, tlinfo->anchor_lsn, tlinfo, instance_config.xlog_seg_size, dry_run); @@ -714,7 +713,7 @@ do_retention_wal(InstanceState *instanceState, bool dry_run) } else { - if (instance_config.wal_depth >= 0 && !(XLogRecPtrIsInvalid(tlinfo->anchor_lsn))) + if (!(XLogRecPtrIsInvalid(tlinfo->anchor_lsn))) delete_walfiles_in_tli(instanceState, tlinfo->anchor_lsn, tlinfo, instance_config.xlog_seg_size, dry_run); else @@ -942,7 +941,7 @@ delete_walfiles_in_tli(InstanceState *instanceState, XLogRecPtr keep_lsn, timeli join_path_components(wal_fullpath, instanceState->instance_wal_subdir_path, wal_file->file.name); /* save segment from purging */ - if (instance_config.wal_depth >= 0 && wal_file->keep) + if (wal_file->keep) { elog(VERBOSE, "Retain WAL segment \"%s\"", wal_fullpath); continue; @@ -1027,7 +1026,7 @@ do_delete_status(InstanceState *instanceState, InstanceConfig *instance_config, parray *backup_list, *delete_list; const char *pretty_status; int n_deleted = 0, n_found = 0; - size_t size_to_delete = 0; + int64 size_to_delete = 0; char size_to_delete_pretty[20]; pgBackup *backup; From 0ae30afe0aa24d970ffd1eb0ca3d3ee6ca32de3d Mon Sep 17 00:00:00 2001 From: japinli Date: Thu, 21 Apr 2022 20:25:28 +0800 Subject: [PATCH 03/28] Fix formattor for ftello --- src/data.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/data.c b/src/data.c index ec42813a..052e1748 100644 --- a/src/data.c +++ b/src/data.c @@ -2030,10 +2030,10 @@ get_page_header(FILE *in, const char *fullpath, BackupPageHeader* bph, return false; /* EOF found */ else if (read_len != 0 && feof(in)) elog(ERROR, - "Odd size page found at offset %lu of \"%s\"", + "Odd size page found at offset %ld of \"%s\"", ftello(in), fullpath); else - elog(ERROR, "Cannot read header at offset %lu of \"%s\": %s", + elog(ERROR, "Cannot read header at offset %ld of \"%s\": %s", ftello(in), fullpath, strerror(errno)); } From 141e96a0e6cdaac8b1e41b871254bdb60005a368 Mon Sep 17 00:00:00 2001 From: Elena Indrupskaya Date: Mon, 23 May 2022 15:07:27 +0300 Subject: [PATCH 04/28] [DOC] [PBCKP-128] [skip travis] Describe catchup dry-run flag --- doc/pgprobackup.xml | 36 +++++++++++++++++++++++------------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 86063b84..cb615fb1 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -3563,6 +3563,14 @@ pg_probackup catchup -b catchup_mode --source-pgdata= of threads with the option: pg_probackup catchup -b catchup_mode --source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir --stream --threads=num_threads + + + + Before cloning/synchronising a PostgreSQL instance, you can run the + catchup command with the flag + to estimate the size of data files to be transferred, but make no changes on disk: + +pg_probackup catchup -b catchup_mode 
--source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir --stream --dry-run @@ -3576,7 +3584,7 @@ pg_probackup catchup --source-pgdata=/master-pgdata --destination-pgdata=/replic Another example shows how you can add a new remote standby server with the PostgreSQL data directory /replica-pgdata by running the catchup command in the FULL mode on four parallel threads: - + pg_probackup catchup --source-pgdata=/master-pgdata --destination-pgdata=/replica-pgdata -p 5432 -d postgres -U remote-postgres-user --stream --backup-mode=FULL --remote-host=remote-hostname --remote-user=remote-unix-username -j 4 @@ -4482,7 +4490,7 @@ pg_probackup archive-get -B backup_dir --instance catchup_mode --source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir -[--help] [-j | --threads=num_threads] [--stream] +[--help] [-j | --threads=num_threads] [--stream] [--dry-run] [--temp-slot] [-P | --perm-slot] [-S | --slot=slot_name] [--exclude-path=PATHNAME] [-T OLDDIR=NEWDIR] @@ -4571,6 +4579,19 @@ pg_probackup catchup -b catchup_mode + + + + + Displays the total size of the files to be transferred by catchup. + This flag initiates a trial run of catchup, which does + not actually create, delete or move files on disk. WAL streaming is skipped with . + This flag also allows you to check that + all the options are correct and cloning/synchronising is ready to run. + + + + =path_prefix =path_prefix @@ -4591,17 +4612,6 @@ pg_probackup catchup -b catchup_mode - - - - - Copies the instance in STREAM WAL delivery mode, - including all the necessary WAL files by streaming them from - the instance server via replication protocol. - - - - From 4b2df86d6961937e062c54bb7fd5a4cdf96c1f58 Mon Sep 17 00:00:00 2001 From: Vyacheslav Makarov <50846161+MakSl@users.noreply.github.com> Date: Mon, 23 May 2022 20:13:18 +0300 Subject: [PATCH 05/28] PBCKP-97: added localization of messages * PBCKP-97: Adding localization files Added localization of messages. Fixed some bugs. Added the --enable-nls tag for tests. Added a test to check the localization of messages. Co-authored-by: Vyacheslav Makarov --- .travis.yml | 1 + README.md | 14 + nls.mk | 6 + po/ru.po | 1880 +++++++++++++++++++++++++++++ src/help.c | 4 +- src/pg_probackup.c | 1 + tests/Readme.md | 2 +- tests/expected/option_help.out | 2 +- tests/expected/option_help_ru.out | 184 +++ tests/option.py | 11 + travis/run_tests.sh | 2 +- 11 files changed, 2102 insertions(+), 5 deletions(-) create mode 100644 nls.mk create mode 100644 po/ru.po create mode 100644 tests/expected/option_help_ru.out diff --git a/.travis.yml b/.travis.yml index 66333091..8e325c64 100644 --- a/.travis.yml +++ b/.travis.yml @@ -41,6 +41,7 @@ env: # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=delta # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=locking # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=merge +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=option # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=page # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=ptrack # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=replica diff --git a/README.md b/README.md index 060883a2..5da8d199 100644 --- a/README.md +++ b/README.md @@ -224,3 +224,17 @@ Postgres Professional, Moscow, Russia. 
 ## Credits
 
 `pg_probackup` utility is based on `pg_arman`, that was originally written by NTT and then developed and maintained by Michael Paquier.
+
+
+### Localization files (*.po)
+
+How to add a new translation language:
+1. Add the --enable-nls flag to configure.
+2. Build PostgreSQL.
+3. Add the required source files to GETTEXT_FILES in the pg_probackup nls.mk.
+4. Run 'make update-po' in the pg_probackup folder.
+5. As a result, the progname.pot file will be created. Copy its content into the file for the desired language.
+6. Add the new language to AVAIL_LANGUAGES in the pg_probackup nls.mk.
+
+For more information, follow the link below:
+https://postgrespro.ru/docs/postgresql/12/nls-translator
diff --git a/nls.mk b/nls.mk
new file mode 100644
index 00000000..981c1c4f
--- /dev/null
+++ b/nls.mk
@@ -0,0 +1,6 @@
+# contrib/pg_probackup/nls.mk
+CATALOG_NAME = pg_probackup
+AVAIL_LANGUAGES = ru
+GETTEXT_FILES = src/help.c
+GETTEXT_TRIGGERS = $(FRONTEND_COMMON_GETTEXT_TRIGGERS)
+GETTEXT_FLAGS = $(FRONTEND_COMMON_GETTEXT_FLAGS)
diff --git a/po/ru.po b/po/ru.po
new file mode 100644
index 00000000..1263675c
--- /dev/null
+++ b/po/ru.po
@@ -0,0 +1,1880 @@
+# Russian message translation file for pg_probackup
+# Copyright (C) 2022 PostgreSQL Global Development Group
+# This file is distributed under the same license as the pg_probackup (PostgreSQL) package.
+# Vyacheslav Makarov , 2022.
+msgid ""
+msgstr ""
+"Project-Id-Version: pg_probackup (PostgreSQL)\n"
+"Report-Msgid-Bugs-To: bugs@postgrespro.ru\n"
+"POT-Creation-Date: 2022-04-08 11:33+0300\n"
+"PO-Revision-Date: 2022-MO-DA HO:MI+ZONE\n"
+"Last-Translator: Vyacheslav Makarov \n"
+"Language-Team: Russian \n"
+"Language: ru\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n"
+"%10<=4 && (n%100<10 || n%100>=20) ? 
1 : 2);\n" + +#: src/help.c:84 +#, c-format +msgid "" +"\n" +"%s - utility to manage backup/recovery of PostgreSQL database.\n" +msgstr "" +"\n" +"%s - утилита для управления резервным копированием/восстановлением базы данных PostgreSQL.\n" + +#: src/help.c:86 +#, c-format +msgid "" +"\n" +" %s help [COMMAND]\n" +msgstr "" + +#: src/help.c:88 +#, c-format +msgid "" +"\n" +" %s version\n" +msgstr "" + +#: src/help.c:90 +#, c-format +msgid "" +"\n" +" %s init -B backup-path\n" +msgstr "" + +#: src/help.c:92 +#, c-format +msgid "" +"\n" +" %s set-config -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:93 src/help.c:791 +#, c-format +msgid " [-D pgdata-path]\n" +msgstr "" + +#: src/help.c:94 src/help.c:130 src/help.c:218 +#, c-format +msgid " [--external-dirs=external-directories-paths]\n" +msgstr "" + +#: src/help.c:95 src/help.c:132 src/help.c:305 src/help.c:731 src/help.c:794 +#, c-format +msgid " [--log-level-console=log-level-console]\n" +msgstr "" + +#: src/help.c:96 src/help.c:133 src/help.c:306 src/help.c:732 src/help.c:795 +#, c-format +msgid " [--log-level-file=log-level-file]\n" +msgstr "" + +#: src/help.c:97 src/help.c:134 src/help.c:307 src/help.c:733 src/help.c:796 +#, c-format +msgid " [--log-filename=log-filename]\n" +msgstr "" + +#: src/help.c:98 src/help.c:135 src/help.c:308 src/help.c:734 src/help.c:797 +#, c-format +msgid " [--error-log-filename=error-log-filename]\n" +msgstr "" + +#: src/help.c:99 src/help.c:136 src/help.c:309 src/help.c:735 src/help.c:798 +#, c-format +msgid " [--log-directory=log-directory]\n" +msgstr "" + +#: src/help.c:100 src/help.c:137 src/help.c:310 src/help.c:736 src/help.c:799 +#, c-format +msgid " [--log-rotation-size=log-rotation-size]\n" +msgstr "" + +#: src/help.c:101 src/help.c:800 +#, c-format +msgid " [--log-rotation-age=log-rotation-age]\n" +msgstr "" + +#: src/help.c:102 src/help.c:140 src/help.c:203 src/help.c:313 src/help.c:674 +#: src/help.c:801 +#, c-format +msgid " [--retention-redundancy=retention-redundancy]\n" +msgstr "" + +#: src/help.c:103 src/help.c:141 src/help.c:204 src/help.c:314 src/help.c:675 +#: src/help.c:802 +#, c-format +msgid " [--retention-window=retention-window]\n" +msgstr "" + +#: src/help.c:104 src/help.c:142 src/help.c:205 src/help.c:315 src/help.c:676 +#: src/help.c:803 +#, c-format +msgid " [--wal-depth=wal-depth]\n" +msgstr "" + +#: src/help.c:105 src/help.c:144 src/help.c:235 src/help.c:317 src/help.c:804 +#: src/help.c:948 +#, c-format +msgid " [--compress-algorithm=compress-algorithm]\n" +msgstr "" + +#: src/help.c:106 src/help.c:145 src/help.c:236 src/help.c:318 src/help.c:805 +#: src/help.c:949 +#, c-format +msgid " [--compress-level=compress-level]\n" +msgstr "" + +#: src/help.c:107 src/help.c:232 src/help.c:806 src/help.c:945 +#, c-format +msgid " [--archive-timeout=timeout]\n" +msgstr "" + +#: src/help.c:108 src/help.c:147 src/help.c:259 src/help.c:320 src/help.c:807 +#: src/help.c:1045 +#, c-format +msgid " [-d dbname] [-h host] [-p port] [-U username]\n" +msgstr "" + +#: src/help.c:109 src/help.c:149 src/help.c:174 src/help.c:219 src/help.c:237 +#: src/help.c:247 src/help.c:261 src/help.c:322 src/help.c:449 src/help.c:808 +#: src/help.c:906 src/help.c:950 src/help.c:994 src/help.c:1047 +#, c-format +msgid " [--remote-proto] [--remote-host]\n" +msgstr "" + +#: src/help.c:110 src/help.c:150 src/help.c:175 src/help.c:220 src/help.c:238 +#: src/help.c:248 src/help.c:262 src/help.c:323 src/help.c:450 src/help.c:809 +#: src/help.c:907 src/help.c:951 src/help.c:995 src/help.c:1048 +#, 
c-format +msgid " [--remote-port] [--remote-path] [--remote-user]\n" +msgstr "" + +#: src/help.c:111 src/help.c:151 src/help.c:176 src/help.c:221 src/help.c:239 +#: src/help.c:249 src/help.c:263 src/help.c:324 src/help.c:451 src/help.c:1049 +#, c-format +msgid " [--ssh-options]\n" +msgstr "" + +#: src/help.c:112 +#, c-format +msgid " [--restore-command=cmdline] [--archive-host=destination]\n" +msgstr "" + +#: src/help.c:113 src/help.c:178 +#, c-format +msgid " [--archive-port=port] [--archive-user=username]\n" +msgstr "" + +#: src/help.c:114 src/help.c:119 src/help.c:123 src/help.c:153 src/help.c:179 +#: src/help.c:188 src/help.c:194 src/help.c:209 src/help.c:214 src/help.c:222 +#: src/help.c:226 src/help.c:240 src/help.c:250 src/help.c:264 +#, c-format +msgid " [--help]\n" +msgstr "" + +#: src/help.c:116 +#, c-format +msgid "" +"\n" +" %s set-backup -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:117 +#, c-format +msgid " -i backup-id [--ttl=interval] [--expire-time=timestamp]\n" +msgstr "" + +#: src/help.c:118 +#, c-format +msgid " [--note=text]\n" +msgstr "" + +#: src/help.c:121 +#, c-format +msgid "" +"\n" +" %s show-config -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:122 +#, c-format +msgid " [--format=format]\n" +msgstr "" + +#: src/help.c:125 +#, c-format +msgid "" +"\n" +" %s backup -B backup-path -b backup-mode --instance=instance_name\n" +msgstr "" + +#: src/help.c:126 src/help.c:299 +#, c-format +msgid " [-D pgdata-path] [-C]\n" +msgstr "" + +#: src/help.c:127 src/help.c:300 +#, c-format +msgid " [--stream [-S slot-name] [--temp-slot]]\n" +msgstr "" + +#: src/help.c:128 src/help.c:301 +#, c-format +msgid " [--backup-pg-log] [-j num-threads] [--progress]\n" +msgstr "" + +#: src/help.c:129 src/help.c:168 src/help.c:302 src/help.c:433 +#, c-format +msgid " [--no-validate] [--skip-block-validation]\n" +msgstr "" + +#: src/help.c:131 src/help.c:304 +#, c-format +msgid " [--no-sync]\n" +msgstr "" + +#: src/help.c:138 src/help.c:311 +#, c-format +msgid " [--log-rotation-age=log-rotation-age] [--no-color]\n" +msgstr "" + +#: src/help.c:139 src/help.c:312 +#, c-format +msgid " [--delete-expired] [--delete-wal] [--merge-expired]\n" +msgstr "" + +#: src/help.c:143 src/help.c:316 +#, c-format +msgid " [--compress]\n" +msgstr "" + +#: src/help.c:146 src/help.c:319 +#, c-format +msgid " [--archive-timeout=archive-timeout]\n" +msgstr "" + +#: src/help.c:148 src/help.c:260 src/help.c:321 src/help.c:1046 +#, c-format +msgid " [-w --no-password] [-W --password]\n" +msgstr "" + +#: src/help.c:152 +#, c-format +msgid " [--ttl=interval] [--expire-time=timestamp] [--note=text]\n" +msgstr "" + +#: src/help.c:156 +#, c-format +msgid "" +"\n" +" %s restore -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:157 src/help.c:431 +#, c-format +msgid " [-D pgdata-path] [-i backup-id] [-j num-threads]\n" +msgstr "" + +#: src/help.c:158 src/help.c:183 src/help.c:439 src/help.c:552 +#, c-format +msgid " [--recovery-target-time=time|--recovery-target-xid=xid\n" +msgstr "" + +#: src/help.c:159 src/help.c:184 src/help.c:440 src/help.c:553 +#, c-format +msgid " |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]]\n" +msgstr "" + +#: src/help.c:160 src/help.c:185 src/help.c:441 src/help.c:554 +#, c-format +msgid " [--recovery-target-timeline=timeline]\n" +msgstr "" + +#: src/help.c:161 src/help.c:442 +#, c-format +msgid " [--recovery-target=immediate|latest]\n" +msgstr "" + +#: src/help.c:162 src/help.c:186 src/help.c:443 src/help.c:555 
+#, c-format +msgid " [--recovery-target-name=target-name]\n" +msgstr "" + +#: src/help.c:163 src/help.c:444 +#, c-format +msgid " [--recovery-target-action=pause|promote|shutdown]\n" +msgstr "" + +#: src/help.c:164 src/help.c:445 src/help.c:793 +#, c-format +msgid " [--restore-command=cmdline]\n" +msgstr "" + +#: src/help.c:165 +#, c-format +msgid " [-R | --restore-as-replica] [--force]\n" +msgstr "" + +#: src/help.c:166 src/help.c:447 +#, c-format +msgid " [--primary-conninfo=primary_conninfo]\n" +msgstr "" + +#: src/help.c:167 src/help.c:448 +#, c-format +msgid " [-S | --primary-slot-name=slotname]\n" +msgstr "" + +#: src/help.c:169 +#, c-format +msgid " [-T OLDDIR=NEWDIR] [--progress]\n" +msgstr "" + +#: src/help.c:170 src/help.c:435 +#, c-format +msgid " [--external-mapping=OLDDIR=NEWDIR]\n" +msgstr "" + +#: src/help.c:171 +#, c-format +msgid " [--skip-external-dirs] [--no-sync]\n" +msgstr "" + +#: src/help.c:172 src/help.c:437 +#, c-format +msgid " [-I | --incremental-mode=none|checksum|lsn]\n" +msgstr "" + +#: src/help.c:173 +#, c-format +msgid " [--db-include | --db-exclude]\n" +msgstr "" + +#: src/help.c:177 +#, c-format +msgid " [--archive-host=hostname]\n" +msgstr "" + +#: src/help.c:181 +#, c-format +msgid "" +"\n" +" %s validate -B backup-path [--instance=instance_name]\n" +msgstr "" + +#: src/help.c:182 src/help.c:551 +#, c-format +msgid " [-i backup-id] [--progress] [-j num-threads]\n" +msgstr "" + +#: src/help.c:187 +#, c-format +msgid " [--skip-block-validation]\n" +msgstr "" + +#: src/help.c:190 +#, c-format +msgid "" +"\n" +" %s checkdb [-B backup-path] [--instance=instance_name]\n" +msgstr "" + +#: src/help.c:191 +#, c-format +msgid " [-D pgdata-path] [--progress] [-j num-threads]\n" +msgstr "" + +#: src/help.c:192 src/help.c:603 +#, c-format +msgid " [--amcheck] [--skip-block-validation]\n" +msgstr "" + +#: src/help.c:193 +#, c-format +msgid " [--heapallindexed] [--checkunique]\n" +msgstr "" + +#: src/help.c:196 +#, c-format +msgid "" +"\n" +" %s show -B backup-path\n" +msgstr "" + +#: src/help.c:197 src/help.c:657 +#, c-format +msgid " [--instance=instance_name [-i backup-id]]\n" +msgstr "" + +#: src/help.c:198 +#, c-format +msgid " [--format=format] [--archive]\n" +msgstr "" + +#: src/help.c:199 +#, c-format +msgid " [--no-color] [--help]\n" +msgstr "" + +#: src/help.c:201 +#, c-format +msgid "" +"\n" +" %s delete -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:202 src/help.c:673 +#, c-format +msgid " [-j num-threads] [--progress]\n" +msgstr "" + +#: src/help.c:206 +#, c-format +msgid " [-i backup-id | --delete-expired | --merge-expired | --status=backup_status]\n" +msgstr "" + +#: src/help.c:207 +#, c-format +msgid " [--delete-wal]\n" +msgstr "" + +#: src/help.c:208 +#, c-format +msgid " [--dry-run] [--no-validate] [--no-sync]\n" +msgstr "" + +#: src/help.c:211 +#, c-format +msgid "" +"\n" +" %s merge -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:212 +#, c-format +msgid " -i backup-id [--progress] [-j num-threads]\n" +msgstr "" + +#: src/help.c:213 src/help.c:730 +#, c-format +msgid " [--no-validate] [--no-sync]\n" +msgstr "" + +#: src/help.c:216 +#, c-format +msgid "" +"\n" +" %s add-instance -B backup-path -D pgdata-path\n" +msgstr "" + +#: src/help.c:217 src/help.c:225 src/help.c:904 +#, c-format +msgid " --instance=instance_name\n" +msgstr "" + +#: src/help.c:224 +#, c-format +msgid "" +"\n" +" %s del-instance -B backup-path\n" +msgstr "" + +#: src/help.c:228 +#, c-format +msgid "" +"\n" +" %s archive-push -B 
backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:229 src/help.c:244 src/help.c:942 src/help.c:990 +#, c-format +msgid " --wal-file-name=wal-file-name\n" +msgstr "" + +#: src/help.c:230 src/help.c:943 src/help.c:991 +#, c-format +msgid " [--wal-file-path=wal-file-path]\n" +msgstr "" + +#: src/help.c:231 src/help.c:245 src/help.c:944 src/help.c:992 +#, c-format +msgid " [-j num-threads] [--batch-size=batch_size]\n" +msgstr "" + +#: src/help.c:233 src/help.c:946 +#, c-format +msgid " [--no-ready-rename] [--no-sync]\n" +msgstr "" + +#: src/help.c:234 src/help.c:947 +#, c-format +msgid " [--overwrite] [--compress]\n" +msgstr "" + +#: src/help.c:242 +#, c-format +msgid "" +"\n" +" %s archive-get -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:243 +#, c-format +msgid " --wal-file-path=wal-file-path\n" +msgstr "" + +#: src/help.c:246 src/help.c:993 +#, c-format +msgid " [--no-validate-wal]\n" +msgstr "" + +#: src/help.c:252 +#, c-format +msgid "" +"\n" +" %s catchup -b catchup-mode\n" +msgstr "" + +#: src/help.c:253 src/help.c:1039 +#, c-format +msgid " --source-pgdata=path_to_pgdata_on_remote_server\n" +msgstr "" + +#: src/help.c:254 src/help.c:1040 +#, c-format +msgid " --destination-pgdata=path_to_local_dir\n" +msgstr "" + +#: src/help.c:255 +#, c-format +msgid " [--stream [-S slot-name] [--temp-slot | --perm-slot]]\n" +msgstr "" + +#: src/help.c:256 src/help.c:1042 +#, c-format +msgid " [-j num-threads]\n" +msgstr "" + +#: src/help.c:257 src/help.c:434 src/help.c:1043 +#, c-format +msgid " [-T OLDDIR=NEWDIR]\n" +msgstr "" + +#: src/help.c:258 src/help.c:1044 +#, c-format +msgid " [--exclude-path=path_prefix]\n" +msgstr "" + +#: src/help.c:270 +#, c-format +msgid "Read the website for details <%s>.\n" +msgstr "Подробнее читайте на сайте <%s>.\n" + +#: src/help.c:272 +#, c-format +msgid "Report bugs to <%s>.\n" +msgstr "Сообщайте об ошибках в <%s>.\n" + +#: src/help.c:279 +#, c-format +msgid "" +"\n" +"Unknown command. Try pg_probackup help\n" +"\n" +msgstr "" +"\n" +"Неизвестная команда. 
Попробуйте pg_probackup help\n" +"\n" + +#: src/help.c:285 +#, c-format +msgid "" +"\n" +"This command is intended for internal use\n" +"\n" +msgstr "" + +#: src/help.c:291 +#, c-format +msgid "" +"\n" +"%s init -B backup-path\n" +"\n" +msgstr "" + +#: src/help.c:292 +#, c-format +msgid "" +" -B, --backup-path=backup-path location of the backup storage area\n" +"\n" +msgstr "" + +#: src/help.c:298 +#, c-format +msgid "" +"\n" +"%s backup -B backup-path -b backup-mode --instance=instance_name\n" +msgstr "" + +#: src/help.c:303 src/help.c:792 +#, c-format +msgid " [-E external-directories-paths]\n" +msgstr "" + +#: src/help.c:325 +#, c-format +msgid "" +" [--ttl=interval] [--expire-time=timestamp] [--note=text]\n" +"\n" +msgstr "" + +#: src/help.c:327 src/help.c:455 src/help.c:558 src/help.c:606 src/help.c:660 +#: src/help.c:679 src/help.c:739 src/help.c:812 src/help.c:895 src/help.c:910 +#: src/help.c:934 src/help.c:954 src/help.c:998 +#, c-format +msgid " -B, --backup-path=backup-path location of the backup storage area\n" +msgstr "" + +#: src/help.c:328 +#, c-format +msgid " -b, --backup-mode=backup-mode backup mode=FULL|PAGE|DELTA|PTRACK\n" +msgstr "" + +#: src/help.c:329 src/help.c:456 src/help.c:559 src/help.c:607 src/help.c:680 +#: src/help.c:740 src/help.c:813 src/help.c:896 +#, c-format +msgid " --instance=instance_name name of the instance\n" +msgstr "" + +#: src/help.c:330 src/help.c:458 src/help.c:608 src/help.c:814 src/help.c:911 +#, c-format +msgid " -D, --pgdata=pgdata-path location of the database storage area\n" +msgstr "" + +#: src/help.c:331 +#, c-format +msgid " -C, --smooth-checkpoint do smooth checkpoint before backup\n" +msgstr "" + +#: src/help.c:332 +#, c-format +msgid " --stream stream the transaction log and include it in the backup\n" +msgstr "" + +#: src/help.c:333 src/help.c:1054 +#, c-format +msgid " -S, --slot=SLOTNAME replication slot to use\n" +msgstr "" + +#: src/help.c:334 src/help.c:1055 +#, c-format +msgid " --temp-slot use temporary replication slot\n" +msgstr "" + +#: src/help.c:335 +#, c-format +msgid " --backup-pg-log backup of '%s' directory\n" +msgstr "" + +#: src/help.c:336 src/help.c:460 src/help.c:563 src/help.c:611 src/help.c:682 +#: src/help.c:743 src/help.c:960 src/help.c:1004 src/help.c:1058 +#, c-format +msgid " -j, --threads=NUM number of parallel threads\n" +msgstr "" + +#: src/help.c:337 src/help.c:462 src/help.c:562 src/help.c:610 src/help.c:683 +#: src/help.c:744 +#, c-format +msgid " --progress show progress\n" +msgstr "" + +#: src/help.c:338 +#, c-format +msgid " --no-validate disable validation after backup\n" +msgstr "" + +#: src/help.c:339 src/help.c:466 src/help.c:573 +#, c-format +msgid " --skip-block-validation set to validate only file-level checksum\n" +msgstr "" + +#: src/help.c:340 src/help.c:815 src/help.c:914 +#, c-format +msgid " -E --external-dirs=external-directories-paths\n" +msgstr "" + +#: src/help.c:341 src/help.c:816 src/help.c:915 +#, c-format +msgid " backup some directories not from pgdata \n" +msgstr "" + +#: src/help.c:342 src/help.c:817 src/help.c:916 +#, c-format +msgid " (example: --external-dirs=/tmp/dir1:/tmp/dir2)\n" +msgstr "" + +#: src/help.c:343 +#, c-format +msgid " --no-sync do not sync backed up files to disk\n" +msgstr "" + +#: src/help.c:344 +#, c-format +msgid " --note=text add note to backup\n" +msgstr "" + +#: src/help.c:345 src/help.c:784 +#, c-format +msgid " (example: --note='backup before app update to v13.1')\n" +msgstr "" + +#: src/help.c:347 src/help.c:508 src/help.c:575 src/help.c:622 
src/help.c:702 +#: src/help.c:748 src/help.c:820 +#, c-format +msgid "" +"\n" +" Logging options:\n" +msgstr "" + +#: src/help.c:348 src/help.c:509 src/help.c:576 src/help.c:623 src/help.c:703 +#: src/help.c:749 src/help.c:821 +#, c-format +msgid " --log-level-console=log-level-console\n" +msgstr "" + +#: src/help.c:349 src/help.c:510 src/help.c:577 src/help.c:624 src/help.c:704 +#: src/help.c:750 src/help.c:822 +#, c-format +msgid " level for console logging (default: info)\n" +msgstr "" + +#: src/help.c:350 src/help.c:353 src/help.c:511 src/help.c:514 src/help.c:578 +#: src/help.c:581 src/help.c:625 src/help.c:628 src/help.c:705 src/help.c:708 +#: src/help.c:751 src/help.c:754 src/help.c:823 src/help.c:826 +#, c-format +msgid " available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n" +msgstr "" + +#: src/help.c:351 src/help.c:512 src/help.c:579 src/help.c:626 src/help.c:706 +#: src/help.c:752 src/help.c:824 +#, c-format +msgid " --log-level-file=log-level-file\n" +msgstr "" + +#: src/help.c:352 src/help.c:513 src/help.c:580 src/help.c:627 src/help.c:707 +#: src/help.c:753 src/help.c:825 +#, c-format +msgid " level for file logging (default: off)\n" +msgstr "" + +#: src/help.c:354 src/help.c:515 src/help.c:582 src/help.c:629 src/help.c:709 +#: src/help.c:755 src/help.c:827 +#, c-format +msgid " --log-filename=log-filename\n" +msgstr "" + +#: src/help.c:355 src/help.c:516 src/help.c:583 src/help.c:630 src/help.c:710 +#: src/help.c:756 src/help.c:828 +#, c-format +msgid " filename for file logging (default: 'pg_probackup.log')\n" +msgstr "" + +#: src/help.c:356 src/help.c:517 src/help.c:584 src/help.c:711 src/help.c:757 +#: src/help.c:829 +#, c-format +msgid " support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n" +msgstr "" + +#: src/help.c:357 src/help.c:518 src/help.c:585 src/help.c:632 src/help.c:712 +#: src/help.c:758 src/help.c:830 +#, c-format +msgid " --error-log-filename=error-log-filename\n" +msgstr "" + +#: src/help.c:358 src/help.c:519 src/help.c:586 src/help.c:633 src/help.c:713 +#: src/help.c:759 src/help.c:831 +#, c-format +msgid " filename for error logging (default: none)\n" +msgstr "" + +#: src/help.c:359 src/help.c:520 src/help.c:587 src/help.c:634 src/help.c:714 +#: src/help.c:760 src/help.c:832 +#, c-format +msgid " --log-directory=log-directory\n" +msgstr "" + +#: src/help.c:360 src/help.c:521 src/help.c:588 src/help.c:635 src/help.c:715 +#: src/help.c:761 src/help.c:833 +#, c-format +msgid " directory for file logging (default: BACKUP_PATH/log)\n" +msgstr "" + +#: src/help.c:361 src/help.c:522 src/help.c:589 src/help.c:636 src/help.c:716 +#: src/help.c:762 src/help.c:834 +#, c-format +msgid " --log-rotation-size=log-rotation-size\n" +msgstr "" + +#: src/help.c:362 src/help.c:523 src/help.c:590 src/help.c:637 src/help.c:717 +#: src/help.c:763 src/help.c:835 +#, c-format +msgid " rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n" +msgstr "" + +#: src/help.c:363 src/help.c:524 src/help.c:591 src/help.c:638 src/help.c:718 +#: src/help.c:764 src/help.c:836 +#, c-format +msgid " available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n" +msgstr "" + +#: src/help.c:364 src/help.c:525 src/help.c:592 src/help.c:639 src/help.c:719 +#: src/help.c:765 src/help.c:837 +#, c-format +msgid " --log-rotation-age=log-rotation-age\n" +msgstr "" + +#: src/help.c:365 src/help.c:526 src/help.c:593 src/help.c:640 src/help.c:720 +#: src/help.c:766 src/help.c:838 +#, c-format +msgid " rotate logfile if its age exceeds this value; 0 
disables; (default: 0)\n" +msgstr "" + +#: src/help.c:366 src/help.c:527 src/help.c:594 src/help.c:641 src/help.c:721 +#: src/help.c:767 src/help.c:839 +#, c-format +msgid " available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n" +msgstr "" + +#: src/help.c:367 src/help.c:528 src/help.c:642 +#, c-format +msgid " --no-color disable the coloring of error and warning console messages\n" +msgstr "" + +#: src/help.c:369 src/help.c:687 src/help.c:841 +#, c-format +msgid "" +"\n" +" Retention options:\n" +msgstr "" + +#: src/help.c:370 src/help.c:688 +#, c-format +msgid " --delete-expired delete backups expired according to current\n" +msgstr "" + +#: src/help.c:371 src/help.c:373 +#, c-format +msgid " retention policy after successful backup completion\n" +msgstr "" + +#: src/help.c:372 src/help.c:690 +#, c-format +msgid " --merge-expired merge backups expired according to current\n" +msgstr "" + +#: src/help.c:374 src/help.c:692 +#, c-format +msgid " --delete-wal remove redundant files in WAL archive\n" +msgstr "" + +#: src/help.c:375 src/help.c:693 src/help.c:842 +#, c-format +msgid " --retention-redundancy=retention-redundancy\n" +msgstr "" + +#: src/help.c:376 src/help.c:694 src/help.c:843 +#, c-format +msgid " number of full backups to keep; 0 disables; (default: 0)\n" +msgstr "" + +#: src/help.c:377 src/help.c:695 src/help.c:844 +#, c-format +msgid " --retention-window=retention-window\n" +msgstr "" + +#: src/help.c:378 src/help.c:696 src/help.c:845 +#, c-format +msgid " number of days of recoverability; 0 disables; (default: 0)\n" +msgstr "" + +#: src/help.c:379 src/help.c:697 +#, c-format +msgid " --wal-depth=wal-depth number of latest valid backups per timeline that must\n" +msgstr "" + +#: src/help.c:380 src/help.c:698 +#, c-format +msgid " retain the ability to perform PITR; 0 disables; (default: 0)\n" +msgstr "" + +#: src/help.c:381 src/help.c:699 +#, c-format +msgid " --dry-run perform a trial run without any changes\n" +msgstr "" + +#: src/help.c:383 +#, c-format +msgid "" +"\n" +" Pinning options:\n" +msgstr "" + +#: src/help.c:384 src/help.c:778 +#, c-format +msgid " --ttl=interval pin backup for specified amount of time; 0 unpin\n" +msgstr "" + +#: src/help.c:385 src/help.c:779 +#, c-format +msgid " available units: 'ms', 's', 'min', 'h', 'd' (default: s)\n" +msgstr "" + +#: src/help.c:386 src/help.c:780 +#, c-format +msgid " (example: --ttl=20d)\n" +msgstr "" + +#: src/help.c:387 src/help.c:781 +#, c-format +msgid " --expire-time=time pin backup until specified time stamp\n" +msgstr "" + +#: src/help.c:388 src/help.c:782 +#, c-format +msgid " (example: --expire-time='2024-01-01 00:00:00+03')\n" +msgstr "" + +#: src/help.c:390 src/help.c:849 src/help.c:967 +#, c-format +msgid "" +"\n" +" Compression options:\n" +msgstr "" + +#: src/help.c:391 src/help.c:850 src/help.c:968 +#, c-format +msgid " --compress alias for --compress-algorithm='zlib' and --compress-level=1\n" +msgstr "" + +#: src/help.c:392 src/help.c:851 src/help.c:969 +#, c-format +msgid " --compress-algorithm=compress-algorithm\n" +msgstr "" + +#: src/help.c:393 +#, c-format +msgid " available options: 'zlib', 'pglz', 'none' (default: none)\n" +msgstr "" + +#: src/help.c:394 src/help.c:853 src/help.c:971 +#, c-format +msgid " --compress-level=compress-level\n" +msgstr "" + +#: src/help.c:395 src/help.c:854 src/help.c:972 +#, c-format +msgid " level of compression [0-9] (default: 1)\n" +msgstr "" + +#: src/help.c:397 src/help.c:856 +#, c-format +msgid "" +"\n" +" Archive options:\n" +msgstr "" + +#: 
src/help.c:398 src/help.c:857 +#, c-format +msgid " --archive-timeout=timeout wait timeout for WAL segment archiving (default: 5min)\n" +msgstr "" + +#: src/help.c:400 src/help.c:644 src/help.c:859 src/help.c:1066 +#, c-format +msgid "" +"\n" +" Connection options:\n" +msgstr "" + +#: src/help.c:401 src/help.c:645 src/help.c:860 src/help.c:1067 +#, c-format +msgid " -U, --pguser=USERNAME user name to connect as (default: current local user)\n" +msgstr "" + +#: src/help.c:402 src/help.c:646 src/help.c:861 src/help.c:1068 +#, c-format +msgid " -d, --pgdatabase=DBNAME database to connect (default: username)\n" +msgstr "" + +#: src/help.c:403 src/help.c:647 src/help.c:862 src/help.c:1069 +#, c-format +msgid " -h, --pghost=HOSTNAME database server host or socket directory(default: 'local socket')\n" +msgstr "" + +#: src/help.c:404 src/help.c:648 src/help.c:863 src/help.c:1070 +#, c-format +msgid " -p, --pgport=PORT database server port (default: 5432)\n" +msgstr "" + +#: src/help.c:405 src/help.c:649 src/help.c:1071 +#, c-format +msgid " -w, --no-password never prompt for password\n" +msgstr "" + +#: src/help.c:406 +#, c-format +msgid " -W, --password force password prompt\n" +msgstr "" + +#: src/help.c:408 src/help.c:530 src/help.c:865 src/help.c:917 src/help.c:974 +#: src/help.c:1009 src/help.c:1074 +#, c-format +msgid "" +"\n" +" Remote options:\n" +msgstr "" + +#: src/help.c:409 src/help.c:531 src/help.c:866 src/help.c:918 src/help.c:975 +#: src/help.c:1010 src/help.c:1075 +#, c-format +msgid " --remote-proto=protocol remote protocol to use\n" +msgstr "" + +#: src/help.c:410 src/help.c:532 src/help.c:867 src/help.c:919 src/help.c:976 +#: src/help.c:1011 src/help.c:1076 +#, c-format +msgid " available options: 'ssh', 'none' (default: ssh)\n" +msgstr "" + +#: src/help.c:411 src/help.c:533 src/help.c:868 src/help.c:920 +#, c-format +msgid " --remote-host=destination remote host address or hostname\n" +msgstr "" + +#: src/help.c:412 src/help.c:534 src/help.c:869 src/help.c:921 src/help.c:978 +#: src/help.c:1013 src/help.c:1078 +#, c-format +msgid " --remote-port=port remote host port (default: 22)\n" +msgstr "" + +#: src/help.c:413 src/help.c:535 src/help.c:870 src/help.c:922 src/help.c:979 +#: src/help.c:1014 src/help.c:1079 +#, c-format +msgid " --remote-path=path path to directory with pg_probackup binary on remote host\n" +msgstr "" + +#: src/help.c:414 src/help.c:536 src/help.c:871 src/help.c:923 src/help.c:980 +#: src/help.c:1015 src/help.c:1080 +#, c-format +msgid " (default: current binary path)\n" +msgstr "" + +#: src/help.c:415 src/help.c:537 src/help.c:872 src/help.c:924 src/help.c:981 +#: src/help.c:1016 src/help.c:1081 +#, c-format +msgid " --remote-user=username user name for ssh connection (default: current user)\n" +msgstr "" + +#: src/help.c:416 src/help.c:538 src/help.c:873 src/help.c:925 src/help.c:982 +#: src/help.c:1017 src/help.c:1082 +#, c-format +msgid " --ssh-options=ssh_options additional ssh options (default: none)\n" +msgstr "" + +#: src/help.c:417 src/help.c:539 src/help.c:874 +#, c-format +msgid " (example: --ssh-options='-c cipher_spec -F configfile')\n" +msgstr "" + +#: src/help.c:419 src/help.c:881 +#, c-format +msgid "" +"\n" +" Replica options:\n" +msgstr "" + +#: src/help.c:420 src/help.c:882 +#, c-format +msgid " --master-user=user_name user name to connect to master (deprecated)\n" +msgstr "" + +#: src/help.c:421 src/help.c:883 +#, c-format +msgid " --master-db=db_name database to connect to master (deprecated)\n" +msgstr "" + +#: src/help.c:422 
src/help.c:884 +#, c-format +msgid " --master-host=host_name database server host of master (deprecated)\n" +msgstr "" + +#: src/help.c:423 src/help.c:885 +#, c-format +msgid " --master-port=port database server port of master (deprecated)\n" +msgstr "" + +#: src/help.c:424 src/help.c:886 +#, c-format +msgid "" +" --replica-timeout=timeout wait timeout for WAL segment streaming through replication (deprecated)\n" +"\n" +msgstr "" + +#: src/help.c:430 +#, c-format +msgid "" +"\n" +"%s restore -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:432 +#, c-format +msgid " [--progress] [--force] [--no-sync]\n" +msgstr "" + +#: src/help.c:436 +#, c-format +msgid " [--skip-external-dirs]\n" +msgstr "" + +#: src/help.c:438 +#, c-format +msgid " [--db-include dbname | --db-exclude dbname]\n" +msgstr "" + +#: src/help.c:446 +#, c-format +msgid " [-R | --restore-as-replica]\n" +msgstr "" + +#: src/help.c:452 +#, c-format +msgid " [--archive-host=hostname] [--archive-port=port]\n" +msgstr "" + +#: src/help.c:453 +#, c-format +msgid "" +" [--archive-user=username]\n" +"\n" +msgstr "" + +#: src/help.c:459 +#, c-format +msgid " -i, --backup-id=backup-id backup to restore\n" +msgstr "" + +#: src/help.c:463 +#, c-format +msgid " --force ignore invalid status of the restored backup\n" +msgstr "" + +#: src/help.c:464 +#, c-format +msgid " --no-sync do not sync restored files to disk\n" +msgstr "" + +#: src/help.c:465 +#, c-format +msgid " --no-validate disable backup validation during restore\n" +msgstr "" + +#: src/help.c:468 src/help.c:1060 +#, c-format +msgid " -T, --tablespace-mapping=OLDDIR=NEWDIR\n" +msgstr "" + +#: src/help.c:469 src/help.c:1061 +#, c-format +msgid " relocate the tablespace from directory OLDDIR to NEWDIR\n" +msgstr "" + +#: src/help.c:470 +#, c-format +msgid " --external-mapping=OLDDIR=NEWDIR\n" +msgstr "" + +#: src/help.c:471 +#, c-format +msgid " relocate the external directory from OLDDIR to NEWDIR\n" +msgstr "" + +#: src/help.c:472 +#, c-format +msgid " --skip-external-dirs do not restore all external directories\n" +msgstr "" + +#: src/help.c:474 +#, c-format +msgid "" +"\n" +" Incremental restore options:\n" +msgstr "" + +#: src/help.c:475 +#, c-format +msgid " -I, --incremental-mode=none|checksum|lsn\n" +msgstr "" + +#: src/help.c:476 +#, c-format +msgid " reuse valid pages available in PGDATA if they have not changed\n" +msgstr "" + +#: src/help.c:477 +#, c-format +msgid " (default: none)\n" +msgstr "" + +#: src/help.c:479 +#, c-format +msgid "" +"\n" +" Partial restore options:\n" +msgstr "" + +#: src/help.c:480 +#, c-format +msgid " --db-include dbname restore only specified databases\n" +msgstr "" + +#: src/help.c:481 +#, c-format +msgid " --db-exclude dbname do not restore specified databases\n" +msgstr "" + +#: src/help.c:483 +#, c-format +msgid "" +"\n" +" Recovery options:\n" +msgstr "" + +#: src/help.c:484 src/help.c:564 +#, c-format +msgid " --recovery-target-time=time time stamp up to which recovery will proceed\n" +msgstr "" + +#: src/help.c:485 src/help.c:565 +#, c-format +msgid " --recovery-target-xid=xid transaction ID up to which recovery will proceed\n" +msgstr "" + +#: src/help.c:486 src/help.c:566 +#, c-format +msgid " --recovery-target-lsn=lsn LSN of the write-ahead log location up to which recovery will proceed\n" +msgstr "" + +#: src/help.c:487 src/help.c:567 +#, c-format +msgid " --recovery-target-inclusive=boolean\n" +msgstr "" + +#: src/help.c:488 src/help.c:568 +#, c-format +msgid " whether we stop just after the recovery target\n" 
+msgstr "" + +#: src/help.c:489 src/help.c:569 +#, c-format +msgid " --recovery-target-timeline=timeline\n" +msgstr "" + +#: src/help.c:490 src/help.c:570 +#, c-format +msgid " recovering into a particular timeline\n" +msgstr "" + +#: src/help.c:491 +#, c-format +msgid " --recovery-target=immediate|latest\n" +msgstr "" + +#: src/help.c:492 +#, c-format +msgid " end recovery as soon as a consistent state is reached or as late as possible\n" +msgstr "" + +#: src/help.c:493 src/help.c:571 +#, c-format +msgid " --recovery-target-name=target-name\n" +msgstr "" + +#: src/help.c:494 src/help.c:572 +#, c-format +msgid " the named restore point to which recovery will proceed\n" +msgstr "" + +#: src/help.c:495 +#, c-format +msgid " --recovery-target-action=pause|promote|shutdown\n" +msgstr "" + +#: src/help.c:496 +#, c-format +msgid " action the server should take once the recovery target is reached\n" +msgstr "" + +#: src/help.c:497 +#, c-format +msgid " (default: pause)\n" +msgstr "" + +#: src/help.c:498 src/help.c:818 +#, c-format +msgid " --restore-command=cmdline command to use as 'restore_command' in recovery.conf; 'none' disables\n" +msgstr "" + +#: src/help.c:500 +#, c-format +msgid "" +"\n" +" Standby options:\n" +msgstr "" + +#: src/help.c:501 +#, c-format +msgid " -R, --restore-as-replica write a minimal recovery.conf in the output directory\n" +msgstr "" + +#: src/help.c:502 +#, c-format +msgid " to ease setting up a standby server\n" +msgstr "" + +#: src/help.c:503 +#, c-format +msgid " --primary-conninfo=primary_conninfo\n" +msgstr "" + +#: src/help.c:504 +#, c-format +msgid " connection string to be used for establishing connection\n" +msgstr "" + +#: src/help.c:505 +#, c-format +msgid " with the primary server\n" +msgstr "" + +#: src/help.c:506 +#, c-format +msgid " -S, --primary-slot-name=slotname replication slot to be used for WAL streaming from the primary server\n" +msgstr "" + +#: src/help.c:541 src/help.c:876 +#, c-format +msgid "" +"\n" +" Remote WAL archive options:\n" +msgstr "" + +#: src/help.c:542 src/help.c:877 +#, c-format +msgid " --archive-host=destination address or hostname for ssh connection to archive host\n" +msgstr "" + +#: src/help.c:543 src/help.c:878 +#, c-format +msgid " --archive-port=port port for ssh connection to archive host (default: 22)\n" +msgstr "" + +#: src/help.c:544 +#, c-format +msgid "" +" --archive-user=username user name for ssh connection to archive host (default: PostgreSQL user)\n" +"\n" +msgstr "" + +#: src/help.c:550 +#, c-format +msgid "" +"\n" +"%s validate -B backup-path [--instance=instance_name]\n" +msgstr "" + +#: src/help.c:556 +#, c-format +msgid "" +" [--skip-block-validation]\n" +"\n" +msgstr "" + +#: src/help.c:560 +#, c-format +msgid " -i, --backup-id=backup-id backup to validate\n" +msgstr "" + +#: src/help.c:595 src/help.c:722 src/help.c:768 +#, c-format +msgid "" +" --no-color disable the coloring of error and warning console messages\n" +"\n" +msgstr "" + +#: src/help.c:601 +#, c-format +msgid "" +"\n" +"%s checkdb [-B backup-path] [--instance=instance_name]\n" +msgstr "" + +#: src/help.c:602 +#, c-format +msgid " [-D pgdata-path] [-j num-threads] [--progress]\n" +msgstr "" + +#: src/help.c:604 +#, c-format +msgid "" +" [--heapallindexed] [--checkunique]\n" +"\n" +msgstr "" + +#: src/help.c:612 +#, c-format +msgid " --skip-block-validation skip file-level checking\n" +msgstr "" + +#: src/help.c:613 src/help.c:618 src/help.c:620 +#, c-format +msgid " can be used only with '--amcheck' option\n" +msgstr "" + +#: 
src/help.c:614 +#, c-format +msgid " --amcheck in addition to file-level block checking\n" +msgstr "" + +#: src/help.c:615 +#, c-format +msgid " check btree indexes via function 'bt_index_check()'\n" +msgstr "" + +#: src/help.c:616 +#, c-format +msgid " using 'amcheck' or 'amcheck_next' extensions\n" +msgstr "" + +#: src/help.c:617 +#, c-format +msgid " --heapallindexed also check that heap is indexed\n" +msgstr "" + +#: src/help.c:619 +#, c-format +msgid " --checkunique also check unique constraints\n" +msgstr "" + +#: src/help.c:631 +#, c-format +msgid " support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log\n" +msgstr "" + +#: src/help.c:650 src/help.c:1072 +#, c-format +msgid "" +" -W, --password force password prompt\n" +"\n" +msgstr "" + +#: src/help.c:656 +#, c-format +msgid "" +"\n" +"%s show -B backup-path\n" +msgstr "" + +#: src/help.c:658 +#, c-format +msgid "" +" [--format=format] [--archive]\n" +"\n" +msgstr "" + +#: src/help.c:661 +#, c-format +msgid " --instance=instance_name show info about specific instance\n" +msgstr "" + +#: src/help.c:662 +#, c-format +msgid " -i, --backup-id=backup-id show info about specific backups\n" +msgstr "" + +#: src/help.c:663 +#, c-format +msgid " --archive show WAL archive information\n" +msgstr "" + +#: src/help.c:664 +#, c-format +msgid " --format=format show format=PLAIN|JSON\n" +msgstr "" + +#: src/help.c:665 +#, c-format +msgid "" +" --no-color disable the coloring for plain format\n" +"\n" +msgstr "" + +#: src/help.c:671 +#, c-format +msgid "" +"\n" +"%s delete -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:672 +#, c-format +msgid " [-i backup-id | --delete-expired | --merge-expired] [--delete-wal]\n" +msgstr "" + +#: src/help.c:677 +#, c-format +msgid "" +" [--no-validate] [--no-sync]\n" +"\n" +msgstr "" + +#: src/help.c:681 +#, c-format +msgid " -i, --backup-id=backup-id backup to delete\n" +msgstr "" + +#: src/help.c:684 src/help.c:745 +#, c-format +msgid " --no-validate disable validation during retention merge\n" +msgstr "" + +#: src/help.c:685 src/help.c:746 +#, c-format +msgid " --no-sync do not sync merged files to disk\n" +msgstr "" + +#: src/help.c:689 src/help.c:691 +#, c-format +msgid " retention policy\n" +msgstr "" + +#: src/help.c:700 +#, c-format +msgid " --status=backup_status delete all backups with specified status\n" +msgstr "" + +#: src/help.c:728 +#, c-format +msgid "" +"\n" +"%s merge -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:729 +#, c-format +msgid " -i backup-id [-j num-threads] [--progress]\n" +msgstr "" + +#: src/help.c:737 +#, c-format +msgid "" +" [--log-rotation-age=log-rotation-age]\n" +"\n" +msgstr "" + +#: src/help.c:741 +#, c-format +msgid " -i, --backup-id=backup-id backup to merge\n" +msgstr "" + +#: src/help.c:774 +#, c-format +msgid "" +"\n" +"%s set-backup -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:775 +#, c-format +msgid " -i backup-id\n" +msgstr "" + +#: src/help.c:776 +#, c-format +msgid "" +" [--ttl=interval] [--expire-time=time] [--note=text]\n" +"\n" +msgstr "" + +#: src/help.c:783 +#, c-format +msgid " --note=text add note to backup; 'none' to remove note\n" +msgstr "" + +#: src/help.c:790 +#, c-format +msgid "" +"\n" +"%s set-config -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:810 src/help.c:908 src/help.c:952 src/help.c:996 +#, c-format +msgid "" +" [--ssh-options]\n" +"\n" +msgstr "" + +#: src/help.c:846 +#, c-format +msgid " --wal-depth=wal-depth number of latest 
valid backups with ability to perform\n" +msgstr "" + +#: src/help.c:847 +#, c-format +msgid " the point in time recovery; disables; (default: 0)\n" +msgstr "" + +#: src/help.c:852 src/help.c:970 +#, c-format +msgid " available options: 'zlib','pglz','none' (default: 'none')\n" +msgstr "" + +#: src/help.c:879 +#, c-format +msgid " --archive-user=username user name for ssh connection to archive host (default: PostgreSQL user)\n" +msgstr "" + +#: src/help.c:892 +#, c-format +msgid "" +"\n" +"%s show-config -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:893 +#, c-format +msgid "" +" [--format=format]\n" +"\n" +msgstr "" + +#: src/help.c:897 +#, c-format +msgid "" +" --format=format show format=PLAIN|JSON\n" +"\n" +msgstr "" + +#: src/help.c:903 +#, c-format +msgid "" +"\n" +"%s add-instance -B backup-path -D pgdata-path\n" +msgstr "" + +#: src/help.c:905 +#, c-format +msgid " [-E external-directory-path]\n" +msgstr "" + +#: src/help.c:912 +#, c-format +msgid " --instance=instance_name name of the new instance\n" +msgstr "" + +#: src/help.c:926 src/help.c:983 src/help.c:1018 src/help.c:1083 +#, c-format +msgid "" +" (example: --ssh-options='-c cipher_spec -F configfile')\n" +"\n" +msgstr "" + +#: src/help.c:932 +#, c-format +msgid "" +"\n" +"%s del-instance -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:935 +#, c-format +msgid "" +" --instance=instance_name name of the instance to delete\n" +"\n" +msgstr "" + +#: src/help.c:941 +#, c-format +msgid "" +"\n" +"%s archive-push -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:955 src/help.c:999 +#, c-format +msgid " --instance=instance_name name of the instance to delete\n" +msgstr "" + +#: src/help.c:956 src/help.c:1002 +#, c-format +msgid " --wal-file-name=wal-file-name\n" +msgstr "" + +#: src/help.c:957 +#, c-format +msgid " name of the file to copy into WAL archive\n" +msgstr "" + +#: src/help.c:958 src/help.c:1000 +#, c-format +msgid " --wal-file-path=wal-file-path\n" +msgstr "" + +#: src/help.c:959 +#, c-format +msgid " relative destination path of the WAL archive\n" +msgstr "" + +#: src/help.c:961 +#, c-format +msgid " --batch-size=NUM number of files to be copied\n" +msgstr "" + +#: src/help.c:962 +#, c-format +msgid " --archive-timeout=timeout wait timeout before discarding stale temp file(default: 5min)\n" +msgstr "" + +#: src/help.c:963 +#, c-format +msgid " --no-ready-rename do not rename '.ready' files in 'archive_status' directory\n" +msgstr "" + +#: src/help.c:964 +#, c-format +msgid " --no-sync do not sync WAL file to disk\n" +msgstr "" + +#: src/help.c:965 +#, c-format +msgid " --overwrite overwrite archived WAL file\n" +msgstr "" + +#: src/help.c:977 src/help.c:1012 src/help.c:1077 +#, c-format +msgid " --remote-host=hostname remote host address or hostname\n" +msgstr "" + +#: src/help.c:989 +#, c-format +msgid "" +"\n" +"%s archive-get -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:1001 +#, c-format +msgid " relative destination path name of the WAL file on the server\n" +msgstr "" + +#: src/help.c:1003 +#, c-format +msgid " name of the WAL file to retrieve from the archive\n" +msgstr "" + +#: src/help.c:1005 +#, c-format +msgid " --batch-size=NUM number of files to be prefetched\n" +msgstr "" + +#: src/help.c:1006 +#, c-format +msgid " --prefetch-dir=path location of the store area for prefetched WAL files\n" +msgstr "" + +#: src/help.c:1007 +#, c-format +msgid " --no-validate-wal skip validation of prefetched WAL file before using 
it\n" +msgstr "" + +#: src/help.c:1024 +#, c-format +msgid "" +"\n" +"%s help [command]\n" +msgstr "" + +#: src/help.c:1025 +#, c-format +msgid "" +"%s command --help\n" +"\n" +msgstr "" + +#: src/help.c:1031 +#, c-format +msgid "" +"\n" +"%s version\n" +msgstr "" + +#: src/help.c:1032 +#, c-format +msgid "" +"%s --version\n" +"\n" +msgstr "" + +#: src/help.c:1038 +#, c-format +msgid "" +"\n" +"%s catchup -b catchup-mode\n" +msgstr "" + +#: src/help.c:1041 +#, c-format +msgid " [--stream [-S slot-name]] [--temp-slot | --perm-slot]\n" +msgstr "" + +#: src/help.c:1050 +#, c-format +msgid "" +" [--help]\n" +"\n" +msgstr "" + +#: src/help.c:1052 +#, c-format +msgid " -b, --backup-mode=catchup-mode catchup mode=FULL|DELTA|PTRACK\n" +msgstr "" + +#: src/help.c:1053 +#, c-format +msgid " --stream stream the transaction log (only supported mode)\n" +msgstr "" + +#: src/help.c:1056 +#, c-format +msgid " -P --perm-slot create permanent replication slot\n" +msgstr "" + +#: src/help.c:1062 +#, c-format +msgid " -x, --exclude-path=path_prefix files with path_prefix (relative to pgdata) will be\n" +msgstr "" + +#: src/help.c:1063 +#, c-format +msgid " excluded from catchup (can be used multiple times)\n" +msgstr "" + +#: src/help.c:1064 +#, c-format +msgid " Dangerous option! Use at your own risk!\n" +msgstr "" diff --git a/src/help.c b/src/help.c index a494ab20..8ebe734a 100644 --- a/src/help.c +++ b/src/help.c @@ -267,9 +267,9 @@ help_pg_probackup(void) { printf("\n"); if (PROGRAM_URL) - printf("Read the website for details. <%s>\n", PROGRAM_URL); + printf(_("Read the website for details <%s>.\n"), PROGRAM_URL); if (PROGRAM_EMAIL) - printf("Report bugs to <%s>.\n", PROGRAM_EMAIL); + printf(_("Report bugs to <%s>.\n"), PROGRAM_EMAIL); } } diff --git a/src/pg_probackup.c b/src/pg_probackup.c index c5ed1317..b9b3af0b 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -308,6 +308,7 @@ main(int argc, char *argv[]) init_config(&instance_config, instance_name); PROGRAM_NAME = get_progname(argv[0]); + set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_probackup")); PROGRAM_FULL_PATH = palloc0(MAXPGPATH); /* Get current time */ diff --git a/tests/Readme.md b/tests/Readme.md index 668552c9..ed1b22e0 100644 --- a/tests/Readme.md +++ b/tests/Readme.md @@ -31,7 +31,7 @@ Remote backup depends on key authentication to local machine via ssh as current export PGPROBACKUP_SSH_REMOTE=ON Run tests that are relied on advanced debugging features. For this mode, pg_probackup should be compiled without optimizations. For example: -CFLAGS="-O0" ./configure --prefix=/path/to/prefix --enable-debug --enable-cassert --enable-depend --enable-tap-tests +CFLAGS="-O0" ./configure --prefix=/path/to/prefix --enable-debug --enable-cassert --enable-depend --enable-tap-tests --enable-nls export PGPROBACKUP_GDB=ON diff --git a/tests/expected/option_help.out b/tests/expected/option_help.out index a8b4a64b..00b50d10 100644 --- a/tests/expected/option_help.out +++ b/tests/expected/option_help.out @@ -180,5 +180,5 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--ssh-options] [--help] -Read the website for details. +Read the website for details . Report bugs to . diff --git a/tests/expected/option_help_ru.out b/tests/expected/option_help_ru.out new file mode 100644 index 00000000..ee8da9a1 --- /dev/null +++ b/tests/expected/option_help_ru.out @@ -0,0 +1,184 @@ + +pg_probackup - утилита для управления резервным копированием/восстановлением базы данных PostgreSQL. 
+ + pg_probackup help [COMMAND] + + pg_probackup version + + pg_probackup init -B backup-path + + pg_probackup set-config -B backup-path --instance=instance_name + [-D pgdata-path] + [--external-dirs=external-directories-paths] + [--log-level-console=log-level-console] + [--log-level-file=log-level-file] + [--log-filename=log-filename] + [--error-log-filename=error-log-filename] + [--log-directory=log-directory] + [--log-rotation-size=log-rotation-size] + [--log-rotation-age=log-rotation-age] + [--retention-redundancy=retention-redundancy] + [--retention-window=retention-window] + [--wal-depth=wal-depth] + [--compress-algorithm=compress-algorithm] + [--compress-level=compress-level] + [--archive-timeout=timeout] + [-d dbname] [-h host] [-p port] [-U username] + [--remote-proto] [--remote-host] + [--remote-port] [--remote-path] [--remote-user] + [--ssh-options] + [--restore-command=cmdline] [--archive-host=destination] + [--archive-port=port] [--archive-user=username] + [--help] + + pg_probackup set-backup -B backup-path --instance=instance_name + -i backup-id [--ttl=interval] [--expire-time=timestamp] + [--note=text] + [--help] + + pg_probackup show-config -B backup-path --instance=instance_name + [--format=format] + [--help] + + pg_probackup backup -B backup-path -b backup-mode --instance=instance_name + [-D pgdata-path] [-C] + [--stream [-S slot-name] [--temp-slot]] + [--backup-pg-log] [-j num-threads] [--progress] + [--no-validate] [--skip-block-validation] + [--external-dirs=external-directories-paths] + [--no-sync] + [--log-level-console=log-level-console] + [--log-level-file=log-level-file] + [--log-filename=log-filename] + [--error-log-filename=error-log-filename] + [--log-directory=log-directory] + [--log-rotation-size=log-rotation-size] + [--log-rotation-age=log-rotation-age] [--no-color] + [--delete-expired] [--delete-wal] [--merge-expired] + [--retention-redundancy=retention-redundancy] + [--retention-window=retention-window] + [--wal-depth=wal-depth] + [--compress] + [--compress-algorithm=compress-algorithm] + [--compress-level=compress-level] + [--archive-timeout=archive-timeout] + [-d dbname] [-h host] [-p port] [-U username] + [-w --no-password] [-W --password] + [--remote-proto] [--remote-host] + [--remote-port] [--remote-path] [--remote-user] + [--ssh-options] + [--ttl=interval] [--expire-time=timestamp] [--note=text] + [--help] + + pg_probackup restore -B backup-path --instance=instance_name + [-D pgdata-path] [-i backup-id] [-j num-threads] + [--recovery-target-time=time|--recovery-target-xid=xid + |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]] + [--recovery-target-timeline=timeline] + [--recovery-target=immediate|latest] + [--recovery-target-name=target-name] + [--recovery-target-action=pause|promote|shutdown] + [--restore-command=cmdline] + [-R | --restore-as-replica] [--force] + [--primary-conninfo=primary_conninfo] + [-S | --primary-slot-name=slotname] + [--no-validate] [--skip-block-validation] + [-T OLDDIR=NEWDIR] [--progress] + [--external-mapping=OLDDIR=NEWDIR] + [--skip-external-dirs] [--no-sync] + [-I | --incremental-mode=none|checksum|lsn] + [--db-include | --db-exclude] + [--remote-proto] [--remote-host] + [--remote-port] [--remote-path] [--remote-user] + [--ssh-options] + [--archive-host=hostname] + [--archive-port=port] [--archive-user=username] + [--help] + + pg_probackup validate -B backup-path [--instance=instance_name] + [-i backup-id] [--progress] [-j num-threads] + [--recovery-target-time=time|--recovery-target-xid=xid + 
|--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]] + [--recovery-target-timeline=timeline] + [--recovery-target-name=target-name] + [--skip-block-validation] + [--help] + + pg_probackup checkdb [-B backup-path] [--instance=instance_name] + [-D pgdata-path] [--progress] [-j num-threads] + [--amcheck] [--skip-block-validation] + [--heapallindexed] [--checkunique] + [--help] + + pg_probackup show -B backup-path + [--instance=instance_name [-i backup-id]] + [--format=format] [--archive] + [--no-color] [--help] + + pg_probackup delete -B backup-path --instance=instance_name + [-j num-threads] [--progress] + [--retention-redundancy=retention-redundancy] + [--retention-window=retention-window] + [--wal-depth=wal-depth] + [-i backup-id | --delete-expired | --merge-expired | --status=backup_status] + [--delete-wal] + [--dry-run] [--no-validate] [--no-sync] + [--help] + + pg_probackup merge -B backup-path --instance=instance_name + -i backup-id [--progress] [-j num-threads] + [--no-validate] [--no-sync] + [--help] + + pg_probackup add-instance -B backup-path -D pgdata-path + --instance=instance_name + [--external-dirs=external-directories-paths] + [--remote-proto] [--remote-host] + [--remote-port] [--remote-path] [--remote-user] + [--ssh-options] + [--help] + + pg_probackup del-instance -B backup-path + --instance=instance_name + [--help] + + pg_probackup archive-push -B backup-path --instance=instance_name + --wal-file-name=wal-file-name + [--wal-file-path=wal-file-path] + [-j num-threads] [--batch-size=batch_size] + [--archive-timeout=timeout] + [--no-ready-rename] [--no-sync] + [--overwrite] [--compress] + [--compress-algorithm=compress-algorithm] + [--compress-level=compress-level] + [--remote-proto] [--remote-host] + [--remote-port] [--remote-path] [--remote-user] + [--ssh-options] + [--help] + + pg_probackup archive-get -B backup-path --instance=instance_name + --wal-file-path=wal-file-path + --wal-file-name=wal-file-name + [-j num-threads] [--batch-size=batch_size] + [--no-validate-wal] + [--remote-proto] [--remote-host] + [--remote-port] [--remote-path] [--remote-user] + [--ssh-options] + [--help] + + pg_probackup catchup -b catchup-mode + --source-pgdata=path_to_pgdata_on_remote_server + --destination-pgdata=path_to_local_dir + [--stream [-S slot-name] [--temp-slot | --perm-slot]] + [-j num-threads] + [-T OLDDIR=NEWDIR] + [--exclude-path=path_prefix] + [-d dbname] [-h host] [-p port] [-U username] + [-w --no-password] [-W --password] + [--remote-proto] [--remote-host] + [--remote-port] [--remote-path] [--remote-user] + [--ssh-options] + [--help] + +Подробнее читайте на сайте . +Сообщайте об ошибках в . 
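A minimal standalone sketch of the locale-pinned help check that the test_help_6 test added in tests/option.py below performs. It assumes a pg_probackup binary built with --enable-nls is on PATH, that the ru_RU.utf-8 locale is installed, and that the expected file sits at expected/option_help_ru.out relative to the working directory; the real test goes through the suite's ProbackupTest.run_pb() helper rather than calling subprocess directly.

```
# Simplified illustration only: run `pg_probackup --help` under the Russian
# locale and compare it with the expected output file added in this patch.
import os
import subprocess

def check_russian_help(expected_path="expected/option_help_ru.out",
                       binary="pg_probackup"):
    env = os.environ.copy()
    env["LC_ALL"] = "ru_RU.utf-8"            # force the Russian message catalog
    actual = subprocess.run([binary, "--help"], env=env,
                            capture_output=True, text=True).stdout
    with open(expected_path, encoding="utf-8") as f:
        expected = f.read()
    return actual == expected

if __name__ == "__main__":
    print("help output matches:", check_russian_help())
```

If the binary was built without NLS support, the translated messages are missing and a comparison like this fails, which is the situation the tests/Readme.md FAQ entry added later in this series describes.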
diff --git a/tests/option.py b/tests/option.py index 023a0c2c..b57d7ef4 100644 --- a/tests/option.py +++ b/tests/option.py @@ -1,6 +1,7 @@ import unittest import os from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +import locale module_name = 'option' @@ -226,3 +227,13 @@ class OptionTest(ProbackupTest, unittest.TestCase): # Clean after yourself self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + def test_help_6(self): + """help options""" + self.test_env['LC_ALL'] = 'ru_RU.utf-8' + with open(os.path.join(self.dir_path, "expected/option_help_ru.out"), "rb") as help_out: + self.assertEqual( + self.run_pb(["--help"]), + help_out.read().decode("utf-8") + ) diff --git a/travis/run_tests.sh b/travis/run_tests.sh index 52b05105..a62ad4de 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -47,7 +47,7 @@ cd postgres # Go to postgres dir if [ "$PG_PROBACKUP_PTRACK" = "ON" ]; then git apply -3 ../ptrack/patches/${PTRACK_PATCH_PG_BRANCH}-ptrack-core.diff fi -CFLAGS="-O0" ./configure --prefix=$PGHOME --enable-debug --enable-cassert --enable-depend --enable-tap-tests +CFLAGS="-O0" ./configure --prefix=$PGHOME --enable-debug --enable-cassert --enable-depend --enable-tap-tests --enable-nls make -s -j$(nproc) install #make -s -j$(nproc) -C 'src/common' install #make -s -j$(nproc) -C 'src/port' install From 68b77a06bca055c24ef00dee1896409a0beb923b Mon Sep 17 00:00:00 2001 From: Daniel Shelepanov Date: Wed, 25 May 2022 14:45:25 +0300 Subject: [PATCH 06/28] [PBCKP-150] Reading buffer is flushed each time we verify the checksum. (#487) The race condition is covered with a unit-test, the buffer is flushed now so each of 300 reads requests the data from the disc. --- .travis.yml | 1 + src/data.c | 2 ++ tests/Readme.md | 2 ++ tests/__init__.py | 8 ++++- tests/time_consuming.py | 76 +++++++++++++++++++++++++++++++++++++++++ 5 files changed, 88 insertions(+), 1 deletion(-) create mode 100644 tests/time_consuming.py diff --git a/.travis.yml b/.travis.yml index 8e325c64..26b2bc4e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -47,6 +47,7 @@ env: # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=replica # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=retention # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=restore +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=time_consuming jobs: allow_failures: diff --git a/src/data.c b/src/data.c index 052e1748..e5a55112 100644 --- a/src/data.c +++ b/src/data.c @@ -349,6 +349,8 @@ prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn, Assert(false); } } + /* avoid re-reading once buffered data, flushing on further attempts, see PBCKP-150 */ + fflush(in); } /* diff --git a/tests/Readme.md b/tests/Readme.md index ed1b22e0..500ed7c7 100644 --- a/tests/Readme.md +++ b/tests/Readme.md @@ -41,6 +41,8 @@ Run suit of basic simple tests: Run ptrack tests: export PG_PROBACKUP_PTRACK=ON +Run long (time consuming) tests: + export PG_PROBACKUP_LONG=ON Usage: sudo echo 0 > /proc/sys/kernel/yama/ptrace_scope diff --git a/tests/__init__.py b/tests/__init__.py index 55d6ea9b..79537ad7 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -7,7 +7,7 @@ from . 
import init, merge, option, show, compatibility, \ compression, page, ptrack, archive, exclude, cfs_backup, cfs_restore, \ cfs_validate_backup, auth_test, time_stamp, logging, \ locking, remote, external, config, checkdb, set_backup, incr_restore, \ - catchup, CVE_2018_1058 + catchup, CVE_2018_1058, time_consuming def load_tests(loader, tests, pattern): @@ -21,6 +21,12 @@ def load_tests(loader, tests, pattern): if os.environ['PG_PROBACKUP_PTRACK'] == 'ON': suite.addTests(loader.loadTestsFromModule(ptrack)) + # PG_PROBACKUP_LONG section for tests that are long + # by design e.g. they contain loops, sleeps and so on + if 'PG_PROBACKUP_LONG' in os.environ: + if os.environ['PG_PROBACKUP_LONG'] == 'ON': + suite.addTests(loader.loadTestsFromModule(time_consuming)) + # suite.addTests(loader.loadTestsFromModule(auth_test)) suite.addTests(loader.loadTestsFromModule(archive)) suite.addTests(loader.loadTestsFromModule(backup)) diff --git a/tests/time_consuming.py b/tests/time_consuming.py new file mode 100644 index 00000000..396ab716 --- /dev/null +++ b/tests/time_consuming.py @@ -0,0 +1,76 @@ +import os +import unittest +from .helpers.ptrack_helpers import ProbackupTest +import subprocess +from time import sleep + +module_name = 'time_consuming' + +class TimeConsumingTests(ProbackupTest, unittest.TestCase): + def test_pbckp150(self): + """ + https://jira.postgrespro.ru/browse/PBCKP-150 + create a node filled with pgbench + create FULL backup followed by PTRACK backup + run pgbench, vacuum VERBOSE FULL and ptrack backups in parallel + """ + # init node + fname = self.id().split('.')[3] + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + node.append_conf('postgresql.conf', + """ + max_connections = 100 + wal_keep_size = 16000 + ptrack.map_size = 1 + shared_preload_libraries='ptrack' + log_statement = 'none' + fsync = off + log_checkpoints = on + autovacuum = off + """) + + # init probackup and add an instance + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + + # run the node and init ptrack + node.slow_start() + node.safe_psql("postgres", "CREATE EXTENSION ptrack") + # populate it with pgbench + node.pgbench_init(scale=5) + + # FULL backup followed by PTRACK backup + self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) + + # run ordinary pgbench scenario to imitate some activity and another pgbench for vacuuming in parallel + nBenchDuration = 30 + pgbench = node.pgbench(options=['-c', '20', '-j', '8', '-T', str(nBenchDuration)]) + with open('/tmp/pbckp150vacuum.sql', 'w') as f: + f.write('VACUUM (FULL) pgbench_accounts, pgbench_tellers, pgbench_history; SELECT pg_sleep(1);\n') + pgbenchval = node.pgbench(options=['-c', '1', '-f', '/tmp/pbckp150vacuum.sql', '-T', str(nBenchDuration)]) + + # several PTRACK backups + for i in range(nBenchDuration): + print("[{}] backing up PTRACK diff...".format(i+1)) + self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream', '--log-level-console', 'VERBOSE']) + sleep(0.1) + # if the activity pgbench has finished, stop backing up + if pgbench.poll() is not None: + break + + pgbench.kill() + pgbenchval.kill() + pgbench.wait() + pgbenchval.wait() + + backups = self.show_pb(backup_dir, 'node') + for b in backups: + self.assertEqual("OK", b['status']) + + 
# Clean after yourself + self.del_test_dir(module_name, fname) From 0b5b37e8930e75793b23e0829d2f57cc5a13a34d Mon Sep 17 00:00:00 2001 From: asavchkov <79832668+asavchkov@users.noreply.github.com> Date: Thu, 26 May 2022 19:53:01 +0700 Subject: [PATCH 07/28] Add a workflow to build and test probackup on Windows (#484) * Add a workflow to build and test probackup on Windows * [PBCKP-149] fix test_basic_validate_nullified_heap_page_backup for windows Co-authored-by: Alexey Savchkov Co-authored-by: Mikhail A. Kulagin --- .github/workflows/build.yml | 94 +++++++++++++++++++++++++++++++++ gen_probackup_project.pl | 12 ++--- tests/helpers/ptrack_helpers.py | 6 +-- tests/validate.py | 5 +- 4 files changed, 104 insertions(+), 13 deletions(-) create mode 100644 .github/workflows/build.yml diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 00000000..ab1a5888 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,94 @@ +name: Build Probackup + +on: + push: + branches: + - "**" + # Runs triggered by pull requests are disabled to prevent executing potentially unsafe code from public pull requests + # pull_request: + # branches: + # - main + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +jobs: + + build-win2019: + + runs-on: + - windows-2019 + + env: + zlib_dir: C:\dep\zlib + + steps: + + - uses: actions/checkout@v2 + + - name: Install pacman packages + run: | + $env:PATH += ";C:\msys64\usr\bin" + pacman -S --noconfirm --needed bison flex + + - name: Make zlib + run: | + git clone -b v1.2.11 --depth 1 https://github.com/madler/zlib.git + cd zlib + cmake -DCMAKE_INSTALL_PREFIX:PATH=C:\dep\zlib -G "Visual Studio 16 2019" . + cmake --build . --config Release --target ALL_BUILD + cmake --build . --config Release --target INSTALL + copy C:\dep\zlib\lib\zlibstatic.lib C:\dep\zlib\lib\zdll.lib + copy C:\dep\zlib\bin\zlib.dll C:\dep\zlib\lib + + - name: Get Postgres sources + run: git clone -b REL_14_STABLE https://github.com/postgres/postgres.git + + # Copy ptrack to contrib to build the ptrack extension + # Convert line breaks in the patch file to LF otherwise the patch doesn't apply + - name: Get Ptrack sources + run: | + git clone -b master --depth 1 https://github.com/postgrespro/ptrack.git + Copy-Item -Path ptrack -Destination postgres\contrib -Recurse + (Get-Content ptrack\patches\REL_14_STABLE-ptrack-core.diff -Raw).Replace("`r`n","`n") | Set-Content ptrack\patches\REL_14_STABLE-ptrack-core.diff -Force -NoNewline + cd postgres + git apply -3 ../ptrack/patches/REL_14_STABLE-ptrack-core.diff + + - name: Build Postgres + run: | + $env:PATH += ";C:\msys64\usr\bin" + cd postgres\src\tools\msvc + (Get-Content config_default.pl) -Replace "zlib *=>(.*?)(?=,? 
*#)", "zlib => '${{ env.zlib_dir }}'" | Set-Content config.pl + cmd.exe /s /c "`"C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat`" amd64 && .\build.bat" + + - name: Build Probackup + run: cmd.exe /s /c "`"C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat`" amd64 && perl .\gen_probackup_project.pl `"${{ github.workspace }}`"\postgres" + + - name: Install Postgres + run: | + cd postgres + src\tools\msvc\install.bat postgres_install + + - name: Install Testgres + run: | + git clone -b no-port-for --single-branch --depth 1 https://github.com/postgrespro/testgres.git + cd testgres + python setup.py install + + # Grant the Github runner user full control of the workspace for initdb to successfully process the data folder + - name: Test Probackup + run: | + icacls.exe "${{ github.workspace }}" /grant "${env:USERNAME}:(OI)(CI)F" + $env:PATH += ";${{ github.workspace }}\postgres\postgres_install\lib;${{ env.zlib_dir }}\lib" + $Env:LC_MESSAGES = "English" + $Env:PG_CONFIG = "${{ github.workspace }}\postgres\postgres_install\bin\pg_config.exe" + $Env:PGPROBACKUPBIN = "${{ github.workspace }}\postgres\Release\pg_probackup\pg_probackup.exe" + $Env:PG_PROBACKUP_PTRACK = "ON" + If (!$Env:MODE -Or $Env:MODE -Eq "basic") { + $Env:PG_PROBACKUP_TEST_BASIC = "ON" + python -m unittest -v tests + python -m unittest -v tests.init + } else { + python -m unittest -v tests.$Env:MODE + } + diff --git a/gen_probackup_project.pl b/gen_probackup_project.pl index c24db122..8143b7d0 100644 --- a/gen_probackup_project.pl +++ b/gen_probackup_project.pl @@ -13,11 +13,11 @@ if (($#ARGV+1)==1) { $pgsrc = shift @ARGV; if($pgsrc eq "--help"){ - print STDERR "Usage $0 pg-source-dir \n"; - print STDERR "Like this: \n"; - print STDERR "$0 C:/PgProject/postgresql.10dev/postgrespro \n"; - print STDERR "May be need input this before: \n"; - print STDERR "CALL \"C:\\Program Files (x86)\\Microsoft Visual Studio 12.0\\VC\\vcvarsall\" amd64\n"; + print STDERR "Usage $0 pg-source-dir\n"; + print STDERR "Like this:\n"; + print STDERR "$0 C:/PgProject/postgresql.10dev/postgrespro\n"; + print STDERR "May need to run this first:\n"; + print STDERR "CALL \"C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Community\\VC\\Auxiliary\\Build\\vcvarsall.bat\" amd64\n"; exit 1; } } @@ -133,7 +133,7 @@ sub build_pgprobackup unless (-d 'src/tools/msvc' && -d 'src'); # my $vsVersion = DetermineVisualStudioVersion(); - my $vsVersion = '12.00'; + my $vsVersion = '16.00'; $solution = CreateSolution($vsVersion, $config); diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 3b14b717..ffb87c5e 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -89,11 +89,7 @@ def dir_files(base_dir): def is_enterprise(): # pg_config --help - if os.name == 'posix': - cmd = [os.environ['PG_CONFIG'], '--help'] - - elif os.name == 'nt': - cmd = [[os.environ['PG_CONFIG']], ['--help']] + cmd = [os.environ['PG_CONFIG'], '--help'] p = subprocess.Popen( cmd, diff --git a/tests/validate.py b/tests/validate.py index 0b04d92f..e6282638 100644 --- a/tests/validate.py +++ b/tests/validate.py @@ -2,6 +2,7 @@ import os import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException from datetime import datetime, timedelta +from pathlib import Path import subprocess from sys import exit import time @@ -58,7 +59,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase): with open(log_file_path) as f: 
log_content = f.read() self.assertIn( - 'File: "{0}" blknum 1, empty page'.format(file), + 'File: "{0}" blknum 1, empty page'.format(Path(file).as_posix()), log_content, 'Failed to detect nullified block') @@ -4247,4 +4248,4 @@ class ValidateTest(ProbackupTest, unittest.TestCase): # 715 MAXALIGN(header.compressed_size), in); # 716 if (read_len != MAXALIGN(header.compressed_size)) # -> 717 elog(ERROR, "cannot read block %u of \"%s\" read %lu of %d", -# 718 blknum, file->path, read_len, header.compressed_size); \ No newline at end of file +# 718 blknum, file->path, read_len, header.compressed_size); From 7be2e738a923bd65026cd7c95150d5a67d0ec228 Mon Sep 17 00:00:00 2001 From: avaness Date: Fri, 27 May 2022 18:56:38 +0300 Subject: [PATCH 08/28] PBCKP-145: added check of unlogged table is restored as empty table (#490) --- tests/exclude.py | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/tests/exclude.py b/tests/exclude.py index b98a483d..2c492588 100644 --- a/tests/exclude.py +++ b/tests/exclude.py @@ -203,8 +203,10 @@ class ExcludeTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_exclude_unlogged_tables_2(self): """ - make node, create unlogged, take FULL, check - that unlogged was not backed up + 1. make node, create unlogged, take FULL, DELTA, PAGE, + check that unlogged table files was not backed up + 2. restore FULL, DELTA, PAGE to empty db, + ensure unlogged table exist and is epmty """ fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') @@ -220,6 +222,8 @@ class ExcludeTest(ProbackupTest, unittest.TestCase): self.set_archiving(backup_dir, 'node', node) node.slow_start() + backup_ids = [] + for backup_type in ['full', 'delta', 'page']: if backup_type == 'full': @@ -231,14 +235,16 @@ class ExcludeTest(ProbackupTest, unittest.TestCase): 'postgres', 'insert into test select generate_series(0,20050000)::text') - rel_path = node.safe_psql( + rel_path = node.execute( 'postgres', - "select pg_relation_filepath('test')").decode('utf-8').rstrip() + "select pg_relation_filepath('test')")[0][0] backup_id = self.backup_node( backup_dir, 'node', node, backup_type=backup_type, options=['--stream']) + backup_ids.append(backup_id) + filelist = self.get_backup_filelist( backup_dir, 'node', backup_id) @@ -258,9 +264,25 @@ class ExcludeTest(ProbackupTest, unittest.TestCase): rel_path + '.3', filelist, "Unlogged table was not excluded") + # ensure restoring retrieves back only empty unlogged table + for backup_id in backup_ids: + node.stop() + node.cleanup() + + self.restore_node(backup_dir, 'node', node, backup_id=backup_id) + + node.slow_start() + + self.assertEqual( + node.execute( + 'postgres', + 'select count(*) from test')[0][0], + 0) + # Clean after yourself self.del_test_dir(module_name, fname) + # @unittest.skip("skip") def test_exclude_log_dir(self): """ From 884e8b09f315a7ff9b53bea6f8395b44d0ed22f2 Mon Sep 17 00:00:00 2001 From: dlepikhova <43872363+dlepikhova@users.noreply.github.com> Date: Wed, 1 Jun 2022 12:49:09 +0500 Subject: [PATCH 09/28] [pbckp-128] dry-run option for catchup (#477) * Added dry-run option for catchup. 
Run catchup without affect on the files and WAL --- src/catchup.c | 82 +++++++++++------- src/help.c | 4 + tests/catchup.py | 154 +++++++++++++++++++++++++++++++++ tests/expected/option_help.out | 1 + travis/run_tests.sh | 9 ++ 5 files changed, 219 insertions(+), 31 deletions(-) diff --git a/src/catchup.c b/src/catchup.c index 1b8f8084..3c522afb 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -2,7 +2,7 @@ * * catchup.c: sync DB cluster * - * Copyright (c) 2021, Postgres Professional + * Copyright (c) 2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -507,16 +507,20 @@ catchup_multithreaded_copy(int num_threads, /* Run threads */ thread_interrupted = false; threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads); - for (i = 0; i < num_threads; i++) + if (!dry_run) { - elog(VERBOSE, "Start thread num: %i", i); - pthread_create(&threads[i], NULL, &catchup_thread_runner, &(threads_args[i])); + for (i = 0; i < num_threads; i++) + { + elog(VERBOSE, "Start thread num: %i", i); + pthread_create(&threads[i], NULL, &catchup_thread_runner, &(threads_args[i])); + } } /* Wait threads */ for (i = 0; i < num_threads; i++) { - pthread_join(threads[i], NULL); + if (!dry_run) + pthread_join(threads[i], NULL); all_threads_successful &= threads_args[i].completed; transfered_bytes_result += threads_args[i].transfered_bytes; } @@ -706,9 +710,14 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, /* Start stream replication */ join_path_components(dest_xlog_path, dest_pgdata, PG_XLOG_DIR); - fio_mkdir(dest_xlog_path, DIR_PERMISSION, FIO_LOCAL_HOST); - start_WAL_streaming(source_conn, dest_xlog_path, &instance_config.conn_opt, - current.start_lsn, current.tli, false); + if (!dry_run) + { + fio_mkdir(dest_xlog_path, DIR_PERMISSION, FIO_LOCAL_HOST); + start_WAL_streaming(source_conn, dest_xlog_path, &instance_config.conn_opt, + current.start_lsn, current.tli, false); + } + else + elog(INFO, "WAL streaming skipping with --dry-run option"); source_filelist = parray_new(); @@ -779,9 +788,9 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, /* Build the page map from ptrack information */ make_pagemap_from_ptrack_2(source_filelist, source_conn, - source_node_info.ptrack_schema, - source_node_info.ptrack_version_num, - dest_redo.lsn); + source_node_info.ptrack_schema, + source_node_info.ptrack_version_num, + dest_redo.lsn); time(&end_time); elog(INFO, "Pagemap successfully extracted, time elapsed: %.0f sec", difftime(end_time, start_time)); @@ -820,9 +829,9 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, char dirpath[MAXPGPATH]; join_path_components(dirpath, dest_pgdata, file->rel_path); - elog(VERBOSE, "Create directory '%s'", dirpath); - fio_mkdir(dirpath, DIR_PERMISSION, FIO_LOCAL_HOST); + if (!dry_run) + fio_mkdir(dirpath, DIR_PERMISSION, FIO_LOCAL_HOST); } else { @@ -853,15 +862,18 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, elog(VERBOSE, "Create directory \"%s\" and symbolic link \"%s\"", linked_path, to_path); - /* create tablespace directory */ - if (fio_mkdir(linked_path, file->mode, FIO_LOCAL_HOST) != 0) - elog(ERROR, "Could not create tablespace directory \"%s\": %s", - linked_path, strerror(errno)); + if (!dry_run) + { + /* create tablespace directory */ + if (fio_mkdir(linked_path, file->mode, FIO_LOCAL_HOST) != 0) + elog(ERROR, "Could not create tablespace directory \"%s\": %s", + linked_path, 
strerror(errno)); - /* create link to linked_path */ - if (fio_symlink(linked_path, to_path, true, FIO_LOCAL_HOST) < 0) - elog(ERROR, "Could not create symbolic link \"%s\" -> \"%s\": %s", - linked_path, to_path, strerror(errno)); + /* create link to linked_path */ + if (fio_symlink(linked_path, to_path, true, FIO_LOCAL_HOST) < 0) + elog(ERROR, "Could not create symbolic link \"%s\" -> \"%s\": %s", + linked_path, to_path, strerror(errno)); + } } } @@ -930,7 +942,10 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, char fullpath[MAXPGPATH]; join_path_components(fullpath, dest_pgdata, file->rel_path); - fio_delete(file->mode, fullpath, FIO_LOCAL_HOST); + if (!dry_run) + { + fio_delete(file->mode, fullpath, FIO_LOCAL_HOST); + } elog(VERBOSE, "Deleted file \"%s\"", fullpath); /* shrink dest pgdata list */ @@ -961,7 +976,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, catchup_isok = transfered_datafiles_bytes != -1; /* at last copy control file */ - if (catchup_isok) + if (catchup_isok && !dry_run) { char from_fullpath[MAXPGPATH]; char to_fullpath[MAXPGPATH]; @@ -972,7 +987,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, transfered_datafiles_bytes += source_pg_control_file->size; } - if (!catchup_isok) + if (!catchup_isok && !dry_run) { char pretty_time[20]; char pretty_transfered_data_bytes[20]; @@ -1010,14 +1025,18 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, pg_free(stop_backup_query_text); } - wait_wal_and_calculate_stop_lsn(dest_xlog_path, stop_backup_result.lsn, ¤t); + if (!dry_run) + wait_wal_and_calculate_stop_lsn(dest_xlog_path, stop_backup_result.lsn, ¤t); #if PG_VERSION_NUM >= 90600 /* Write backup_label */ Assert(stop_backup_result.backup_label_content != NULL); - pg_stop_backup_write_file_helper(dest_pgdata, PG_BACKUP_LABEL_FILE, "backup label", - stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len, - NULL); + if (!dry_run) + { + pg_stop_backup_write_file_helper(dest_pgdata, PG_BACKUP_LABEL_FILE, "backup label", + stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len, + NULL); + } free(stop_backup_result.backup_label_content); stop_backup_result.backup_label_content = NULL; stop_backup_result.backup_label_content_len = 0; @@ -1040,6 +1059,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, #endif /* wait for end of wal streaming and calculate wal size transfered */ + if (!dry_run) { parray *wal_files_list = NULL; wal_files_list = parray_new(); @@ -1091,17 +1111,17 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, } /* Sync all copied files unless '--no-sync' flag is used */ - if (sync_dest_files) + if (sync_dest_files && !dry_run) catchup_sync_destination_files(dest_pgdata, FIO_LOCAL_HOST, source_filelist, source_pg_control_file); else elog(WARNING, "Files are not synced to disk"); /* Cleanup */ - if (dest_filelist) + if (dest_filelist && !dry_run) { parray_walk(dest_filelist, pgFileFree); - parray_free(dest_filelist); } + parray_free(dest_filelist); parray_walk(source_filelist, pgFileFree); parray_free(source_filelist); pgFileFree(source_pg_control_file); diff --git a/src/help.c b/src/help.c index 8ebe734a..b22fa912 100644 --- a/src/help.c +++ b/src/help.c @@ -261,6 +261,7 @@ help_pg_probackup(void) printf(_(" [--remote-proto] [--remote-host]\n")); printf(_(" [--remote-port] [--remote-path] 
[--remote-user]\n")); printf(_(" [--ssh-options]\n")); + printf(_(" [--dry-run]\n")); printf(_(" [--help]\n")); if ((PROGRAM_URL || PROGRAM_EMAIL)) @@ -1047,6 +1048,7 @@ help_catchup(void) printf(_(" [--remote-proto] [--remote-host]\n")); printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n")); + printf(_(" [--dry-run]\n")); printf(_(" [--help]\n\n")); printf(_(" -b, --backup-mode=catchup-mode catchup mode=FULL|DELTA|PTRACK\n")); @@ -1081,4 +1083,6 @@ help_catchup(void) printf(_(" --remote-user=username user name for ssh connection (default: current user)\n")); printf(_(" --ssh-options=ssh_options additional ssh options (default: none)\n")); printf(_(" (example: --ssh-options='-c cipher_spec -F configfile')\n\n")); + + printf(_(" --dry-run perform a trial run without any changes\n\n")); } diff --git a/tests/catchup.py b/tests/catchup.py index 8441deaa..a83755c5 100644 --- a/tests/catchup.py +++ b/tests/catchup.py @@ -1455,3 +1455,157 @@ class CatchupTest(ProbackupTest, unittest.TestCase): dst_pg.stop() #self.assertEqual(1, 0, 'Stop test') self.del_test_dir(module_name, self.fname) + +######################################### +# --dry-run +######################################### + def test_dry_run_catchup_full(self): + """ + Test dry-run option for full catchup + """ + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True + ) + src_pg.slow_start() + + # preparation 2: make clean shutdowned lagging behind replica + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + + src_pg.pgbench_init(scale = 10) + pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) + pgbench.wait() + + # save the condition before dry-run + content_before = self.pgdata_content(dst_pg.data_dir) + + # do full catchup + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--dry-run'] + ) + + # compare data dirs before and after catchup + self.compare_pgdata( + content_before, + self.pgdata_content(dst_pg.data_dir) + ) + + # Cleanup + src_pg.stop() + self.del_test_dir(module_name, self.fname) + + def test_dry_run_catchup_ptrack(self): + """ + Test dry-run option for catchup in incremental ptrack mode + """ + if not self.ptrack: + return unittest.skip('Skipped because ptrack support is disabled') + + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True, + ptrack_enable = True, + initdb_params = ['--data-checksums'] + ) + src_pg.slow_start() + src_pg.safe_psql("postgres", "CREATE EXTENSION ptrack") + + src_pg.pgbench_init(scale = 10) + pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) + pgbench.wait() + + # preparation 2: make clean shutdowned lagging behind replica + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + self.set_replica(src_pg, dst_pg) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start(replica = True) + dst_pg.stop() + + # save the condition before dry-run + content_before = self.pgdata_content(dst_pg.data_dir) + + # do incremental catchup + 
self.catchup_node( + backup_mode = 'PTRACK', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--dry-run'] + ) + + # compare data dirs before and after cathup + self.compare_pgdata( + content_before, + self.pgdata_content(dst_pg.data_dir) + ) + + # Cleanup + src_pg.stop() + self.del_test_dir(module_name, self.fname) + + def test_dry_run_catchup_delta(self): + """ + Test dry-run option for catchup in incremental delta mode + """ + + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True, + initdb_params = ['--data-checksums'], + pg_options = { 'wal_log_hints': 'on' } + ) + src_pg.slow_start() + + src_pg.pgbench_init(scale = 10) + pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) + pgbench.wait() + + # preparation 2: make clean shutdowned lagging behind replica + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + self.set_replica(src_pg, dst_pg) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start(replica = True) + dst_pg.stop() + + # save the condition before dry-run + content_before = self.pgdata_content(dst_pg.data_dir) + + # do delta catchup + self.catchup_node( + backup_mode = 'DELTA', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', "--dry-run"] + ) + + # compare data dirs before and after cathup + self.compare_pgdata( + content_before, + self.pgdata_content(dst_pg.data_dir) + ) + + # Cleanup + src_pg.stop() + self.del_test_dir(module_name, self.fname) + diff --git a/tests/expected/option_help.out b/tests/expected/option_help.out index 00b50d10..8a1de1f6 100644 --- a/tests/expected/option_help.out +++ b/tests/expected/option_help.out @@ -178,6 +178,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--remote-proto] [--remote-host] [--remote-port] [--remote-path] [--remote-user] [--ssh-options] + [--dry-run] [--help] Read the website for details . diff --git a/travis/run_tests.sh b/travis/run_tests.sh index a62ad4de..37614f97 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -100,11 +100,20 @@ source pyenv/bin/activate pip3 install testgres echo "############### Testing:" +echo PG_PROBACKUP_PARANOIA=${PG_PROBACKUP_PARANOIA} +echo ARCHIVE_COMPRESSION=${ARCHIVE_COMPRESSION} +echo PGPROBACKUPBIN_OLD=${PGPROBACKUPBIN_OLD} +echo PGPROBACKUPBIN=${PGPROBACKUPBIN} +echo PGPROBACKUP_SSH_REMOTE=${PGPROBACKUP_SSH_REMOTE} +echo PGPROBACKUP_GDB=${PGPROBACKUP_GDB} +echo PG_PROBACKUP_PTRACK=${PG_PROBACKUP_PTRACK} if [ "$MODE" = "basic" ]; then export PG_PROBACKUP_TEST_BASIC=ON + echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC} python3 -m unittest -v tests python3 -m unittest -v tests.init else + echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC} python3 -m unittest -v tests.$MODE fi From 8bb0a618fb5608af098f12d04285e572054ad194 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Wed, 1 Jun 2022 10:59:19 +0300 Subject: [PATCH 10/28] Version 2.5.6 --- src/pg_probackup.h | 2 +- tests/expected/option_version.out | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 4cd65980..2c4c6103 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -338,7 +338,7 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.5.5" +#define PROGRAM_VERSION "2.5.6" /* update when remote agent API or behaviour changes */ #define AGENT_PROTOCOL_VERSION 20501 diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out index 29cd93f4..96f0f344 100644 --- a/tests/expected/option_version.out +++ b/tests/expected/option_version.out @@ -1 +1 @@ -pg_probackup 2.5.5 \ No newline at end of file +pg_probackup 2.5.6 From 41855701c7033ff358de8b56822ce607dd3303c9 Mon Sep 17 00:00:00 2001 From: Victor Spirin Date: Wed, 8 Jun 2022 00:38:51 +0300 Subject: [PATCH 11/28] [PBCKP-153] Added waldir option for location for the write-ahead log directory (-X, --waldir=WALDIR) --- src/dir.c | 30 +++++++++++++++++++++++++++++- src/help.c | 6 ++++++ src/merge.c | 2 +- src/pg_probackup.c | 17 +++++++++++++++++ src/pg_probackup.h | 5 ++++- src/restore.c | 2 +- 6 files changed, 58 insertions(+), 4 deletions(-) diff --git a/src/dir.c b/src/dir.c index 4ebe0939..ac794cee 100644 --- a/src/dir.c +++ b/src/dir.c @@ -1036,13 +1036,20 @@ opt_externaldir_map(ConfigOption *opt, const char *arg) */ void create_data_directories(parray *dest_files, const char *data_dir, const char *backup_dir, - bool extract_tablespaces, bool incremental, fio_location location) + bool extract_tablespaces, bool incremental, fio_location location, + const char* waldir_path) { int i; parray *links = NULL; mode_t pg_tablespace_mode = DIR_PERMISSION; char to_path[MAXPGPATH]; + if (waldir_path && !dir_is_empty(waldir_path, location)) + { + elog(ERROR, "WAL directory location is not empty: \"%s\"", waldir_path); + } + + /* get tablespace map */ if (extract_tablespaces) { @@ -1107,6 +1114,27 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba /* skip external directory content */ if (dir->external_dir_num != 0) continue; + /* Create WAL directory and symlink if waldir_path is setting */ + if (waldir_path && strcmp(dir->rel_path, PG_XLOG_DIR) == 0) { + /* get full path to PG_XLOG_DIR */ + + join_path_components(to_path, data_dir, PG_XLOG_DIR); + + elog(VERBOSE, "Create directory \"%s\" and symbolic link \"%s\"", + waldir_path, to_path); + + /* create tablespace directory from waldir_path*/ + fio_mkdir(waldir_path, pg_tablespace_mode, location); + + /* create link to linked_path */ + if (fio_symlink(waldir_path, to_path, incremental, location) < 0) + elog(ERROR, "Could not create symbolic link \"%s\": %s", + to_path, strerror(errno)); + + continue; + + + } /* tablespace_map exists */ if (links) diff --git a/src/help.c b/src/help.c index b22fa912..85894759 100644 --- a/src/help.c +++ b/src/help.c @@ -169,6 +169,7 @@ help_pg_probackup(void) printf(_(" [-T OLDDIR=NEWDIR] [--progress]\n")); printf(_(" [--external-mapping=OLDDIR=NEWDIR]\n")); printf(_(" [--skip-external-dirs] [--no-sync]\n")); + printf(_(" [-X WALDIR | --waldir=WALDIR]\n")); printf(_(" [-I | --incremental-mode=none|checksum|lsn]\n")); printf(_(" [--db-include | 
--db-exclude]\n")); printf(_(" [--remote-proto] [--remote-host]\n")); @@ -435,6 +436,7 @@ help_restore(void) printf(_(" [-T OLDDIR=NEWDIR]\n")); printf(_(" [--external-mapping=OLDDIR=NEWDIR]\n")); printf(_(" [--skip-external-dirs]\n")); + printf(_(" [-X WALDIR | --waldir=WALDIR]\n")); printf(_(" [-I | --incremental-mode=none|checksum|lsn]\n")); printf(_(" [--db-include dbname | --db-exclude dbname]\n")); printf(_(" [--recovery-target-time=time|--recovery-target-xid=xid\n")); @@ -472,6 +474,10 @@ help_restore(void) printf(_(" relocate the external directory from OLDDIR to NEWDIR\n")); printf(_(" --skip-external-dirs do not restore all external directories\n")); + + printf(_(" -X, --waldir=WALDIR location for the write-ahead log directory\n")); + + printf(_("\n Incremental restore options:\n")); printf(_(" -I, --incremental-mode=none|checksum|lsn\n")); printf(_(" reuse valid pages available in PGDATA if they have not changed\n")); diff --git a/src/merge.c b/src/merge.c index ff39c251..1ce92bb4 100644 --- a/src/merge.c +++ b/src/merge.c @@ -614,7 +614,7 @@ merge_chain(InstanceState *instanceState, /* Create directories */ create_data_directories(dest_backup->files, full_database_dir, - dest_backup->root_dir, false, false, FIO_BACKUP_HOST); + dest_backup->root_dir, false, false, FIO_BACKUP_HOST, NULL); /* External directories stuff */ if (dest_backup->external_dir_str) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index b9b3af0b..2c8100b8 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -122,6 +122,7 @@ static parray *datname_include_list = NULL; /* arrays for --exclude-path's */ static parray *exclude_absolute_paths_list = NULL; static parray *exclude_relative_paths_list = NULL; +static char* waldir_path = NULL; /* checkdb options */ bool need_amcheck = false; @@ -238,6 +239,7 @@ static ConfigOption cmd_options[] = { 's', 160, "primary-conninfo", &primary_conninfo, SOURCE_CMD_STRICT }, { 's', 'S', "primary-slot-name",&replication_slot, SOURCE_CMD_STRICT }, { 'f', 'I', "incremental-mode", opt_incr_restore_mode, SOURCE_CMD_STRICT }, + { 's', 'X', "waldir", &waldir_path, SOURCE_CMD_STRICT }, /* checkdb options */ { 'b', 195, "amcheck", &need_amcheck, SOURCE_CMD_STRICT }, { 'b', 196, "heapallindexed", &heapallindexed, SOURCE_CMD_STRICT }, @@ -754,6 +756,21 @@ main(int argc, char *argv[]) restore_params->partial_restore_type = INCLUDE; restore_params->partial_db_list = datname_include_list; } + + if (waldir_path) + { + /* clean up xlog directory name, check it's absolute */ + canonicalize_path(waldir_path); + if (!is_absolute_path(waldir_path)) + { + elog(ERROR, "WAL directory location must be an absolute path"); + } + if (strlen(waldir_path) > MAXPGPATH) + elog(ERROR, "Value specified to --waldir is too long"); + + } + restore_params->waldir = waldir_path; + } /* diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 2c4c6103..13650be8 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -566,6 +566,8 @@ typedef struct pgRestoreParams /* options for partial restore */ PartialRestoreType partial_restore_type; parray *partial_db_list; + + char* waldir; } pgRestoreParams; /* Options needed for set-backup command */ @@ -1022,7 +1024,8 @@ extern void create_data_directories(parray *dest_files, const char *backup_dir, bool extract_tablespaces, bool incremental, - fio_location location); + fio_location location, + const char *waldir_path); extern void read_tablespace_map(parray *links, const char *backup_dir); extern void opt_tablespace_map(ConfigOption *opt, const char 
*arg); diff --git a/src/restore.c b/src/restore.c index d8d808a4..fbf0c039 100644 --- a/src/restore.c +++ b/src/restore.c @@ -801,7 +801,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, create_data_directories(dest_files, instance_config.pgdata, dest_backup->root_dir, backup_has_tblspc, params->incremental_mode != INCR_NONE, - FIO_DB_HOST); + FIO_DB_HOST, params->waldir); /* * Restore dest_backup external directories. From 48a2c835d1c12353e23e08b901beaf39695773f9 Mon Sep 17 00:00:00 2001 From: Victor Spirin Date: Wed, 8 Jun 2022 17:40:49 +0300 Subject: [PATCH 12/28] [PBCKP-153] Added a test for the waldir option for the restore command --- tests/restore.py | 56 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/tests/restore.py b/tests/restore.py index bbdadeb2..668cff4f 100644 --- a/tests/restore.py +++ b/tests/restore.py @@ -3916,3 +3916,59 @@ class RestoreTest(ProbackupTest, unittest.TestCase): # Clean after yourself self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + def test_restore_with_waldir(self): + """recovery using tablespace-mapping option and page backup""" + fname = self.id().split('.')[3] + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + + with node.connect("postgres") as con: + con.execute( + "CREATE TABLE tbl AS SELECT * " + "FROM generate_series(0,3) AS integer") + con.commit() + + # Full backup + backup_id = self.backup_node(backup_dir, 'node', node) + + node.stop() + node.cleanup() + + # Create waldir + waldir_path = os.path.join(node.base_dir, "waldir") + os.makedirs(waldir_path) + + # Test recovery from latest + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node, + options=[ + "-X", "%s" % (waldir_path)]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + node.slow_start() + + count = node.execute("postgres", "SELECT count(*) FROM tbl") + self.assertEqual(count[0][0], 4) + + # check pg_wal is symlink + if node.major_version >= 10: + wal_path=os.path.join(node.data_dir, "pg_wal") + else: + wal_path=os.path.join(node.data_dir, "pg_xlog") + + self.assertEqual(os.path.islink(wal_path), True) + + # Clean after yourself + self.del_test_dir(module_name, fname) From e72feb6813fa4862dbad12c657119c7bcfefe12b Mon Sep 17 00:00:00 2001 From: Sokolov Yura Date: Thu, 16 Jun 2022 09:26:02 +0300 Subject: [PATCH 13/28] rapid agent close + disable ssh control master. 
(#493) --- src/utils/file.c | 5 ++++- src/utils/remote.c | 3 +++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/utils/file.c b/src/utils/file.c index 7d1df554..7103c8f1 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -489,8 +489,10 @@ fio_disconnect(void) Assert(hdr.cop == FIO_DISCONNECTED); SYS_CHECK(close(fio_stdin)); SYS_CHECK(close(fio_stdout)); + SYS_CHECK(close(fio_stderr)); fio_stdin = 0; fio_stdout = 0; + fio_stderr = 0; wait_ssh(); } } @@ -3403,7 +3405,8 @@ fio_communicate(int in, int out) case FIO_DISCONNECT: hdr.cop = FIO_DISCONNECTED; IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - break; + free(buf); + return; case FIO_GET_ASYNC_ERROR: fio_get_async_error_impl(out); break; diff --git a/src/utils/remote.c b/src/utils/remote.c index 2bfd24d1..046ebd81 100644 --- a/src/utils/remote.c +++ b/src/utils/remote.c @@ -147,6 +147,9 @@ bool launch_agent(void) ssh_argv[ssh_argc++] = "-o"; ssh_argv[ssh_argc++] = "Compression=no"; + ssh_argv[ssh_argc++] = "-o"; + ssh_argv[ssh_argc++] = "ControlMaster=no"; + ssh_argv[ssh_argc++] = "-o"; ssh_argv[ssh_argc++] = "LogLevel=error"; From acc8edcd62d399d972e9dab8df8ecd85dbeb0fa2 Mon Sep 17 00:00:00 2001 From: avaness Date: Thu, 16 Jun 2022 11:46:19 +0300 Subject: [PATCH 14/28] minor hotfix for OptionTest.test_help_6, OptionTest.test_version_2 and tests/Readme.md FAQ (#494) Co-authored-by: Ivan Lazarev --- tests/Readme.md | 31 ++++++++++++++++++++++++++++++- tests/expected/option_help_ru.out | 1 + tests/option.py | 2 +- 3 files changed, 32 insertions(+), 2 deletions(-) diff --git a/tests/Readme.md b/tests/Readme.md index 500ed7c7..f980b6ae 100644 --- a/tests/Readme.md +++ b/tests/Readme.md @@ -1,4 +1,4 @@ -[see wiki](https://confluence.postgrespro.ru/display/DEV/pg_probackup) +****[see wiki](https://confluence.postgrespro.ru/display/DEV/pg_probackup) ``` Note: For now these tests work on Linux and "kinda" work on Windows @@ -50,3 +50,32 @@ Usage: export PG_CONFIG=/path/to/pg_config python -m unittest [-v] tests[.specific_module][.class.test] ``` + +### Troubleshooting FAQ + +#### python test failures +1. Test failure reason like +``` +testgres.exceptions.QueryException ERROR: could not open extension control file "/home/avaness/postgres/postgres.build/share/extension/amcheck.control": No such file or directory +``` + +*Solution*: you have no `/contrib/` extensions installed + +```commandline +cd +make world install +``` + +2. Test failure + +``` +FAIL: test_help_6 (tests.option.OptionTest) +``` + +*Solution*: you didn't configure postgres build with `--enable-nls` + +```commandline +cd +make distclean + --enable-nls +``` diff --git a/tests/expected/option_help_ru.out b/tests/expected/option_help_ru.out index ee8da9a1..68afb82f 100644 --- a/tests/expected/option_help_ru.out +++ b/tests/expected/option_help_ru.out @@ -178,6 +178,7 @@ pg_probackup - утилита для управления резервным к [--remote-proto] [--remote-host] [--remote-port] [--remote-path] [--remote-user] [--ssh-options] + [--dry-run] [--help] Подробнее читайте на сайте . 
diff --git a/tests/option.py b/tests/option.py index b57d7ef4..23aa97c8 100644 --- a/tests/option.py +++ b/tests/option.py @@ -24,7 +24,7 @@ class OptionTest(ProbackupTest, unittest.TestCase): """help options""" with open(os.path.join(self.dir_path, "expected/option_version.out"), "rb") as version_out: self.assertIn( - version_out.read().decode("utf-8"), + version_out.read().decode("utf-8").strip(), self.run_pb(["--version"]) ) From e11ca786b1a466aff773ebec5fae0b88692d140d Mon Sep 17 00:00:00 2001 From: Victor Spirin Date: Thu, 16 Jun 2022 12:02:27 +0300 Subject: [PATCH 15/28] [PBCKP-153] Changed expected/option_help.out and option_help_ru.out files for the tests.option.OptionTest.test_help_1 and help_6 --- tests/expected/option_help.out | 1 + tests/expected/option_help_ru.out | 2 ++ 2 files changed, 3 insertions(+) diff --git a/tests/expected/option_help.out b/tests/expected/option_help.out index 8a1de1f6..65916425 100644 --- a/tests/expected/option_help.out +++ b/tests/expected/option_help.out @@ -86,6 +86,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [-T OLDDIR=NEWDIR] [--progress] [--external-mapping=OLDDIR=NEWDIR] [--skip-external-dirs] [--no-sync] + [-X WALDIR | --waldir=WALDIR] [-I | --incremental-mode=none|checksum|lsn] [--db-include | --db-exclude] [--remote-proto] [--remote-host] diff --git a/tests/expected/option_help_ru.out b/tests/expected/option_help_ru.out index ee8da9a1..2e90eb29 100644 --- a/tests/expected/option_help_ru.out +++ b/tests/expected/option_help_ru.out @@ -86,6 +86,7 @@ pg_probackup - утилита для управления резервным к [-T OLDDIR=NEWDIR] [--progress] [--external-mapping=OLDDIR=NEWDIR] [--skip-external-dirs] [--no-sync] + [-X WALDIR | --waldir=WALDIR] [-I | --incremental-mode=none|checksum|lsn] [--db-include | --db-exclude] [--remote-proto] [--remote-host] @@ -178,6 +179,7 @@ pg_probackup - утилита для управления резервным к [--remote-proto] [--remote-host] [--remote-port] [--remote-path] [--remote-user] [--ssh-options] + [--dry-run] [--help] Подробнее читайте на сайте . 
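The tests/option.py hunk above strips the expected version string before comparing, because tests/expected/option_version.out is stored without a trailing newline (see the 2.5.5 to 2.5.6 hunk earlier) while the actual --version output may end with one. Below is a rough standalone sketch of that comparison, assuming an illustrative file path and a pg_probackup binary on PATH; the suite itself uses run_pb() and assertIn().

```
# Simplified illustration of the stripped version comparison: the expected
# fixture may lack a trailing newline, so only its stripped form is required
# to appear in the actual `--version` output.
import subprocess

def version_matches(expected_path="expected/option_version.out",
                    binary="pg_probackup"):
    with open(expected_path, "rb") as f:
        expected = f.read().decode("utf-8").strip()   # e.g. "pg_probackup 2.5.6"
    actual = subprocess.run([binary, "--version"],
                            capture_output=True, text=True).stdout
    return expected in actual

if __name__ == "__main__":
    print("version string found:", version_matches())
```

Stripping only the expected side keeps the check tolerant of the missing trailing newline in the fixture while still requiring the exact version string to appear in the output.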
From 7e16642b663ccf66507b5fa7a270c7063db44633 Mon Sep 17 00:00:00 2001 From: avaness Date: Wed, 22 Jun 2022 12:54:20 +0300 Subject: [PATCH 16/28] [PBCKP-165] get_control_value() int64 buffer vulnerability fix (#496) * [PBCKP-165] get_control_value() int64 buffer vulnerability fix - added output buffer size limit check - splitted to get_get_control_value_str() & get_control_value_int64() api - included for windows build Co-authored-by: Ivan Lazarev --- src/catalog.c | 32 ++++++------ src/dir.c | 125 +++++++++++++++++++++++++-------------------- src/pg_probackup.h | 5 +- 3 files changed, 90 insertions(+), 72 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index b4ed8c18..9d817913 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1084,15 +1084,15 @@ get_backup_filelist(pgBackup *backup, bool strict) COMP_FILE_CRC32(true, content_crc, buf, strlen(buf)); - get_control_value(buf, "path", path, NULL, true); - get_control_value(buf, "size", NULL, &write_size, true); - get_control_value(buf, "mode", NULL, &mode, true); - get_control_value(buf, "is_datafile", NULL, &is_datafile, true); - get_control_value(buf, "is_cfs", NULL, &is_cfs, false); - get_control_value(buf, "crc", NULL, &crc, true); - get_control_value(buf, "compress_alg", compress_alg_string, NULL, false); - get_control_value(buf, "external_dir_num", NULL, &external_dir_num, false); - get_control_value(buf, "dbOid", NULL, &dbOid, false); + get_control_value_str(buf, "path", path, sizeof(path),true); + get_control_value_int64(buf, "size", &write_size, true); + get_control_value_int64(buf, "mode", &mode, true); + get_control_value_int64(buf, "is_datafile", &is_datafile, true); + get_control_value_int64(buf, "is_cfs", &is_cfs, false); + get_control_value_int64(buf, "crc", &crc, true); + get_control_value_str(buf, "compress_alg", compress_alg_string, sizeof(compress_alg_string), false); + get_control_value_int64(buf, "external_dir_num", &external_dir_num, false); + get_control_value_int64(buf, "dbOid", &dbOid, false); file = pgFileInit(path); file->write_size = (int64) write_size; @@ -1107,28 +1107,28 @@ get_backup_filelist(pgBackup *backup, bool strict) /* * Optional fields */ - if (get_control_value(buf, "linked", linked, NULL, false) && linked[0]) + if (get_control_value_str(buf, "linked", linked, sizeof(linked), false) && linked[0]) { file->linked = pgut_strdup(linked); canonicalize_path(file->linked); } - if (get_control_value(buf, "segno", NULL, &segno, false)) + if (get_control_value_int64(buf, "segno", &segno, false)) file->segno = (int) segno; - if (get_control_value(buf, "n_blocks", NULL, &n_blocks, false)) + if (get_control_value_int64(buf, "n_blocks", &n_blocks, false)) file->n_blocks = (int) n_blocks; - if (get_control_value(buf, "n_headers", NULL, &n_headers, false)) + if (get_control_value_int64(buf, "n_headers", &n_headers, false)) file->n_headers = (int) n_headers; - if (get_control_value(buf, "hdr_crc", NULL, &hdr_crc, false)) + if (get_control_value_int64(buf, "hdr_crc", &hdr_crc, false)) file->hdr_crc = (pg_crc32) hdr_crc; - if (get_control_value(buf, "hdr_off", NULL, &hdr_off, false)) + if (get_control_value_int64(buf, "hdr_off", &hdr_off, false)) file->hdr_off = hdr_off; - if (get_control_value(buf, "hdr_size", NULL, &hdr_size, false)) + if (get_control_value_int64(buf, "hdr_size", &hdr_size, false)) file->hdr_size = (int) hdr_size; parray_append(files, file); diff --git a/src/dir.c b/src/dir.c index 4ebe0939..e76122ae 100644 --- a/src/dir.c +++ b/src/dir.c @@ -8,6 +8,7 @@ 
*------------------------------------------------------------------------- */ +#include #include "pg_probackup.h" #include "utils/file.h" @@ -130,6 +131,9 @@ static void opt_path_map(ConfigOption *opt, const char *arg, TablespaceList *list, const char *type); static void cleanup_tablespace(const char *path); +static void control_string_bad_format(const char* str); + + /* Tablespace mapping */ static TablespaceList tablespace_dirs = {NULL, NULL}; /* Extra directories mapping */ @@ -1467,7 +1471,7 @@ get_external_remap(char *current_dir) return current_dir; } -/* Parsing states for get_control_value() */ +/* Parsing states for get_control_value_str() */ #define CONTROL_WAIT_NAME 1 #define CONTROL_INNAME 2 #define CONTROL_WAIT_COLON 3 @@ -1481,26 +1485,62 @@ get_external_remap(char *current_dir) * The line has the following format: * {"name1":"value1", "name2":"value2"} * - * The value will be returned to "value_str" as string if it is not NULL. If it - * is NULL the value will be returned to "value_int64" as int64. + * The value will be returned in "value_int64" as int64. + * + * Returns true if the value was found in the line and parsed. + */ +bool +get_control_value_int64(const char *str, const char *name, int64 *value_int64, bool is_mandatory) +{ + + char buf_int64[32]; + + assert(value_int64); + + /* Set default value */ + *value_int64 = 0; + + if (!get_control_value_str(str, name, buf_int64, sizeof(buf_int64), is_mandatory)) + return false; + + if (!parse_int64(buf_int64, value_int64, 0)) + { + /* We assume that too big value is -1 */ + if (errno == ERANGE) + *value_int64 = BYTES_INVALID; + else + control_string_bad_format(str); + return false; + } + + return true; +} + +/* + * Get value from json-like line "str" of backup_content.control file. + * + * The line has the following format: + * {"name1":"value1", "name2":"value2"} + * + * The value will be returned to "value_str" as string. * * Returns true if the value was found in the line. */ + bool -get_control_value(const char *str, const char *name, - char *value_str, int64 *value_int64, bool is_mandatory) +get_control_value_str(const char *str, const char *name, + char *value_str, size_t value_str_size, bool is_mandatory) { int state = CONTROL_WAIT_NAME; char *name_ptr = (char *) name; char *buf = (char *) str; - char buf_int64[32], /* Buffer for "value_int64" */ - *buf_int64_ptr = buf_int64; + char *const value_str_start = value_str; - /* Set default values */ - if (value_str) - *value_str = '\0'; - else if (value_int64) - *value_int64 = 0; + assert(value_str); + assert(value_str_size > 0); + + /* Set default value */ + *value_str = '\0'; while (*buf) { @@ -1510,7 +1550,7 @@ get_control_value(const char *str, const char *name, if (*buf == '"') state = CONTROL_INNAME; else if (IsAlpha(*buf)) - goto bad_format; + control_string_bad_format(str); break; case CONTROL_INNAME: /* Found target field. Parse value. 
*/ @@ -1529,57 +1569,32 @@ get_control_value(const char *str, const char *name, if (*buf == ':') state = CONTROL_WAIT_VALUE; else if (!IsSpace(*buf)) - goto bad_format; + control_string_bad_format(str); break; case CONTROL_WAIT_VALUE: if (*buf == '"') { state = CONTROL_INVALUE; - buf_int64_ptr = buf_int64; } else if (IsAlpha(*buf)) - goto bad_format; + control_string_bad_format(str); break; case CONTROL_INVALUE: /* Value was parsed, exit */ if (*buf == '"') { - if (value_str) - { - *value_str = '\0'; - } - else if (value_int64) - { - /* Length of buf_uint64 should not be greater than 31 */ - if (buf_int64_ptr - buf_int64 >= 32) - elog(ERROR, "field \"%s\" is out of range in the line %s of the file %s", - name, str, DATABASE_FILE_LIST); - - *buf_int64_ptr = '\0'; - if (!parse_int64(buf_int64, value_int64, 0)) - { - /* We assume that too big value is -1 */ - if (errno == ERANGE) - *value_int64 = BYTES_INVALID; - else - goto bad_format; - } - } - + *value_str = '\0'; return true; } else { - if (value_str) - { - *value_str = *buf; - value_str++; - } - else - { - *buf_int64_ptr = *buf; - buf_int64_ptr++; + /* verify if value_str not exceeds value_str_size limits */ + if (value_str - value_str_start >= value_str_size - 1) { + elog(ERROR, "field \"%s\" is out of range in the line %s of the file %s", + name, str, DATABASE_FILE_LIST); } + *value_str = *buf; + value_str++; } break; case CONTROL_WAIT_NEXT_NAME: @@ -1596,18 +1611,20 @@ get_control_value(const char *str, const char *name, /* There is no close quotes */ if (state == CONTROL_INNAME || state == CONTROL_INVALUE) - goto bad_format; + control_string_bad_format(str); /* Did not find target field */ if (is_mandatory) elog(ERROR, "field \"%s\" is not found in the line %s of the file %s", name, str, DATABASE_FILE_LIST); return false; +} -bad_format: - elog(ERROR, "%s file has invalid format in line %s", - DATABASE_FILE_LIST, str); - return false; /* Make compiler happy */ +static void +control_string_bad_format(const char* str) +{ + elog(ERROR, "%s file has invalid format in line %s", + DATABASE_FILE_LIST, str); } /* @@ -1841,8 +1858,8 @@ read_database_map(pgBackup *backup) db_map_entry *db_entry = (db_map_entry *) pgut_malloc(sizeof(db_map_entry)); - get_control_value(buf, "dbOid", NULL, &dbOid, true); - get_control_value(buf, "datname", datname, NULL, true); + get_control_value_int64(buf, "dbOid", &dbOid, true); + get_control_value_str(buf, "datname", datname, sizeof(datname), true); db_entry->dbOid = dbOid; db_entry->datname = pgut_strdup(datname); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 2c4c6103..7eb62466 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1010,8 +1010,9 @@ extern CompressAlg parse_compress_alg(const char *arg); extern const char* deparse_compress_alg(int alg); /* in dir.c */ -extern bool get_control_value(const char *str, const char *name, - char *value_str, int64 *value_int64, bool is_mandatory); +extern bool get_control_value_int64(const char *str, const char *name, int64 *value_int64, bool is_mandatory); +extern bool get_control_value_str(const char *str, const char *name, + char *value_str, size_t value_str_size, bool is_mandatory); extern void dir_list_file(parray *files, const char *root, bool exclude, bool follow_symlink, bool add_root, bool backup_logs, bool skip_hidden, int external_dir_num, fio_location location); From 039e3c86786737264366b9a8bfcc675e10afeec4 Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Wed, 22 Jun 2022 14:36:55 +0500 Subject: [PATCH 17/28] Add checking enable-nls 
option in configure For correct work test_help_6 we need skip this test if PostgreSQL configured without --enable-nls --- tests/helpers/ptrack_helpers.py | 14 ++++++++++++++ tests/option.py | 16 ++++++++++------ 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index ffb87c5e..f2d31616 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -101,6 +101,19 @@ def is_enterprise(): else: return False +def enable_nls(): + cmd = [os.environ['PG_CONFIG'], '--configure'] + + p = subprocess.Popen( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) + if b'enable-nls' in p.communicate()[0]: + return True + else: + return False + class ProbackupException(Exception): def __init__(self, message, cmd): @@ -147,6 +160,7 @@ def slow_start(self, replica=False): class ProbackupTest(object): # Class attributes enterprise = is_enterprise() + enable_nls = enable_nls() def __init__(self, *args, **kwargs): super(ProbackupTest, self).__init__(*args, **kwargs) diff --git a/tests/option.py b/tests/option.py index 23aa97c8..88e72ffd 100644 --- a/tests/option.py +++ b/tests/option.py @@ -231,9 +231,13 @@ class OptionTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_help_6(self): """help options""" - self.test_env['LC_ALL'] = 'ru_RU.utf-8' - with open(os.path.join(self.dir_path, "expected/option_help_ru.out"), "rb") as help_out: - self.assertEqual( - self.run_pb(["--help"]), - help_out.read().decode("utf-8") - ) + if ProbackupTest.enable_nls: + self.test_env['LC_ALL'] = 'ru_RU.utf-8' + with open(os.path.join(self.dir_path, "expected/option_help_ru.out"), "rb") as help_out: + self.assertEqual( + self.run_pb(["--help"]), + help_out.read().decode("utf-8") + ) + else: + return unittest.skip( + 'You need configure PostgreSQL with --enabled-nls option for this test') From 61cd6209772c8ac8ec34a80444a074f66650a4bf Mon Sep 17 00:00:00 2001 From: Victor Spirin Date: Fri, 24 Jun 2022 11:36:56 +0300 Subject: [PATCH 18/28] [PBCKP-153] global variable waldir_path renamed to gl_waldir_path --- src/pg_probackup.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 2c8100b8..193cd9c3 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -122,7 +122,7 @@ static parray *datname_include_list = NULL; /* arrays for --exclude-path's */ static parray *exclude_absolute_paths_list = NULL; static parray *exclude_relative_paths_list = NULL; -static char* waldir_path = NULL; +static char* gl_waldir_path = NULL; /* checkdb options */ bool need_amcheck = false; @@ -239,7 +239,7 @@ static ConfigOption cmd_options[] = { 's', 160, "primary-conninfo", &primary_conninfo, SOURCE_CMD_STRICT }, { 's', 'S', "primary-slot-name",&replication_slot, SOURCE_CMD_STRICT }, { 'f', 'I', "incremental-mode", opt_incr_restore_mode, SOURCE_CMD_STRICT }, - { 's', 'X', "waldir", &waldir_path, SOURCE_CMD_STRICT }, + { 's', 'X', "waldir", &gl_waldir_path, SOURCE_CMD_STRICT }, /* checkdb options */ { 'b', 195, "amcheck", &need_amcheck, SOURCE_CMD_STRICT }, { 'b', 196, "heapallindexed", &heapallindexed, SOURCE_CMD_STRICT }, @@ -757,19 +757,19 @@ main(int argc, char *argv[]) restore_params->partial_db_list = datname_include_list; } - if (waldir_path) + if (gl_waldir_path) { /* clean up xlog directory name, check it's absolute */ - canonicalize_path(waldir_path); - if (!is_absolute_path(waldir_path)) + canonicalize_path(gl_waldir_path); + if 
(!is_absolute_path(gl_waldir_path)) { elog(ERROR, "WAL directory location must be an absolute path"); } - if (strlen(waldir_path) > MAXPGPATH) + if (strlen(gl_waldir_path) > MAXPGPATH) elog(ERROR, "Value specified to --waldir is too long"); } - restore_params->waldir = waldir_path; + restore_params->waldir = gl_waldir_path; } From 55d3fa8979ec00eda90e36594a6976ae739d2876 Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Wed, 29 Jun 2022 11:08:05 +0500 Subject: [PATCH 19/28] Rename enable_nls() function in ptrack_helpers.p is_nls_enabled() --- tests/helpers/ptrack_helpers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index f2d31616..b5f1fe5b 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -101,7 +101,7 @@ def is_enterprise(): else: return False -def enable_nls(): +def is_nls_enabled(): cmd = [os.environ['PG_CONFIG'], '--configure'] p = subprocess.Popen( @@ -160,7 +160,7 @@ def slow_start(self, replica=False): class ProbackupTest(object): # Class attributes enterprise = is_enterprise() - enable_nls = enable_nls() + enable_nls = is_nls_enabled() def __init__(self, *args, **kwargs): super(ProbackupTest, self).__init__(*args, **kwargs) From f544da1ecde143c57bda4205470267bed1a6056e Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Wed, 29 Jun 2022 22:17:31 +0500 Subject: [PATCH 20/28] Shorthand return-expression --- tests/helpers/ptrack_helpers.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index b5f1fe5b..18fb3fc2 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -109,10 +109,7 @@ def is_nls_enabled(): stdout=subprocess.PIPE, stderr=subprocess.PIPE ) - if b'enable-nls' in p.communicate()[0]: - return True - else: - return False + return b'enable-nls' in p.communicate()[0] class ProbackupException(Exception): From 32aae17928d165be7a8a19b015b87f8b885bc5dd Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Thu, 30 Jun 2022 02:28:29 +0300 Subject: [PATCH 21/28] [PBCKP-220] minor updates for gdb checks, checking CI tests --- tests/archive.py | 2 ++ tests/delta.py | 7 ++----- tests/helpers/ptrack_helpers.py | 17 +++++++++++++++-- tests/pgpro2068.py | 2 ++ tests/ptrack.py | 2 ++ tests/replica.py | 30 ++++++++++-------------------- 6 files changed, 33 insertions(+), 27 deletions(-) diff --git a/tests/archive.py b/tests/archive.py index 22b9d869..e01b7d37 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -290,6 +290,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase): Check pg_stop_backup_timeout, libpq-timeout requested. 
Fixed in commit d84d79668b0c139 and assert fixed by ptrack 1.7 """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( diff --git a/tests/delta.py b/tests/delta.py index f365b6f9..82fb714f 100644 --- a/tests/delta.py +++ b/tests/delta.py @@ -472,11 +472,8 @@ class DeltaTest(ProbackupTest, unittest.TestCase): make node, make full and delta stream backups, restore them and check data correctness """ - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index ffb87c5e..b8449abe 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -180,8 +180,8 @@ class ProbackupTest(object): self.test_env['LC_MESSAGES'] = 'C' self.test_env['LC_TIME'] = 'C' - self.gdb = 'PGPROBACKUP_GDB' in os.environ and \ - os.environ['PGPROBACKUP_GDB'] == 'ON' + self.gdb = 'PGPROBACKUP_GDB' in self.test_env and \ + self.test_env['PGPROBACKUP_GDB'] == 'ON' self.paranoia = 'PG_PROBACKUP_PARANOIA' in self.test_env and \ self.test_env['PG_PROBACKUP_PARANOIA'] == 'ON' @@ -810,6 +810,7 @@ class ProbackupTest(object): if self.verbose: print(self.cmd) if gdb: + #TODO REVIEW XXX no self parameter return GDBobj([binary_path] + command, self.verbose) if asynchronous: return subprocess.Popen( @@ -1861,8 +1862,15 @@ class ProbackupTest(object): self.assertFalse(fail, error_message) def gdb_attach(self, pid): + #TODO REVIEW XXX no self parameter return GDBobj([str(pid)], self.verbose, attach=True) + def _check_gdb_flag_or_skip_test(self): + if not self.gdb: + self.skipTest( + "Specify PGPROBACKUP_GDB and build without " + "optimizations for run this test" + ) class GdbException(Exception): def __init__(self, message=False): @@ -1877,6 +1885,11 @@ class GDBobj(ProbackupTest): self.verbose = verbose self.output = '' + # Check gdb flag is set up + # if not self.gdb: + # raise GdbException("No `PGPROBACKUP_GDB=on` is set, " + # "test should call ProbackupTest::check_gdb_flag_or_skip_test() on its start " + # "and be skipped") # Check gdb presense try: gdb_version, _ = subprocess.Popen( diff --git a/tests/pgpro2068.py b/tests/pgpro2068.py index a80d317d..b76345b8 100644 --- a/tests/pgpro2068.py +++ b/tests/pgpro2068.py @@ -18,6 +18,8 @@ class BugTest(ProbackupTest, unittest.TestCase): """ https://jira.postgrespro.ru/browse/PGPRO-2068 """ + self._check_gdb_flag_or_skip_test() + if not self.gdb: self.skipTest( "Specify PGPROBACKUP_GDB and build without " diff --git a/tests/ptrack.py b/tests/ptrack.py index 5878f070..08ea90f8 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -824,6 +824,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase): def test_ptrack_vacuum_full(self): """make node, make full and ptrack stream backups, restore them and check data correctness""" + self._check_gdb_flag_or_skip_test() + backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') node = self.make_simple_node( base_dir=os.path.join(module_name, self.fname, 'node'), diff --git a/tests/replica.py b/tests/replica.py index 45eed3fb..ba7076fa 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -634,11 +634,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase): def 
test_replica_stop_lsn_null_offset(self): """ """ - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') master = self.make_simple_node( @@ -722,11 +719,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase): def test_replica_stop_lsn_null_offset_next_record(self): """ """ - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') master = self.make_simple_node( @@ -828,6 +822,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase): def test_archive_replica_null_offset(self): """ """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') master = self.make_simple_node( @@ -998,11 +994,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase): make archive master, take full and page archive backups from master, set replica, make archive backup from replica """ - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') master = self.make_simple_node( @@ -1104,11 +1097,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase): def test_start_stop_lsn_in_the_same_segno(self): """ """ - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') master = self.make_simple_node( From 26939d67c444156bfea5b3701d34bd5495df0e83 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Fri, 1 Jul 2022 03:57:36 +0300 Subject: [PATCH 22/28] [PBCKP-220] removed inheritance GDBObj->ProbackupTest --- tests/helpers/ptrack_helpers.py | 26 ++++++++++++++------------ tests/replica.py | 4 ---- 2 files changed, 14 insertions(+), 16 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index b8449abe..e69d1213 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -810,8 +810,7 @@ class ProbackupTest(object): if self.verbose: print(self.cmd) if gdb: - #TODO REVIEW XXX no self parameter - return GDBobj([binary_path] + command, self.verbose) + return GDBobj([binary_path] + command, self) if asynchronous: return subprocess.Popen( [binary_path] + command, @@ -1862,8 +1861,7 @@ class ProbackupTest(object): self.assertFalse(fail, error_message) def gdb_attach(self, pid): - #TODO REVIEW XXX no self parameter - return GDBobj([str(pid)], self.verbose, attach=True) + return GDBobj([str(pid)], self, attach=True) def _check_gdb_flag_or_skip_test(self): if not self.gdb: @@ -1872,24 +1870,28 @@ class ProbackupTest(object): "optimizations for run this test" ) + class GdbException(Exception): - def __init__(self, message=False): + def __init__(self, message="False"): self.message = message def __str__(self): return '\n ERROR: {0}\n'.format(repr(self.message)) -class GDBobj(ProbackupTest): - def __init__(self, cmd, verbose, attach=False): - self.verbose = verbose +#TODO 
REVIEW XXX no inheritance needed +# class GDBobj(ProbackupTest): +class GDBobj: + # TODO REVIEW XXX Type specification env:ProbackupTest is only for python3, is it ok? + def __init__(self, cmd, env: ProbackupTest, attach=False): + self.verbose = env.verbose self.output = '' # Check gdb flag is set up - # if not self.gdb: - # raise GdbException("No `PGPROBACKUP_GDB=on` is set, " - # "test should call ProbackupTest::check_gdb_flag_or_skip_test() on its start " - # "and be skipped") + if not env.gdb: + raise GdbException("No `PGPROBACKUP_GDB=on` is set, " + "test should call ProbackupTest::check_gdb_flag_or_skip_test() on its start " + "and be skipped") # Check gdb presense try: gdb_version, _ = subprocess.Popen( diff --git a/tests/replica.py b/tests/replica.py index ba7076fa..85034d50 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -719,7 +719,6 @@ class ReplicaTest(ProbackupTest, unittest.TestCase): def test_replica_stop_lsn_null_offset_next_record(self): """ """ - self._check_gdb_flag_or_skip_test() fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') @@ -743,7 +742,6 @@ class ReplicaTest(ProbackupTest, unittest.TestCase): # freeze bgwriter to get rid of RUNNING XACTS records bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0] - gdb_checkpointer = self.gdb_attach(bgwriter_pid) self.backup_node(backup_dir, 'master', master) @@ -1097,7 +1095,6 @@ class ReplicaTest(ProbackupTest, unittest.TestCase): def test_start_stop_lsn_in_the_same_segno(self): """ """ - self._check_gdb_flag_or_skip_test() fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') @@ -1121,7 +1118,6 @@ class ReplicaTest(ProbackupTest, unittest.TestCase): # freeze bgwriter to get rid of RUNNING XACTS records bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0] - gdb_checkpointer = self.gdb_attach(bgwriter_pid) self.backup_node(backup_dir, 'master', master, options=['--stream']) From 9c6e3ce3f751162cf7ac5405d0cc4ff462324181 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Fri, 1 Jul 2022 13:52:20 +0300 Subject: [PATCH 23/28] [PBCKP-220] all gdb tests fixup --- .travis.yml | 1 + tests/archive.py | 2 ++ tests/backup.py | 18 ++++++++++++++++++ tests/checkdb.py | 2 ++ tests/helpers/ptrack_helpers.py | 3 --- tests/locking.py | 16 ++++++++++++++++ tests/logging.py | 4 ++++ tests/merge.py | 32 ++++++++++++++++++++++++++++++++ tests/replica.py | 1 + tests/restore.py | 4 ++++ tests/retention.py | 12 ++++++++++++ tests/validate.py | 2 ++ 12 files changed, 94 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 26b2bc4e..bac8a2c0 100644 --- a/.travis.yml +++ b/.travis.yml @@ -34,6 +34,7 @@ env: - PG_VERSION=10 PG_BRANCH=REL_10_STABLE - PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE + - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_14_STABLE PGPROBACKUP_GDB=ON PG_PROBACKUP_TEST_BASIC=OFF # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=archive # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=backup # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup diff --git a/tests/archive.py b/tests/archive.py index e01b7d37..cd8d4404 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -228,6 +228,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase): Check pg_stop_backup_timeout, needed backup_timeout Fixed in commit d84d79668b0c139 
and assert fixed by ptrack 1.7 """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( diff --git a/tests/backup.py b/tests/backup.py index 68240901..20ac480e 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -1095,6 +1095,8 @@ class BackupTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_drop_rel_during_full_backup(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1244,6 +1246,8 @@ class BackupTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_drop_rel_during_backup_delta(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1313,6 +1317,8 @@ class BackupTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_drop_rel_during_backup_page(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1445,6 +1451,8 @@ class BackupTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_backup_concurrent_drop_table(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1579,6 +1587,8 @@ class BackupTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_sigint_handling(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1618,6 +1628,8 @@ class BackupTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_sigterm_handling(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1656,6 +1668,8 @@ class BackupTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_sigquit_handling(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -2906,6 +2920,8 @@ class BackupTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_missing_wal_segment(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -3292,6 +3308,8 @@ class BackupTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_backup_atexit(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( diff --git a/tests/checkdb.py b/tests/checkdb.py index 9b7adcd7..68dec14b 100644 --- a/tests/checkdb.py +++ b/tests/checkdb.py @@ -17,6 +17,8 @@ class CheckdbTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_checkdb_amcheck_only_sanity(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = 
os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index e69d1213..e3036d9c 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1879,10 +1879,7 @@ class GdbException(Exception): return '\n ERROR: {0}\n'.format(repr(self.message)) -#TODO REVIEW XXX no inheritance needed -# class GDBobj(ProbackupTest): class GDBobj: - # TODO REVIEW XXX Type specification env:ProbackupTest is only for python3, is it ok? def __init__(self, cmd, env: ProbackupTest, attach=False): self.verbose = env.verbose self.output = '' diff --git a/tests/locking.py b/tests/locking.py index ef7aa1f2..0fe954ca 100644 --- a/tests/locking.py +++ b/tests/locking.py @@ -17,6 +17,8 @@ class LockingTest(ProbackupTest, unittest.TestCase): run validate, expect it to successfully executed, concurrent RUNNING backup with pid file and active process is legal """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), @@ -72,6 +74,8 @@ class LockingTest(ProbackupTest, unittest.TestCase): RUNNING backup with pid file AND without active pid is legal, but his status must be changed to ERROR and pid file is deleted """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), @@ -142,6 +146,8 @@ class LockingTest(ProbackupTest, unittest.TestCase): RUNNING backup with pid file AND without active pid is legal, but his status must be changed to ERROR and pid file is deleted """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), @@ -240,6 +246,8 @@ class LockingTest(ProbackupTest, unittest.TestCase): RUNNING backup without pid file AND without active pid is legal, his status must be changed to ERROR """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), @@ -310,6 +318,8 @@ class LockingTest(ProbackupTest, unittest.TestCase): Expect restore to sucseed because read-only locks do not conflict """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), @@ -352,6 +362,8 @@ class LockingTest(ProbackupTest, unittest.TestCase): Expect restore to fail because validation of intermediate backup is impossible """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), @@ -443,6 +455,8 @@ class LockingTest(ProbackupTest, unittest.TestCase): and stop it in the middle, delete full backup. Expect it to fail. 
""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), @@ -585,6 +599,8 @@ class LockingTest(ProbackupTest, unittest.TestCase): """ Make sure that shared lock leaves no files with pids """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), diff --git a/tests/logging.py b/tests/logging.py index 47143cfb..70ebcf6d 100644 --- a/tests/logging.py +++ b/tests/logging.py @@ -12,6 +12,10 @@ class LogTest(ProbackupTest, unittest.TestCase): # @unittest.expectedFailure # PGPRO-2154 def test_log_rotation(self): + """ + """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), diff --git a/tests/merge.py b/tests/merge.py index fe0927f4..5f092543 100644 --- a/tests/merge.py +++ b/tests/merge.py @@ -975,6 +975,8 @@ class MergeTest(ProbackupTest, unittest.TestCase): """ Check that failed MERGE can be continued """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1051,6 +1053,8 @@ class MergeTest(ProbackupTest, unittest.TestCase): """ Fail merge via gdb, corrupt DELTA backup, try to continue merge """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1148,6 +1152,8 @@ class MergeTest(ProbackupTest, unittest.TestCase): """ Check that failed MERGE on delete can be continued """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1219,6 +1225,8 @@ class MergeTest(ProbackupTest, unittest.TestCase): Check that failed MERGE cannot be continued if intermediate backup is missing. 
""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1409,6 +1417,8 @@ class MergeTest(ProbackupTest, unittest.TestCase): check that crashing after opening backup.control for writing will not result in losing backup metadata """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1461,6 +1471,8 @@ class MergeTest(ProbackupTest, unittest.TestCase): for writing will not result in losing metadata about backup files TODO: rewrite """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1552,6 +1564,8 @@ class MergeTest(ProbackupTest, unittest.TestCase): for writing will not result in losing metadata about backup files TODO: rewrite """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1639,6 +1653,8 @@ class MergeTest(ProbackupTest, unittest.TestCase): def test_failed_merge_after_delete(self): """ """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1720,6 +1736,8 @@ class MergeTest(ProbackupTest, unittest.TestCase): def test_failed_merge_after_delete_1(self): """ """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1796,6 +1814,8 @@ class MergeTest(ProbackupTest, unittest.TestCase): def test_failed_merge_after_delete_2(self): """ """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1858,6 +1878,8 @@ class MergeTest(ProbackupTest, unittest.TestCase): def test_failed_merge_after_delete_3(self): """ """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -2281,6 +2303,8 @@ class MergeTest(ProbackupTest, unittest.TestCase): def test_idempotent_merge(self): """ """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -2580,6 +2604,8 @@ class MergeTest(ProbackupTest, unittest.TestCase): page header map cannot be trusted when running retry """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -2626,6 +2652,8 @@ class MergeTest(ProbackupTest, unittest.TestCase): def test_missing_data_file(self): """ """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -2684,6 +2712,8 @@ class MergeTest(ProbackupTest, unittest.TestCase): def test_missing_non_data_file(self): """ """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ 
-2741,6 +2771,8 @@ class MergeTest(ProbackupTest, unittest.TestCase): def test_merge_remote_mode(self): """ """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( diff --git a/tests/replica.py b/tests/replica.py index 85034d50..0a75ea17 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -719,6 +719,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase): def test_replica_stop_lsn_null_offset_next_record(self): """ """ + self._check_gdb_flag_or_skip_test() fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') diff --git a/tests/restore.py b/tests/restore.py index bbdadeb2..5a00bc23 100644 --- a/tests/restore.py +++ b/tests/restore.py @@ -2379,6 +2379,8 @@ class RestoreTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_restore_concurrent_drop_table(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -3797,6 +3799,8 @@ class RestoreTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_concurrent_restore(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( diff --git a/tests/retention.py b/tests/retention.py index 19204807..b0399a23 100644 --- a/tests/retention.py +++ b/tests/retention.py @@ -1499,6 +1499,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase): FULL -------window """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), @@ -1546,6 +1548,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase): FULL -------window """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), @@ -1588,6 +1592,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase): def test_retention_redundancy_overlapping_chains(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), @@ -1636,6 +1642,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase): def test_retention_redundancy_overlapping_chains_1(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), @@ -1744,6 +1752,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase): """ Check that retention purge works correctly with MERGING backups """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -2536,6 +2546,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase): """ https://github.com/postgrespro/pg_probackup/issues/328 """ + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( diff --git a/tests/validate.py b/tests/validate.py index e6282638..41aa9ea2 100644 --- a/tests/validate.py +++ b/tests/validate.py @@ -3565,6 +3565,8 @@ class ValidateTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") 
def test_validation_after_backup(self): """""" + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( From 125c9292a6ddd9372263894333b96ebdbb3ac767 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Mon, 4 Jul 2022 02:37:38 +0300 Subject: [PATCH 24/28] [PBCKP-220] ALL tests with PGPROBACKUP=ON on CI --- .travis.yml | 2 +- travis/run_tests.sh | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index bac8a2c0..e6330c4f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -34,7 +34,7 @@ env: - PG_VERSION=10 PG_BRANCH=REL_10_STABLE - PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE - - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_14_STABLE PGPROBACKUP_GDB=ON PG_PROBACKUP_TEST_BASIC=OFF + - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_14_STABLE MODE=TMP # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=archive # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=backup # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup diff --git a/travis/run_tests.sh b/travis/run_tests.sh index 37614f97..c20c95dd 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -112,6 +112,9 @@ if [ "$MODE" = "basic" ]; then echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC} python3 -m unittest -v tests python3 -m unittest -v tests.init +elif [ "$MODE" = "TMP" ]; then + echo MODE=TMP + PGPROBACKUP_GDB=ON python3 -m unittest -v tests else echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC} python3 -m unittest -v tests.$MODE From 3e8a08edd5f9a20dd3d6f77914cd2b9c745a7980 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Mon, 4 Jul 2022 06:04:17 +0300 Subject: [PATCH 25/28] [PBCKP-220] removed FULL tests, PGPROBACKUP=ON and other flags added on CI --- .gitignore | 1 - .travis.yml | 3 ++- tests/Readme.md | 28 ++++++++-------------------- tests/checkdb.py | 7 ++----- tests/pgpro2068.py | 5 ----- tests/replica.py | 7 ++----- tests/validate.py | 7 ++----- travis/run_tests.sh | 13 +++++++------ 8 files changed, 23 insertions(+), 48 deletions(-) diff --git a/.gitignore b/.gitignore index c0b4de33..50247360 100644 --- a/.gitignore +++ b/.gitignore @@ -50,7 +50,6 @@ /docker-compose.yml /Dockerfile /Dockerfile.in -/run_tests.sh /make_dockerfile.sh /backup_restore.sh diff --git a/.travis.yml b/.travis.yml index e6330c4f..d5c9c68b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -34,7 +34,8 @@ env: - PG_VERSION=10 PG_BRANCH=REL_10_STABLE - PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE - - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_14_STABLE MODE=TMP +# - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_14_STABLE MODE=FULL ENV_FLAGS=PGPROBACKUP_GDB=ON + - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_14_STABLE ENV_FLAGS=PGPROBACKUP_GDB=ON # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=archive # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=backup # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup diff --git a/tests/Readme.md b/tests/Readme.md index f980b6ae..11c5272f 100644 --- a/tests/Readme.md +++ b/tests/Readme.md @@ -51,31 +51,19 @@ Usage: python -m unittest [-v] tests[.specific_module][.class.test] ``` -### 
Troubleshooting FAQ +# Troubleshooting FAQ -#### python test failures -1. Test failure reason like +## Python tests failure +### 1. Could not open extension "..." ``` -testgres.exceptions.QueryException ERROR: could not open extension control file "/home/avaness/postgres/postgres.build/share/extension/amcheck.control": No such file or directory +testgres.exceptions.QueryException ERROR: could not open extension control file "/share/extension/amcheck.control": No such file or directory ``` -*Solution*: you have no `/contrib/` extensions installed +#### Solution: + +You have no `/contrib/...` extension installed, please do ```commandline cd -make world install -``` - -2. Test failure - -``` -FAIL: test_help_6 (tests.option.OptionTest) -``` - -*Solution*: you didn't configure postgres build with `--enable-nls` - -```commandline -cd -make distclean - --enable-nls +make install-world ``` diff --git a/tests/checkdb.py b/tests/checkdb.py index 68dec14b..5b6dda25 100644 --- a/tests/checkdb.py +++ b/tests/checkdb.py @@ -546,11 +546,8 @@ class CheckdbTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_checkdb_sigint_handling(self): """""" - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( diff --git a/tests/pgpro2068.py b/tests/pgpro2068.py index b76345b8..3baa0ba0 100644 --- a/tests/pgpro2068.py +++ b/tests/pgpro2068.py @@ -20,11 +20,6 @@ class BugTest(ProbackupTest, unittest.TestCase): """ self._check_gdb_flag_or_skip_test() - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), diff --git a/tests/replica.py b/tests/replica.py index 0a75ea17..acf655aa 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -539,11 +539,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase): start backup from replica, during backup promote replica check that backup is failed """ - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') master = self.make_simple_node( diff --git a/tests/validate.py b/tests/validate.py index 41aa9ea2..22a03c3b 100644 --- a/tests/validate.py +++ b/tests/validate.py @@ -1089,11 +1089,8 @@ class ValidateTest(ProbackupTest, unittest.TestCase): """ check that interrupt during validation is handled correctly """ - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) + self._check_gdb_flag_or_skip_test() + fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), diff --git a/travis/run_tests.sh b/travis/run_tests.sh index c20c95dd..5af619f9 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -107,17 +107,18 @@ echo PGPROBACKUPBIN=${PGPROBACKUPBIN} echo PGPROBACKUP_SSH_REMOTE=${PGPROBACKUP_SSH_REMOTE} echo PGPROBACKUP_GDB=${PGPROBACKUP_GDB} echo PG_PROBACKUP_PTRACK=${PG_PROBACKUP_PTRACK} +echo ADDITIONAL_ENV_FLAGS=${ENV_FLAGS} if [ "$MODE" = "basic" ]; then export PG_PROBACKUP_TEST_BASIC=ON echo 
PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC} - python3 -m unittest -v tests - python3 -m unittest -v tests.init -elif [ "$MODE" = "TMP" ]; then - echo MODE=TMP - PGPROBACKUP_GDB=ON python3 -m unittest -v tests + ${ADDITIONAL_ENV_FLAGS} python3 -m unittest -v tests + ${ADDITIONAL_ENV_FLAGS} python3 -m unittest -v tests.init +#elif [ "$MODE" = "FULL" ]; then +# echo MODE=FULL +# ${ADDITIONAL_ENV_FLAGS} python3 -m unittest -v tests else echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC} - python3 -m unittest -v tests.$MODE + ${ADDITIONAL_ENV_FLAGS} python3 -m unittest -v tests.$MODE fi # Generate *.gcov files From 37244019508ae3396e521f2ed87bb8c2eca9108d Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Mon, 4 Jul 2022 14:37:36 +0300 Subject: [PATCH 26/28] [PBCKP-220] final junk cleanup --- .travis.yml | 2 -- travis/run_tests.sh | 10 +++------- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/.travis.yml b/.travis.yml index d5c9c68b..26b2bc4e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -34,8 +34,6 @@ env: - PG_VERSION=10 PG_BRANCH=REL_10_STABLE - PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE -# - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_14_STABLE MODE=FULL ENV_FLAGS=PGPROBACKUP_GDB=ON - - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_14_STABLE ENV_FLAGS=PGPROBACKUP_GDB=ON # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=archive # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=backup # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup diff --git a/travis/run_tests.sh b/travis/run_tests.sh index 5af619f9..37614f97 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -107,18 +107,14 @@ echo PGPROBACKUPBIN=${PGPROBACKUPBIN} echo PGPROBACKUP_SSH_REMOTE=${PGPROBACKUP_SSH_REMOTE} echo PGPROBACKUP_GDB=${PGPROBACKUP_GDB} echo PG_PROBACKUP_PTRACK=${PG_PROBACKUP_PTRACK} -echo ADDITIONAL_ENV_FLAGS=${ENV_FLAGS} if [ "$MODE" = "basic" ]; then export PG_PROBACKUP_TEST_BASIC=ON echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC} - ${ADDITIONAL_ENV_FLAGS} python3 -m unittest -v tests - ${ADDITIONAL_ENV_FLAGS} python3 -m unittest -v tests.init -#elif [ "$MODE" = "FULL" ]; then -# echo MODE=FULL -# ${ADDITIONAL_ENV_FLAGS} python3 -m unittest -v tests + python3 -m unittest -v tests + python3 -m unittest -v tests.init else echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC} - ${ADDITIONAL_ENV_FLAGS} python3 -m unittest -v tests.$MODE + python3 -m unittest -v tests.$MODE fi # Generate *.gcov files From 3071cec4f647582eac8bd58ce5624f97e90745c4 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 11 Jul 2022 14:50:22 +0300 Subject: [PATCH 27/28] fix ArchiveTest.test_pgpro434_4 --- tests/archive.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/archive.py b/tests/archive.py index cd8d4404..52fb225e 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -317,7 +317,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase): gdb.set_breakpoint('pg_stop_backup') gdb.run_until_break() - self.set_auto_conf(node, {'archive_command': "'exit 1'"}) + self.set_auto_conf(node, {'archive_command': 'exit 1'}) node.reload() os.environ["PGAPPNAME"] = "foo" From 81c53ea0bbda3251ab527dc950327b86e69e7645 Mon Sep 17 00:00:00 2001 From: Ivan Lazarev Date: Tue, 12 Jul 2022 15:04:21 +0300 Subject: [PATCH 28/28] [PBCKP-231] hotfix for python2 --- tests/helpers/ptrack_helpers.py | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index e3036d9c..de774274 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1880,7 +1880,7 @@ class GdbException(Exception): class GDBobj: - def __init__(self, cmd, env: ProbackupTest, attach=False): + def __init__(self, cmd, env, attach=False): self.verbose = env.verbose self.output = ''
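
The PBCKP-165 patch above splits get_control_value() into get_control_value_str(), which copies at most value_str_size - 1 bytes and errors out on overflow, and get_control_value_int64(), which goes through a fixed 32-byte buffer before parse_int64(). The backup_content.control lines it reads have the shape {"name1":"value1", "name2":"value2"}. As a rough, non-authoritative illustration of that line format and of the bounded-copy check — written in Python rather than C, and using a simple string search instead of the patch's state machine:

    # Rough illustration of the bounded get_control_value_str() idea:
    # scan a {"name":"value", ...} control line and reject values that
    # would overflow a fixed-size destination buffer.
    # This is NOT the C implementation, only a sketch of the format and check.
    def get_control_value_str(line, name, value_size):
        marker = '"%s":"' % name
        start = line.find(marker)
        if start < 0:
            return None                       # field not present in this line
        start += len(marker)
        end = line.find('"', start)
        if end < 0:
            raise ValueError('line %r has invalid format' % line)
        value = line[start:end]
        if len(value) >= value_size:          # mirrors the value_str_size - 1 limit
            raise ValueError('field "%s" is out of range in line %r' % (name, line))
        return value

    line = '{"path":"base/1/1234", "size":"8192", "mode":"33184"}'
    print(get_control_value_str(line, 'size', 32))   # -> 8192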
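
The enable-nls detection from patches 17, 19 and 20 reduces to asking pg_config --configure for the original build flags and skipping test_help_6 when --enable-nls is missing. A standalone sketch of that check, assuming the PG_CONFIG environment variable points at the pg_config binary of the build under test, as the test helpers do:

    # Sketch of the is_nls_enabled() check from the ptrack_helpers.py patches.
    # Assumes PG_CONFIG points at the pg_config binary of the build under test.
    import os
    import subprocess

    def is_nls_enabled():
        cmd = [os.environ['PG_CONFIG'], '--configure']
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # pg_config --configure echoes the original configure options,
        # e.g. "'--enable-nls' '--with-openssl' ...".
        return b'enable-nls' in p.communicate()[0]

    if __name__ == '__main__':
        print('NLS enabled:', is_nls_enabled())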
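
The PBCKP-220 series replaces the per-test "if not self.gdb: self.skipTest(...)" boilerplate with a shared _check_gdb_flag_or_skip_test() helper and passes the test object into GDBobj instead of having GDBobj inherit from ProbackupTest. A minimal, self-contained sketch of that pattern follows; the class and method names mirror the diffs, but the bodies are simplified stand-ins, not the project's actual helpers:

    # Minimal sketch of the PGPROBACKUP_GDB gating pattern from PBCKP-220.
    # Names mirror the patches above; bodies are simplified for illustration.
    import os
    import unittest

    class GdbException(Exception):
        pass

    class GDBobj:
        # Takes the test object ("env") instead of inheriting from it, so the
        # gdb wrapper can reuse env.verbose and env.gdb without being a test.
        def __init__(self, cmd, env, attach=False):
            if not env.gdb:
                raise GdbException(
                    "No `PGPROBACKUP_GDB=on` is set; the test should call "
                    "_check_gdb_flag_or_skip_test() at its start and be skipped")
            self.verbose = env.verbose
            self.cmd = (['gdb', '-p'] if attach else ['gdb', '--args']) + cmd

    class ProbackupTest(unittest.TestCase):
        def setUp(self):
            self.verbose = True
            # Read the flag from the environment, as the helpers do.
            self.gdb = os.environ.get('PGPROBACKUP_GDB') == 'ON'

        def _check_gdb_flag_or_skip_test(self):
            if not self.gdb:
                self.skipTest(
                    "Specify PGPROBACKUP_GDB and build without "
                    "optimizations for run this test")

        def test_something_under_gdb(self):
            # Every gdb-dependent test starts with the guard instead of
            # repeating the skip boilerplate inline.
            self._check_gdb_flag_or_skip_test()
            gdb = GDBobj(['pg_probackup', 'backup'], self)
            self.assertTrue(gdb.cmd)

The point of passing env rather than inheriting is that GDBobj no longer needs to be a ProbackupTest itself to see verbose and the PGPROBACKUP_GDB flag, which is exactly the change patch 22 makes.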