pg_probackup (mirror of https://github.com/postgrespro/pg_probackup.git)

Commit: Merge branch 'REL_2_5' into PBCKP-155
.github/workflows/build.yml (vendored, new file, 94 lines)
@@ -0,0 +1,94 @@
name: Build Probackup

on:
  push:
    branches:
      - "**"
  # Runs triggered by pull requests are disabled to prevent executing potentially unsafe code from public pull requests
  # pull_request:
  #   branches:
  #     - main

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

jobs:

  build-win2019:

    runs-on:
      - windows-2019

    env:
      zlib_dir: C:\dep\zlib

    steps:

      - uses: actions/checkout@v2

      - name: Install pacman packages
        run: |
          $env:PATH += ";C:\msys64\usr\bin"
          pacman -S --noconfirm --needed bison flex

      - name: Make zlib
        run: |
          git clone -b v1.2.11 --depth 1 https://github.com/madler/zlib.git
          cd zlib
          cmake -DCMAKE_INSTALL_PREFIX:PATH=C:\dep\zlib -G "Visual Studio 16 2019" .
          cmake --build . --config Release --target ALL_BUILD
          cmake --build . --config Release --target INSTALL
          copy C:\dep\zlib\lib\zlibstatic.lib C:\dep\zlib\lib\zdll.lib
          copy C:\dep\zlib\bin\zlib.dll C:\dep\zlib\lib

      - name: Get Postgres sources
        run: git clone -b REL_14_STABLE https://github.com/postgres/postgres.git

      # Copy ptrack to contrib to build the ptrack extension
      # Convert line breaks in the patch file to LF otherwise the patch doesn't apply
      - name: Get Ptrack sources
        run: |
          git clone -b master --depth 1 https://github.com/postgrespro/ptrack.git
          Copy-Item -Path ptrack -Destination postgres\contrib -Recurse
          (Get-Content ptrack\patches\REL_14_STABLE-ptrack-core.diff -Raw).Replace("`r`n","`n") | Set-Content ptrack\patches\REL_14_STABLE-ptrack-core.diff -Force -NoNewline
          cd postgres
          git apply -3 ../ptrack/patches/REL_14_STABLE-ptrack-core.diff

      - name: Build Postgres
        run: |
          $env:PATH += ";C:\msys64\usr\bin"
          cd postgres\src\tools\msvc
          (Get-Content config_default.pl) -Replace "zlib *=>(.*?)(?=,? *#)", "zlib => '${{ env.zlib_dir }}'" | Set-Content config.pl
          cmd.exe /s /c "`"C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat`" amd64 && .\build.bat"

      - name: Build Probackup
        run: cmd.exe /s /c "`"C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat`" amd64 && perl .\gen_probackup_project.pl `"${{ github.workspace }}`"\postgres"

      - name: Install Postgres
        run: |
          cd postgres
          src\tools\msvc\install.bat postgres_install

      - name: Install Testgres
        run: |
          git clone -b no-port-for --single-branch --depth 1 https://github.com/postgrespro/testgres.git
          cd testgres
          python setup.py install

      # Grant the Github runner user full control of the workspace for initdb to successfully process the data folder
      - name: Test Probackup
        run: |
          icacls.exe "${{ github.workspace }}" /grant "${env:USERNAME}:(OI)(CI)F"
          $env:PATH += ";${{ github.workspace }}\postgres\postgres_install\lib;${{ env.zlib_dir }}\lib"
          $Env:LC_MESSAGES = "English"
          $Env:PG_CONFIG = "${{ github.workspace }}\postgres\postgres_install\bin\pg_config.exe"
          $Env:PGPROBACKUPBIN = "${{ github.workspace }}\postgres\Release\pg_probackup\pg_probackup.exe"
          $Env:PG_PROBACKUP_PTRACK = "ON"
          If (!$Env:MODE -Or $Env:MODE -Eq "basic") {
            $Env:PG_PROBACKUP_TEST_BASIC = "ON"
            python -m unittest -v tests
            python -m unittest -v tests.init
          } else {
            python -m unittest -v tests.$Env:MODE
          }
.gitignore (vendored, 1 line removed)
@@ -50,7 +50,6 @@
 /docker-compose.yml
 /Dockerfile
 /Dockerfile.in
-/run_tests.sh
 /make_dockerfile.sh
 /backup_restore.sh
@@ -41,11 +41,13 @@ env:
 # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=delta
 # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=locking
 # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=merge
+# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=option
 # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=page
 # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=ptrack
 # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=replica
 # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=retention
 # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=restore
+# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=time_consuming

 jobs:
   allow_failures:
README.md (14 lines added)
@@ -224,3 +224,17 @@ Postgres Professional, Moscow, Russia.
 ## Credits

 `pg_probackup` utility is based on `pg_arman`, that was originally written by NTT and then developed and maintained by Michael Paquier.
+
+
+### Localization files (*.po)
+
+How to add a new translation language:
+
+1. Add the --enable-nls flag in configure.
+2. Build postgres.
+3. Add the files required for translation to GETTEXT_FILES in nls.mk in the pg_probackup folder.
+4. Run 'make update-po' in the pg_probackup folder.
+5. As a result, the progname.pot file will be created. Copy its content and add it to the file for the desired language.
+6. Add the required language to AVAIL_LANGUAGES in nls.mk in the pg_probackup folder.
+
+For more information, follow the link below:
+https://postgrespro.ru/docs/postgresql/12/nls-translator
@@ -3563,6 +3563,14 @@ pg_probackup catchup -b <replaceable>catchup_mode</replaceable> --source-pgdata=
     of threads with the <option>--threads</option> option:
 <programlisting>
 pg_probackup catchup -b <replaceable>catchup_mode</replaceable> --source-pgdata=<replaceable>path_to_pgdata_on_remote_server</replaceable> --destination-pgdata=<replaceable>path_to_local_dir</replaceable> --stream --threads=<replaceable>num_threads</replaceable>
+</programlisting>
+   </para>
+   <para>
+    Before cloning/synchronising a <productname>PostgreSQL</productname> instance, you can run the
+    <command>catchup</command> command with the <option>--dry-run</option> flag
+    to estimate the size of data files to be transferred, but make no changes on disk:
+<programlisting>
+pg_probackup catchup -b <replaceable>catchup_mode</replaceable> --source-pgdata=<replaceable>path_to_pgdata_on_remote_server</replaceable> --destination-pgdata=<replaceable>path_to_local_dir</replaceable> --stream --dry-run
 </programlisting>
   </para>
   <para>

@@ -4482,7 +4490,7 @@ pg_probackup archive-get -B <replaceable>backup_dir</replaceable> --instance <replaceable>instance_name</replaceable>
 pg_probackup catchup -b <replaceable>catchup_mode</replaceable>
 --source-pgdata=<replaceable>path_to_pgdata_on_remote_server</replaceable>
 --destination-pgdata=<replaceable>path_to_local_dir</replaceable>
-[--help] [-j | --threads=<replaceable>num_threads</replaceable>] [--stream]
+[--help] [-j | --threads=<replaceable>num_threads</replaceable>] [--stream] [--dry-run]
 [--temp-slot] [-P | --perm-slot] [-S | --slot=<replaceable>slot_name</replaceable>]
 [--exclude-path=<replaceable>PATHNAME</replaceable>]
 [-T <replaceable>OLDDIR</replaceable>=<replaceable>NEWDIR</replaceable>]

@@ -4571,6 +4579,19 @@ pg_probackup catchup -b <replaceable>catchup_mode</replaceable>
     </listitem>
    </varlistentry>

+   <varlistentry>
+    <term><option>--dry-run</option></term>
+    <listitem>
+     <para>
+      Displays the total size of the files to be transferred by <command>catchup</command>.
+      This flag initiates a trial run of <command>catchup</command>, which does
+      not actually create, delete or move files on disk. WAL streaming is skipped with <option>--dry-run</option>.
+      This flag also allows you to check that
+      all the options are correct and cloning/synchronising is ready to run.
+     </para>
+    </listitem>
+   </varlistentry>
+
    <varlistentry>
     <term><option>-x</option>=<replaceable>path_prefix</replaceable></term>
     <term><option>--exclude-path</option>=<replaceable>path_prefix</replaceable></term>

@@ -4591,17 +4612,6 @@ pg_probackup catchup -b <replaceable>catchup_mode</replaceable>
     </listitem>
    </varlistentry>
-
-   <varlistentry>
-    <term><option>--stream</option></term>
-    <listitem>
-     <para>
-      Copies the instance in <link linkend="pbk-stream-mode">STREAM</link> WAL delivery mode,
-      including all the necessary WAL files by streaming them from
-      the instance server via replication protocol.
-     </para>
-    </listitem>
-   </varlistentry>

    <varlistentry>
     <term><option>--temp-slot</option></term>
     <listitem>
@@ -16,8 +16,8 @@ if (($#ARGV+1)==1)
 	print STDERR "Usage $0 pg-source-dir\n";
 	print STDERR "Like this:\n";
 	print STDERR "$0 C:/PgProject/postgresql.10dev/postgrespro\n";
-	print STDERR "May be need input this before: \n";
-	print STDERR "CALL \"C:\\Program Files (x86)\\Microsoft Visual Studio 12.0\\VC\\vcvarsall\" amd64\n";
+	print STDERR "May need to run this first:\n";
+	print STDERR "CALL \"C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Community\\VC\\Auxiliary\\Build\\vcvarsall.bat\" amd64\n";
 	exit 1;
 }
 }

@@ -133,7 +133,7 @@ sub build_pgprobackup
 	unless (-d 'src/tools/msvc' && -d 'src');

 	# my $vsVersion = DetermineVisualStudioVersion();
-	my $vsVersion = '12.00';
+	my $vsVersion = '16.00';

 	$solution = CreateSolution($vsVersion, $config);
nls.mk (new file, 6 lines)
@@ -0,0 +1,6 @@
# contrib/pg_probackup/nls.mk
CATALOG_NAME     = pg_probackup
AVAIL_LANGUAGES  = ru
GETTEXT_FILES    = src/help.c
GETTEXT_TRIGGERS = $(FRONTEND_COMMON_GETTEXT_TRIGGERS)
GETTEXT_FLAGS    = $(FRONTEND_COMMON_GETTEXT_FLAGS)
@@ -20,7 +20,15 @@ ulimit -n 1024

 if [ ${DISTRIB} = 'centos' ] ; then
     sed -i 's|^baseurl=http://|baseurl=https://|g' /etc/yum.repos.d/*.repo
+    if [ ${DISTRIB_VERSION} = '8' ]; then
+        sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo
+        sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo
+    fi
     yum update -y
+    if [ ${DISTRIB_VERSION} = '8' ]; then
+        sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo
+        sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo
+    fi
 fi

 # PACKAGES NEEDED

@@ -15,7 +15,16 @@ PG_TOG=$(echo $PG_VERSION | sed 's|\.||g')

 if [ ${DISTRIB} != 'rhel' -o ${DISTRIB_VERSION} != '7' ]; then
     # update of rpm package is broken in rhel-7 (26/12/2022)
+    #yum update -y
+    if [ ${DISTRIB} = 'centos' -a ${DISTRIB_VERSION} = '8' ]; then
+        sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo
+        sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo
+    fi
     yum update -y
+    if [ ${DISTRIB} = 'centos' -a ${DISTRIB_VERSION} = '8' ]; then
+        sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo
+        sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo
+    fi
 fi
 # yum upgrade -y || echo 'some packages in docker failed to upgrade'
 # yum install -y sudo

@@ -15,7 +15,15 @@ PG_TOG=$(echo $PG_VERSION | sed 's|\.||g')

 if [ ${DISTRIB} != 'rhel' -o ${DISTRIB_VERSION} != '7' ]; then
     # update of rpm package is broken in rhel-7 (26/12/2022)
+    if [ ${DISTRIB} = 'centos' -a ${DISTRIB_VERSION} = '8' ]; then
+        sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo
+        sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo
+    fi
     yum update -y
+    if [ ${DISTRIB} = 'centos' -a ${DISTRIB_VERSION} = '8' ]; then
+        sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo
+        sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo
+    fi
 fi

 if [ ${PBK_EDITION} == 'ent' ]; then

@@ -80,11 +88,13 @@ if [ $PBK_EDITION == 'std' ] ; then

     # install POSTGRESQL
     # rpm -ivh https://download.postgresql.org/pub/repos/yum/reporpms/EL-${DISTRIB_VERSION}-x86_64/pgdg-redhat-repo-latest.noarch.rpm
-    if [[ ${PG_VERSION} == '11' ]] || [[ ${PG_VERSION} == '12' ]]; then
-        rpm -ivh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/postgrespro-std-${PG_VERSION}.${DISTRIB}.yum-${PG_VERSION}-0.3.noarch.rpm
-    else
-        rpm -ivh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/postgrespro-std-${PG_VERSION}.${DISTRIB}.yum-${PG_VERSION}-0.3.noarch.rpm
-    fi
+    #if [[ ${PG_VERSION} == '11' ]] || [[ ${PG_VERSION} == '12' ]]; then
+    #    rpm -ivh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/postgrespro-std-${PG_VERSION}.${DISTRIB}.yum-${PG_VERSION}-0.3.noarch.rpm
+    #else
+    #    rpm -ivh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/postgrespro-std-${PG_VERSION}.${DISTRIB}.yum-${PG_VERSION}-0.3.noarch.rpm
+    #fi
+    curl -o pgpro-repo-add.sh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/pgpro-repo-add.sh
+    sh pgpro-repo-add.sh

     if [[ ${PG_VERSION} == '9.6' ]]; then
         yum install -y postgrespro${PG_TOG}-server.x86_64
@@ -1084,15 +1084,15 @@ get_backup_filelist(pgBackup *backup, bool strict)

 		COMP_FILE_CRC32(true, content_crc, buf, strlen(buf));

-		get_control_value(buf, "path", path, NULL, true);
-		get_control_value(buf, "size", NULL, &write_size, true);
-		get_control_value(buf, "mode", NULL, &mode, true);
-		get_control_value(buf, "is_datafile", NULL, &is_datafile, true);
-		get_control_value(buf, "is_cfs", NULL, &is_cfs, false);
-		get_control_value(buf, "crc", NULL, &crc, true);
-		get_control_value(buf, "compress_alg", compress_alg_string, NULL, false);
-		get_control_value(buf, "external_dir_num", NULL, &external_dir_num, false);
-		get_control_value(buf, "dbOid", NULL, &dbOid, false);
+		get_control_value_str(buf, "path", path, sizeof(path),true);
+		get_control_value_int64(buf, "size", &write_size, true);
+		get_control_value_int64(buf, "mode", &mode, true);
+		get_control_value_int64(buf, "is_datafile", &is_datafile, true);
+		get_control_value_int64(buf, "is_cfs", &is_cfs, false);
+		get_control_value_int64(buf, "crc", &crc, true);
+		get_control_value_str(buf, "compress_alg", compress_alg_string, sizeof(compress_alg_string), false);
+		get_control_value_int64(buf, "external_dir_num", &external_dir_num, false);
+		get_control_value_int64(buf, "dbOid", &dbOid, false);

 		file = pgFileInit(path);
 		file->write_size = (int64) write_size;

@@ -1107,28 +1107,28 @@ get_backup_filelist(pgBackup *backup, bool strict)
 		/*
 		 * Optional fields
 		 */
-		if (get_control_value(buf, "linked", linked, NULL, false) && linked[0])
+		if (get_control_value_str(buf, "linked", linked, sizeof(linked), false) && linked[0])
 		{
 			file->linked = pgut_strdup(linked);
 			canonicalize_path(file->linked);
 		}

-		if (get_control_value(buf, "segno", NULL, &segno, false))
+		if (get_control_value_int64(buf, "segno", &segno, false))
 			file->segno = (int) segno;

-		if (get_control_value(buf, "n_blocks", NULL, &n_blocks, false))
+		if (get_control_value_int64(buf, "n_blocks", &n_blocks, false))
 			file->n_blocks = (int) n_blocks;

-		if (get_control_value(buf, "n_headers", NULL, &n_headers, false))
+		if (get_control_value_int64(buf, "n_headers", &n_headers, false))
 			file->n_headers = (int) n_headers;

-		if (get_control_value(buf, "hdr_crc", NULL, &hdr_crc, false))
+		if (get_control_value_int64(buf, "hdr_crc", &hdr_crc, false))
 			file->hdr_crc = (pg_crc32) hdr_crc;

-		if (get_control_value(buf, "hdr_off", NULL, &hdr_off, false))
+		if (get_control_value_int64(buf, "hdr_off", &hdr_off, false))
 			file->hdr_off = hdr_off;

-		if (get_control_value(buf, "hdr_size", NULL, &hdr_size, false))
+		if (get_control_value_int64(buf, "hdr_size", &hdr_size, false))
 			file->hdr_size = (int) hdr_size;

 		parray_append(files, file);
@@ -2,7 +2,7 @@
 *
 * catchup.c: sync DB cluster
 *
- * Copyright (c) 2021, Postgres Professional
+ * Copyright (c) 2022, Postgres Professional
 *
 *-------------------------------------------------------------------------
 */

@@ -507,15 +507,19 @@ catchup_multithreaded_copy(int num_threads,
 	/* Run threads */
 	thread_interrupted = false;
 	threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads);
+	if (!dry_run)
+	{
 		for (i = 0; i < num_threads; i++)
 		{
 			elog(VERBOSE, "Start thread num: %i", i);
 			pthread_create(&threads[i], NULL, &catchup_thread_runner, &(threads_args[i]));
 		}
+	}

 	/* Wait threads */
 	for (i = 0; i < num_threads; i++)
 	{
+		if (!dry_run)
 			pthread_join(threads[i], NULL);
 		all_threads_successful &= threads_args[i].completed;
 		transfered_bytes_result += threads_args[i].transfered_bytes;

@@ -706,9 +710,14 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,

 	/* Start stream replication */
 	join_path_components(dest_xlog_path, dest_pgdata, PG_XLOG_DIR);
+	if (!dry_run)
+	{
 		fio_mkdir(dest_xlog_path, DIR_PERMISSION, FIO_LOCAL_HOST);
 		start_WAL_streaming(source_conn, dest_xlog_path, &instance_config.conn_opt,
 							current.start_lsn, current.tli, false);
+	}
+	else
+		elog(INFO, "WAL streaming skipping with --dry-run option");

 	source_filelist = parray_new();

@@ -820,8 +829,8 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
 			char		dirpath[MAXPGPATH];

 			join_path_components(dirpath, dest_pgdata, file->rel_path);

 			elog(VERBOSE, "Create directory '%s'", dirpath);
+			if (!dry_run)
 				fio_mkdir(dirpath, DIR_PERMISSION, FIO_LOCAL_HOST);
 		}
 		else

@@ -853,6 +862,8 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
 			elog(VERBOSE, "Create directory \"%s\" and symbolic link \"%s\"",
 				 linked_path, to_path);

+			if (!dry_run)
+			{
 				/* create tablespace directory */
 				if (fio_mkdir(linked_path, file->mode, FIO_LOCAL_HOST) != 0)
 					elog(ERROR, "Could not create tablespace directory \"%s\": %s",

@@ -864,6 +875,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
 						 linked_path, to_path, strerror(errno));
 				}
 			}
+		}

 	/*
 	 * find pg_control file (in already sorted source_filelist)

@@ -930,7 +942,10 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
 			char		fullpath[MAXPGPATH];

 			join_path_components(fullpath, dest_pgdata, file->rel_path);
+			if (!dry_run)
+			{
 				fio_delete(file->mode, fullpath, FIO_LOCAL_HOST);
+			}
 			elog(VERBOSE, "Deleted file \"%s\"", fullpath);

 			/* shrink dest pgdata list */

@@ -961,7 +976,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
 	catchup_isok = transfered_datafiles_bytes != -1;

 	/* at last copy control file */
-	if (catchup_isok)
+	if (catchup_isok && !dry_run)
 	{
 		char		from_fullpath[MAXPGPATH];
 		char		to_fullpath[MAXPGPATH];

@@ -972,7 +987,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
 		transfered_datafiles_bytes += source_pg_control_file->size;
 	}

-	if (!catchup_isok)
+	if (!catchup_isok && !dry_run)
 	{
 		char	pretty_time[20];
 		char	pretty_transfered_data_bytes[20];

@@ -1010,14 +1025,18 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
 		pg_free(stop_backup_query_text);
 	}

+	if (!dry_run)
 		wait_wal_and_calculate_stop_lsn(dest_xlog_path, stop_backup_result.lsn, &current);

 #if PG_VERSION_NUM >= 90600
 	/* Write backup_label */
 	Assert(stop_backup_result.backup_label_content != NULL);
+	if (!dry_run)
+	{
 		pg_stop_backup_write_file_helper(dest_pgdata, PG_BACKUP_LABEL_FILE, "backup label",
 										 stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len,
 										 NULL);
+	}
 	free(stop_backup_result.backup_label_content);
 	stop_backup_result.backup_label_content = NULL;
 	stop_backup_result.backup_label_content_len = 0;

@@ -1040,6 +1059,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
 #endif

 	/* wait for end of wal streaming and calculate wal size transfered */
+	if (!dry_run)
 	{
 		parray *wal_files_list = NULL;
 		wal_files_list = parray_new();

@@ -1091,17 +1111,17 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
 	}

 	/* Sync all copied files unless '--no-sync' flag is used */
-	if (sync_dest_files)
+	if (sync_dest_files && !dry_run)
 		catchup_sync_destination_files(dest_pgdata, FIO_LOCAL_HOST, source_filelist, source_pg_control_file);
 	else
 		elog(WARNING, "Files are not synced to disk");

 	/* Cleanup */
-	if (dest_filelist)
+	if (dest_filelist && !dry_run)
 	{
 		parray_walk(dest_filelist, pgFileFree);
-		parray_free(dest_filelist);
 	}
+	parray_free(dest_filelist);
 	parray_walk(source_filelist, pgFileFree);
 	parray_free(source_filelist);
 	pgFileFree(source_pg_control_file);
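The catchup hunks above all apply one pattern: every operation that touches disk or the network is fenced behind the dry_run flag, while logging and byte accounting still run so that --dry-run can report what would be transferred. A minimal sketch of the pattern (illustrative only, not a literal excerpt from the commit; names follow the diff):

    /* Sketch: making one copy step --dry-run aware. */
    join_path_components(fullpath, dest_pgdata, file->rel_path);
    if (!dry_run)
        fio_delete(file->mode, fullpath, FIO_LOCAL_HOST);   /* side effect: guarded */
    elog(VERBOSE, "Deleted file \"%s\"", fullpath);         /* reporting: always runs */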
@@ -349,6 +349,8 @@ prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn,
 				Assert(false);
 			}
 		}
+		/* avoid re-reading once buffered data, flushing on further attempts, see PBCKP-150 */
+		fflush(in);
 	}

 /*

@@ -2030,10 +2032,10 @@ get_page_header(FILE *in, const char *fullpath, BackupPageHeader* bph,
 			return false;	/* EOF found */
 		else if (read_len != 0 && feof(in))
 			elog(ERROR,
-				 "Odd size page found at offset %lu of \"%s\"",
+				 "Odd size page found at offset %ld of \"%s\"",
 				 ftello(in), fullpath);
 		else
-			elog(ERROR, "Cannot read header at offset %lu of \"%s\": %s",
+			elog(ERROR, "Cannot read header at offset %ld of \"%s\": %s",
 				 ftello(in), fullpath, strerror(errno));
 	}

@@ -2321,7 +2323,7 @@ copy_pages(const char *to_fullpath, const char *from_fullpath,
 		elog(ERROR, "Cannot seek to end of file position in destination file \"%s\": %s",
 			 to_fullpath, strerror(errno));
 	{
-		size_t	pos = ftell(out);
+		long	pos = ftell(out);

 		if (pos < 0)
 			elog(ERROR, "Cannot get position in destination file \"%s\": %s",
src/delete.c (15 lines changed)
@@ -36,7 +36,7 @@ do_delete(InstanceState *instanceState, time_t backup_id)
 	parray	   *backup_list,
 			   *delete_list;
 	pgBackup   *target_backup = NULL;
-	size_t		size_to_delete = 0;
+	int64		size_to_delete = 0;
 	char		size_to_delete_pretty[20];

 	/* Get complete list of backups */

@@ -682,12 +682,11 @@ do_retention_wal(InstanceState *instanceState, bool dry_run)
 		 * at least one backup and no file should be removed.
 		 * Unless wal-depth is enabled.
 		 */
-		if ((tlinfo->closest_backup) && instance_config.wal_depth <= 0)
+		if ((tlinfo->closest_backup) && instance_config.wal_depth == 0)
 			continue;

 		/* WAL retention keeps this timeline from purge */
-		if (instance_config.wal_depth >= 0 && tlinfo->anchor_tli > 0 &&
-			tlinfo->anchor_tli != tlinfo->tli)
+		if (tlinfo->anchor_tli > 0 && tlinfo->anchor_tli != tlinfo->tli)
 			continue;

 		/*

@@ -701,7 +700,7 @@ do_retention_wal(InstanceState *instanceState, bool dry_run)
 		 */
 		if (tlinfo->oldest_backup)
 		{
-			if (instance_config.wal_depth >= 0 && !(XLogRecPtrIsInvalid(tlinfo->anchor_lsn)))
+			if (!(XLogRecPtrIsInvalid(tlinfo->anchor_lsn)))
 			{
 				delete_walfiles_in_tli(instanceState, tlinfo->anchor_lsn,
 					tlinfo, instance_config.xlog_seg_size, dry_run);

@@ -714,7 +713,7 @@ do_retention_wal(InstanceState *instanceState, bool dry_run)
 		}
 		else
 		{
-			if (instance_config.wal_depth >= 0 && !(XLogRecPtrIsInvalid(tlinfo->anchor_lsn)))
+			if (!(XLogRecPtrIsInvalid(tlinfo->anchor_lsn)))
 				delete_walfiles_in_tli(instanceState, tlinfo->anchor_lsn,
 					tlinfo, instance_config.xlog_seg_size, dry_run);
 			else

@@ -942,7 +941,7 @@ delete_walfiles_in_tli(InstanceState *instanceState, XLogRecPtr keep_lsn, timeli
 		join_path_components(wal_fullpath, instanceState->instance_wal_subdir_path, wal_file->file.name);

 		/* save segment from purging */
-		if (instance_config.wal_depth >= 0 && wal_file->keep)
+		if (wal_file->keep)
 		{
 			elog(VERBOSE, "Retain WAL segment \"%s\"", wal_fullpath);
 			continue;

@@ -1027,7 +1026,7 @@ do_delete_status(InstanceState *instanceState, InstanceConfig *instance_config,
 	parray	   *backup_list, *delete_list;
 	const char *pretty_status;
 	int			n_deleted = 0, n_found = 0;
-	size_t		size_to_delete = 0;
+	int64		size_to_delete = 0;
 	char		size_to_delete_pretty[20];
 	pgBackup   *backup;
src/dir.c (145 lines changed)
@@ -8,6 +8,7 @@
 *-------------------------------------------------------------------------
 */

+#include <assert.h>
 #include "pg_probackup.h"
 #include "utils/file.h"

@@ -130,6 +131,9 @@ static void opt_path_map(ConfigOption *opt, const char *arg,
 						 TablespaceList *list, const char *type);
 static void cleanup_tablespace(const char *path);

+static void control_string_bad_format(const char* str);
+
+
 /* Tablespace mapping */
 static TablespaceList tablespace_dirs = {NULL, NULL};
 /* Extra directories mapping */

@@ -1036,13 +1040,20 @@ opt_externaldir_map(ConfigOption *opt, const char *arg)
 */
 void
 create_data_directories(parray *dest_files, const char *data_dir, const char *backup_dir,
-						bool extract_tablespaces, bool incremental, fio_location location)
+						bool extract_tablespaces, bool incremental, fio_location location,
+						const char* waldir_path)
 {
 	int			i;
 	parray		*links = NULL;
 	mode_t		pg_tablespace_mode = DIR_PERMISSION;
 	char		to_path[MAXPGPATH];

+	if (waldir_path && !dir_is_empty(waldir_path, location))
+	{
+		elog(ERROR, "WAL directory location is not empty: \"%s\"", waldir_path);
+	}
+
 	/* get tablespace map */
 	if (extract_tablespaces)
 	{

@@ -1107,6 +1118,27 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba
 		/* skip external directory content */
 		if (dir->external_dir_num != 0)
 			continue;
+		/* Create WAL directory and symlink if waldir_path is setting */
+		if (waldir_path && strcmp(dir->rel_path, PG_XLOG_DIR) == 0) {
+			/* get full path to PG_XLOG_DIR */
+			join_path_components(to_path, data_dir, PG_XLOG_DIR);
+
+			elog(VERBOSE, "Create directory \"%s\" and symbolic link \"%s\"",
+				 waldir_path, to_path);
+
+			/* create tablespace directory from waldir_path */
+			fio_mkdir(waldir_path, pg_tablespace_mode, location);
+
+			/* create link to linked_path */
+			if (fio_symlink(waldir_path, to_path, incremental, location) < 0)
+				elog(ERROR, "Could not create symbolic link \"%s\": %s",
+					 to_path, strerror(errno));
+
+			continue;
+		}

 		/* tablespace_map exists */
 		if (links)

@@ -1467,7 +1499,7 @@ get_external_remap(char *current_dir)
 	return current_dir;
 }

-/* Parsing states for get_control_value() */
+/* Parsing states for get_control_value_str() */
 #define CONTROL_WAIT_NAME		1
 #define CONTROL_INNAME			2
 #define CONTROL_WAIT_COLON		3

@@ -1481,26 +1513,62 @@ get_external_remap(char *current_dir)
 * The line has the following format:
 *   {"name1":"value1", "name2":"value2"}
 *
- * The value will be returned to "value_str" as string if it is not NULL. If it
- * is NULL the value will be returned to "value_int64" as int64.
+ * The value will be returned in "value_int64" as int64.
+ *
+ * Returns true if the value was found in the line and parsed.
+ */
+bool
+get_control_value_int64(const char *str, const char *name, int64 *value_int64, bool is_mandatory)
+{
+	char buf_int64[32];
+
+	assert(value_int64);
+
+	/* Set default value */
+	*value_int64 = 0;
+
+	if (!get_control_value_str(str, name, buf_int64, sizeof(buf_int64), is_mandatory))
+		return false;
+
+	if (!parse_int64(buf_int64, value_int64, 0))
+	{
+		/* We assume that too big value is -1 */
+		if (errno == ERANGE)
+			*value_int64 = BYTES_INVALID;
+		else
+			control_string_bad_format(str);
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Get value from json-like line "str" of backup_content.control file.
+ *
+ * The line has the following format:
+ *   {"name1":"value1", "name2":"value2"}
+ *
+ * The value will be returned to "value_str" as string.
 *
 * Returns true if the value was found in the line.
 */
 bool
-get_control_value(const char *str, const char *name,
-				  char *value_str, int64 *value_int64, bool is_mandatory)
+get_control_value_str(const char *str, const char *name,
+					  char *value_str, size_t value_str_size, bool is_mandatory)
 {
 	int			state = CONTROL_WAIT_NAME;
 	char	   *name_ptr = (char *) name;
 	char	   *buf = (char *) str;
-	char		buf_int64[32],	/* Buffer for "value_int64" */
-			   *buf_int64_ptr = buf_int64;
+	char *const	value_str_start = value_str;

-	/* Set default values */
-	if (value_str)
-		*value_str = '\0';
-	else if (value_int64)
-		*value_int64 = 0;
+	assert(value_str);
+	assert(value_str_size > 0);
+
+	/* Set default value */
+	*value_str = '\0';

 	while (*buf)
 	{

@@ -1510,7 +1578,7 @@ get_control_value(const char *str, const char *name,
 				if (*buf == '"')
 					state = CONTROL_INNAME;
 				else if (IsAlpha(*buf))
-					goto bad_format;
+					control_string_bad_format(str);
 				break;
 			case CONTROL_INNAME:
 				/* Found target field. Parse value. */

@@ -1529,58 +1597,33 @@ get_control_value(const char *str, const char *name,
 				if (*buf == ':')
 					state = CONTROL_WAIT_VALUE;
 				else if (!IsSpace(*buf))
-					goto bad_format;
+					control_string_bad_format(str);
 				break;
 			case CONTROL_WAIT_VALUE:
 				if (*buf == '"')
 				{
 					state = CONTROL_INVALUE;
-					buf_int64_ptr = buf_int64;
 				}
 				else if (IsAlpha(*buf))
-					goto bad_format;
+					control_string_bad_format(str);
 				break;
 			case CONTROL_INVALUE:
 				/* Value was parsed, exit */
 				if (*buf == '"')
 				{
-					if (value_str)
-					{
 					*value_str = '\0';
-					}
-					else if (value_int64)
-					{
-						/* Length of buf_uint64 should not be greater than 31 */
-						if (buf_int64_ptr - buf_int64 >= 32)
-							elog(ERROR, "field \"%s\" is out of range in the line %s of the file %s",
-								 name, str, DATABASE_FILE_LIST);
-
-						*buf_int64_ptr = '\0';
-						if (!parse_int64(buf_int64, value_int64, 0))
-						{
-							/* We assume that too big value is -1 */
-							if (errno == ERANGE)
-								*value_int64 = BYTES_INVALID;
-							else
-								goto bad_format;
-						}
-					}
-
 					return true;
 				}
 				else
 				{
-					if (value_str)
-					{
+					/* verify if value_str not exceeds value_str_size limits */
+					if (value_str - value_str_start >= value_str_size - 1) {
+						elog(ERROR, "field \"%s\" is out of range in the line %s of the file %s",
+							 name, str, DATABASE_FILE_LIST);
+					}
 					*value_str = *buf;
 					value_str++;
-					}
-					else
-					{
-						*buf_int64_ptr = *buf;
-						buf_int64_ptr++;
-					}
 				}
 				break;
 			case CONTROL_WAIT_NEXT_NAME:
 				if (*buf == ',')

@@ -1596,18 +1639,20 @@ get_control_value(const char *str, const char *name,

 	/* There is no close quotes */
 	if (state == CONTROL_INNAME || state == CONTROL_INVALUE)
-		goto bad_format;
+		control_string_bad_format(str);

 	/* Did not find target field */
 	if (is_mandatory)
 		elog(ERROR, "field \"%s\" is not found in the line %s of the file %s",
 			 name, str, DATABASE_FILE_LIST);
 	return false;
+}

-bad_format:
+static void
+control_string_bad_format(const char* str)
+{
 	elog(ERROR, "%s file has invalid format in line %s",
 		 DATABASE_FILE_LIST, str);
-	return false;		/* Make compiler happy */
 }

@@ -1841,8 +1886,8 @@ read_database_map(pgBackup *backup)

 		db_map_entry *db_entry = (db_map_entry *) pgut_malloc(sizeof(db_map_entry));

-		get_control_value(buf, "dbOid", NULL, &dbOid, true);
-		get_control_value(buf, "datname", datname, NULL, true);
+		get_control_value_int64(buf, "dbOid", &dbOid, true);
+		get_control_value_str(buf, "datname", datname, sizeof(datname), true);

 		db_entry->dbOid = dbOid;
 		db_entry->datname = pgut_strdup(datname);
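Taken together, the dir.c changes replace the old dual-purpose get_control_value() (string output when value_str was non-NULL, int64 otherwise) with two explicit helpers that also bound the output buffer. A hypothetical call site (the control-file line literal below is made up for illustration; the signatures come from the diff):

    char    path[MAXPGPATH];
    int64   size = 0;
    const char *line = "{\"path\":\"base/1/1234\", \"size\":\"8192\"}";

    /* mandatory string field, now with an explicit buffer size */
    get_control_value_str(line, "path", path, sizeof(path), true);

    /* mandatory numeric field, parsed and range-checked internally */
    get_control_value_int64(line, "size", &size, true);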
src/help.c (14 lines changed)
@@ -169,6 +169,7 @@ help_pg_probackup(void)
 	printf(_(" [-T OLDDIR=NEWDIR] [--progress]\n"));
 	printf(_(" [--external-mapping=OLDDIR=NEWDIR]\n"));
 	printf(_(" [--skip-external-dirs] [--no-sync]\n"));
+	printf(_(" [-X WALDIR | --waldir=WALDIR]\n"));
 	printf(_(" [-I | --incremental-mode=none|checksum|lsn]\n"));
 	printf(_(" [--db-include | --db-exclude]\n"));
 	printf(_(" [--remote-proto] [--remote-host]\n"));

@@ -261,15 +262,16 @@ help_pg_probackup(void)
 	printf(_(" [--remote-proto] [--remote-host]\n"));
 	printf(_(" [--remote-port] [--remote-path] [--remote-user]\n"));
 	printf(_(" [--ssh-options]\n"));
+	printf(_(" [--dry-run]\n"));
 	printf(_(" [--help]\n"));

 	if ((PROGRAM_URL || PROGRAM_EMAIL))
 	{
 		printf("\n");
 		if (PROGRAM_URL)
-			printf("Read the website for details. <%s>\n", PROGRAM_URL);
+			printf(_("Read the website for details <%s>.\n"), PROGRAM_URL);
 		if (PROGRAM_EMAIL)
-			printf("Report bugs to <%s>.\n", PROGRAM_EMAIL);
+			printf(_("Report bugs to <%s>.\n"), PROGRAM_EMAIL);
 	}
 }

@@ -434,6 +436,7 @@ help_restore(void)
 	printf(_(" [-T OLDDIR=NEWDIR]\n"));
 	printf(_(" [--external-mapping=OLDDIR=NEWDIR]\n"));
 	printf(_(" [--skip-external-dirs]\n"));
+	printf(_(" [-X WALDIR | --waldir=WALDIR]\n"));
 	printf(_(" [-I | --incremental-mode=none|checksum|lsn]\n"));
 	printf(_(" [--db-include dbname | --db-exclude dbname]\n"));
 	printf(_(" [--recovery-target-time=time|--recovery-target-xid=xid\n"));

@@ -471,6 +474,10 @@ help_restore(void)
 	printf(_(" relocate the external directory from OLDDIR to NEWDIR\n"));
 	printf(_(" --skip-external-dirs do not restore all external directories\n"));

+
+	printf(_(" -X, --waldir=WALDIR location for the write-ahead log directory\n"));
+
+
 	printf(_("\n Incremental restore options:\n"));
 	printf(_(" -I, --incremental-mode=none|checksum|lsn\n"));
 	printf(_(" reuse valid pages available in PGDATA if they have not changed\n"));

@@ -1047,6 +1054,7 @@ help_catchup(void)
 	printf(_(" [--remote-proto] [--remote-host]\n"));
 	printf(_(" [--remote-port] [--remote-path] [--remote-user]\n"));
 	printf(_(" [--ssh-options]\n"));
+	printf(_(" [--dry-run]\n"));
 	printf(_(" [--help]\n\n"));

 	printf(_(" -b, --backup-mode=catchup-mode catchup mode=FULL|DELTA|PTRACK\n"));

@@ -1081,4 +1089,6 @@ help_catchup(void)
 	printf(_(" --remote-user=username user name for ssh connection (default: current user)\n"));
 	printf(_(" --ssh-options=ssh_options additional ssh options (default: none)\n"));
 	printf(_(" (example: --ssh-options='-c cipher_spec -F configfile')\n\n"));
+
+	printf(_(" --dry-run perform a trial run without any changes\n\n"));
 }
@@ -614,7 +614,7 @@ merge_chain(InstanceState *instanceState,

 	/* Create directories */
 	create_data_directories(dest_backup->files, full_database_dir,
-							dest_backup->root_dir, false, false, FIO_BACKUP_HOST);
+							dest_backup->root_dir, false, false, FIO_BACKUP_HOST, NULL);

 	/* External directories stuff */
 	if (dest_backup->external_dir_str)
@@ -122,6 +122,7 @@ static parray *datname_include_list = NULL;
 /* arrays for --exclude-path's */
 static parray *exclude_absolute_paths_list = NULL;
 static parray *exclude_relative_paths_list = NULL;
+static char* gl_waldir_path = NULL;

 /* checkdb options */
 bool need_amcheck = false;

@@ -238,6 +239,7 @@ static ConfigOption cmd_options[] =
 	{ 's', 160, "primary-conninfo",	&primary_conninfo,	SOURCE_CMD_STRICT },
 	{ 's', 'S', "primary-slot-name",&replication_slot,	SOURCE_CMD_STRICT },
 	{ 'f', 'I', "incremental-mode", opt_incr_restore_mode,	SOURCE_CMD_STRICT },
+	{ 's', 'X', "waldir",			&gl_waldir_path,	SOURCE_CMD_STRICT },
 	/* checkdb options */
 	{ 'b', 195, "amcheck",			&need_amcheck,		SOURCE_CMD_STRICT },
 	{ 'b', 196, "heapallindexed",	&heapallindexed,	SOURCE_CMD_STRICT },

@@ -308,6 +310,7 @@ main(int argc, char *argv[])
 	init_config(&instance_config, instance_name);

 	PROGRAM_NAME = get_progname(argv[0]);
+	set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_probackup"));
 	PROGRAM_FULL_PATH = palloc0(MAXPGPATH);

 	/* Get current time */

@@ -753,6 +756,21 @@ main(int argc, char *argv[])
 			restore_params->partial_restore_type = INCLUDE;
 			restore_params->partial_db_list = datname_include_list;
 		}
+
+		if (gl_waldir_path)
+		{
+			/* clean up xlog directory name, check it's absolute */
+			canonicalize_path(gl_waldir_path);
+			if (!is_absolute_path(gl_waldir_path))
+			{
+				elog(ERROR, "WAL directory location must be an absolute path");
+			}
+			if (strlen(gl_waldir_path) > MAXPGPATH)
+				elog(ERROR, "Value specified to --waldir is too long");
+
+		}
+		restore_params->waldir = gl_waldir_path;
+
 	}

 	/*
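For orientation, the new --waldir option threads through three files; an assembled sketch of the flow (condensed from the hunks in this commit, not a literal excerpt):

    /* pg_probackup.c: option parsed into gl_waldir_path, validated, then: */
    restore_params->waldir = gl_waldir_path;

    /* restore.c: handed down to directory creation: */
    create_data_directories(dest_files, instance_config.pgdata,
                            dest_backup->root_dir, backup_has_tblspc,
                            params->incremental_mode != INCR_NONE,
                            FIO_DB_HOST, params->waldir);

    /* dir.c: create_data_directories() requires waldir_path to be empty,
     * creates it, and symlinks PG_XLOG_DIR (pg_wal) to it via
     * fio_mkdir() + fio_symlink(). */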
@@ -338,7 +338,7 @@ typedef enum ShowFormat
 #define BYTES_INVALID		(-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */
 #define FILE_NOT_FOUND		(-2) /* file disappeared during backup */
 #define BLOCKNUM_INVALID	(-1)
-#define PROGRAM_VERSION	"2.5.5"
+#define PROGRAM_VERSION	"2.5.6"

 /* update when remote agent API or behaviour changes */
 #define AGENT_PROTOCOL_VERSION 20501

@@ -566,6 +566,8 @@ typedef struct pgRestoreParams
 	/* options for partial restore */
 	PartialRestoreType partial_restore_type;
 	parray *partial_db_list;
+
+	char* waldir;
 } pgRestoreParams;

 /* Options needed for set-backup command */

@@ -1010,8 +1012,9 @@ extern CompressAlg parse_compress_alg(const char *arg);
 extern const char* deparse_compress_alg(int alg);

 /* in dir.c */
-extern bool get_control_value(const char *str, const char *name,
-							  char *value_str, int64 *value_int64, bool is_mandatory);
+extern bool get_control_value_int64(const char *str, const char *name, int64 *value_int64, bool is_mandatory);
+extern bool get_control_value_str(const char *str, const char *name,
+								  char *value_str, size_t value_str_size, bool is_mandatory);
 extern void dir_list_file(parray *files, const char *root, bool exclude,
 						  bool follow_symlink, bool add_root, bool backup_logs,
 						  bool skip_hidden, int external_dir_num, fio_location location);

@@ -1022,7 +1025,8 @@ extern void create_data_directories(parray *dest_files,
 									const char *backup_dir,
 									bool extract_tablespaces,
 									bool incremental,
-									fio_location location);
+									fio_location location,
+									const char *waldir_path);

 extern void read_tablespace_map(parray *links, const char *backup_dir);
 extern void opt_tablespace_map(ConfigOption *opt, const char *arg);
@@ -801,7 +801,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain,
 	create_data_directories(dest_files, instance_config.pgdata,
 							dest_backup->root_dir, backup_has_tblspc,
 							params->incremental_mode != INCR_NONE,
-							FIO_DB_HOST);
+							FIO_DB_HOST, params->waldir);
 
 	/*
 	 * Restore dest_backup external directories.
@@ -489,8 +489,10 @@ fio_disconnect(void)
 		Assert(hdr.cop == FIO_DISCONNECTED);
 		SYS_CHECK(close(fio_stdin));
 		SYS_CHECK(close(fio_stdout));
+		SYS_CHECK(close(fio_stderr));
 		fio_stdin = 0;
 		fio_stdout = 0;
+		fio_stderr = 0;
 		wait_ssh();
 	}
 }
@@ -3403,7 +3405,8 @@ fio_communicate(int in, int out)
 		  case FIO_DISCONNECT:
 			hdr.cop = FIO_DISCONNECTED;
 			IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr));
-			break;
+			free(buf);
+			return;
 		  case FIO_GET_ASYNC_ERROR:
 			fio_get_async_error_impl(out);
 			break;
@@ -147,6 +147,9 @@ bool launch_agent(void)
 		ssh_argv[ssh_argc++] = "-o";
 		ssh_argv[ssh_argc++] = "Compression=no";
 
+		ssh_argv[ssh_argc++] = "-o";
+		ssh_argv[ssh_argc++] = "ControlMaster=no";
+
 		ssh_argv[ssh_argc++] = "-o";
 		ssh_argv[ssh_argc++] = "LogLevel=error";
 
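`ControlMaster=no` is a stock OpenSSH option: it stops the agent's session from creating or reusing a multiplexed control socket, so each remote agent gets its own dedicated connection. A rough Python rendering of the argv built above, purely to illustrate the option order (the authoritative list is the C code in launch_agent()):

```python
ssh_argv = ["ssh"]
ssh_argv += ["-o", "Compression=no"]    # was already passed
ssh_argv += ["-o", "ControlMaster=no"]  # new in this hunk: no shared control socket
ssh_argv += ["-o", "LogLevel=error"]
print(" ".join(ssh_argv))
# ssh -o Compression=no -o ControlMaster=no -o LogLevel=error
```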
@@ -1,4 +1,4 @@
-[see wiki](https://confluence.postgrespro.ru/display/DEV/pg_probackup)
+****[see wiki](https://confluence.postgrespro.ru/display/DEV/pg_probackup)
 
 ```
 Note: For now these tests work on Linux and "kinda" work on Windows
@@ -31,7 +31,7 @@ Remote backup depends on key authentication to local machine via ssh as current
 export PGPROBACKUP_SSH_REMOTE=ON
 
 Run tests that are relied on advanced debugging features. For this mode, pg_probackup should be compiled without optimizations. For example:
-CFLAGS="-O0" ./configure --prefix=/path/to/prefix --enable-debug --enable-cassert --enable-depend --enable-tap-tests
+CFLAGS="-O0" ./configure --prefix=/path/to/prefix --enable-debug --enable-cassert --enable-depend --enable-tap-tests --enable-nls
 
 export PGPROBACKUP_GDB=ON
 
@@ -41,6 +41,8 @@ Run suit of basic simple tests:
 Run ptrack tests:
 export PG_PROBACKUP_PTRACK=ON
 
+Run long (time consuming) tests:
+export PG_PROBACKUP_LONG=ON
 
 Usage:
 sudo echo 0 > /proc/sys/kernel/yama/ptrace_scope
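A sketch of what the new switch looks like in use, assuming the suite is launched from the repository root; the environment variable name comes straight from the hunk above:

```python
import os
import subprocess

env = dict(os.environ)
env["PG_PROBACKUP_LONG"] = "ON"   # consumed by load_tests() in tests/__init__.py

# Run only the long-running module; drop the module name to run everything.
subprocess.run(["python", "-m", "unittest", "-v", "tests.time_consuming"],
               env=env, check=True)
```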
@@ -48,3 +50,20 @@ Usage:
  export PG_CONFIG=/path/to/pg_config
  python -m unittest [-v] tests[.specific_module][.class.test]
 ```
+
+# Troubleshooting FAQ
+
+## Python tests failure
+### 1. Could not open extension "..."
+```
+testgres.exceptions.QueryException ERROR:  could not open extension control file "<postgres_build_dir>/share/extension/amcheck.control": No such file or directory
+```
+
+#### Solution:
+
+You have no `<postgres_src_root>/contrib/...` extension installed, please do
+
+```commandline
+cd <postgres_src_root>
+make install-world
+```
@@ -7,7 +7,7 @@ from . import init, merge, option, show, compatibility, \
     compression, page, ptrack, archive, exclude, cfs_backup, cfs_restore, \
     cfs_validate_backup, auth_test, time_stamp, logging, \
     locking, remote, external, config, checkdb, set_backup, incr_restore, \
-    catchup, CVE_2018_1058
+    catchup, CVE_2018_1058, time_consuming
 
 
 def load_tests(loader, tests, pattern):
@@ -21,6 +21,12 @@ def load_tests(loader, tests, pattern):
         if os.environ['PG_PROBACKUP_PTRACK'] == 'ON':
             suite.addTests(loader.loadTestsFromModule(ptrack))
 
+    # PG_PROBACKUP_LONG section for tests that are long
+    # by design e.g. they contain loops, sleeps and so on
+    if 'PG_PROBACKUP_LONG' in os.environ:
+        if os.environ['PG_PROBACKUP_LONG'] == 'ON':
+            suite.addTests(loader.loadTestsFromModule(time_consuming))
+
     # suite.addTests(loader.loadTestsFromModule(auth_test))
     suite.addTests(loader.loadTestsFromModule(archive))
     suite.addTests(loader.loadTestsFromModule(backup))
@@ -228,6 +228,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         Check pg_stop_backup_timeout, needed backup_timeout
         Fixed in commit d84d79668b0c139 and assert fixed by ptrack 1.7
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -290,6 +292,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         Check pg_stop_backup_timeout, libpq-timeout requested.
         Fixed in commit d84d79668b0c139 and assert fixed by ptrack 1.7
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -313,7 +317,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         gdb.set_breakpoint('pg_stop_backup')
         gdb.run_until_break()
 
-        self.set_auto_conf(node, {'archive_command': "'exit 1'"})
+        self.set_auto_conf(node, {'archive_command': 'exit 1'})
         node.reload()
 
         os.environ["PGAPPNAME"] = "foo"
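The quoting fix above is subtle: if the configuration writer already single-quotes values when emitting postgresql.auto.conf, passing a pre-quoted string doubles the quoting. A small self-contained sketch of the failure mode, assuming a hypothetical writer that quotes the way set_auto_conf appears to:

```python
def render_auto_conf(options):
    # Hypothetical stand-in for a writer that single-quotes every value itself.
    return "\n".join("{0} = '{1}'".format(k, v) for k, v in options.items())

print(render_auto_conf({'archive_command': "'exit 1'"}))
# archive_command = ''exit 1''   <- double quoting, not what was intended
print(render_auto_conf({'archive_command': 'exit 1'}))
# archive_command = 'exit 1'     <- correct
```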
@@ -1095,6 +1095,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
     # @unittest.skip("skip")
     def test_drop_rel_during_full_backup(self):
         """"""
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -1244,6 +1246,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
     # @unittest.skip("skip")
     def test_drop_rel_during_backup_delta(self):
         """"""
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -1313,6 +1317,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
     # @unittest.skip("skip")
     def test_drop_rel_during_backup_page(self):
         """"""
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -1445,6 +1451,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
     # @unittest.skip("skip")
     def test_backup_concurrent_drop_table(self):
         """"""
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -1579,6 +1587,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
     # @unittest.skip("skip")
     def test_sigint_handling(self):
         """"""
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -1618,6 +1628,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
     # @unittest.skip("skip")
     def test_sigterm_handling(self):
         """"""
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -1656,6 +1668,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
     # @unittest.skip("skip")
     def test_sigquit_handling(self):
         """"""
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -2905,6 +2919,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
     # @unittest.skip("skip")
     def test_missing_wal_segment(self):
         """"""
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -3295,6 +3311,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
     # @unittest.skip("skip")
     def test_backup_atexit(self):
         """"""
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
tests/catchup.py (154 lines changed)
@@ -1455,3 +1455,157 @@ class CatchupTest(ProbackupTest, unittest.TestCase):
         dst_pg.stop()
         #self.assertEqual(1, 0, 'Stop test')
         self.del_test_dir(module_name, self.fname)
+
+#########################################
+# --dry-run
+#########################################
+    def test_dry_run_catchup_full(self):
+        """
+        Test dry-run option for full catchup
+        """
+        # preparation 1: source
+        src_pg = self.make_simple_node(
+            base_dir = os.path.join(module_name, self.fname, 'src'),
+            set_replication = True
+            )
+        src_pg.slow_start()
+
+        # preparation 2: make clean shutdowned lagging behind replica
+        dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
+
+        src_pg.pgbench_init(scale = 10)
+        pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum'])
+        pgbench.wait()
+
+        # save the condition before dry-run
+        content_before = self.pgdata_content(dst_pg.data_dir)
+
+        # do full catchup
+        self.catchup_node(
+            backup_mode = 'FULL',
+            source_pgdata = src_pg.data_dir,
+            destination_node = dst_pg,
+            options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--dry-run']
+            )
+
+        # compare data dirs before and after catchup
+        self.compare_pgdata(
+            content_before,
+            self.pgdata_content(dst_pg.data_dir)
+            )
+
+        # Cleanup
+        src_pg.stop()
+        self.del_test_dir(module_name, self.fname)
+
+    def test_dry_run_catchup_ptrack(self):
+        """
+        Test dry-run option for catchup in incremental ptrack mode
+        """
+        if not self.ptrack:
+            return unittest.skip('Skipped because ptrack support is disabled')
+
+        # preparation 1: source
+        src_pg = self.make_simple_node(
+            base_dir = os.path.join(module_name, self.fname, 'src'),
+            set_replication = True,
+            ptrack_enable = True,
+            initdb_params = ['--data-checksums']
+            )
+        src_pg.slow_start()
+        src_pg.safe_psql("postgres", "CREATE EXTENSION ptrack")
+
+        src_pg.pgbench_init(scale = 10)
+        pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum'])
+        pgbench.wait()
+
+        # preparation 2: make clean shutdowned lagging behind replica
+        dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
+        self.catchup_node(
+            backup_mode = 'FULL',
+            source_pgdata = src_pg.data_dir,
+            destination_node = dst_pg,
+            options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+            )
+        self.set_replica(src_pg, dst_pg)
+        dst_options = {}
+        dst_options['port'] = str(dst_pg.port)
+        self.set_auto_conf(dst_pg, dst_options)
+        dst_pg.slow_start(replica = True)
+        dst_pg.stop()
+
+        # save the condition before dry-run
+        content_before = self.pgdata_content(dst_pg.data_dir)
+
+        # do incremental catchup
+        self.catchup_node(
+            backup_mode = 'PTRACK',
+            source_pgdata = src_pg.data_dir,
+            destination_node = dst_pg,
+            options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--dry-run']
+            )
+
+        # compare data dirs before and after cathup
+        self.compare_pgdata(
+            content_before,
+            self.pgdata_content(dst_pg.data_dir)
+            )
+
+        # Cleanup
+        src_pg.stop()
+        self.del_test_dir(module_name, self.fname)
+
+    def test_dry_run_catchup_delta(self):
+        """
+        Test dry-run option for catchup in incremental delta mode
+        """
+
+        # preparation 1: source
+        src_pg = self.make_simple_node(
+            base_dir = os.path.join(module_name, self.fname, 'src'),
+            set_replication = True,
+            initdb_params = ['--data-checksums'],
+            pg_options = { 'wal_log_hints': 'on' }
+            )
+        src_pg.slow_start()
+
+        src_pg.pgbench_init(scale = 10)
+        pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum'])
+        pgbench.wait()
+
+        # preparation 2: make clean shutdowned lagging behind replica
+        dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
+        self.catchup_node(
+            backup_mode = 'FULL',
+            source_pgdata = src_pg.data_dir,
+            destination_node = dst_pg,
+            options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
+            )
+        self.set_replica(src_pg, dst_pg)
+        dst_options = {}
+        dst_options['port'] = str(dst_pg.port)
+        self.set_auto_conf(dst_pg, dst_options)
+        dst_pg.slow_start(replica = True)
+        dst_pg.stop()
+
+        # save the condition before dry-run
+        content_before = self.pgdata_content(dst_pg.data_dir)
+
+        # do delta catchup
+        self.catchup_node(
+            backup_mode = 'DELTA',
+            source_pgdata = src_pg.data_dir,
+            destination_node = dst_pg,
+            options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', "--dry-run"]
+            )
+
+        # compare data dirs before and after cathup
+        self.compare_pgdata(
+            content_before,
+            self.pgdata_content(dst_pg.data_dir)
+            )
+
+        # Cleanup
+        src_pg.stop()
+        self.del_test_dir(module_name, self.fname)
@@ -17,6 +17,8 @@ class CheckdbTest(ProbackupTest, unittest.TestCase):
     # @unittest.skip("skip")
     def test_checkdb_amcheck_only_sanity(self):
        """"""
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -544,11 +546,8 @@ class CheckdbTest(ProbackupTest, unittest.TestCase):
     # @unittest.skip("skip")
     def test_checkdb_sigint_handling(self):
         """"""
-        if not self.gdb:
-            self.skipTest(
-                "Specify PGPROBACKUP_GDB and build without "
-                "optimizations for run this test"
-            )
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -472,11 +472,8 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
         make node, make full and delta stream backups,
         restore them and check data correctness
         """
-        if not self.gdb:
-            self.skipTest(
-                "Specify PGPROBACKUP_GDB and build without "
-                "optimizations for run this test"
-            )
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -203,8 +203,10 @@ class ExcludeTest(ProbackupTest, unittest.TestCase):
     # @unittest.skip("skip")
     def test_exclude_unlogged_tables_2(self):
         """
-        make node, create unlogged, take FULL, check
-        that unlogged was not backed up
+        1. make node, create unlogged, take FULL, DELTA, PAGE,
+        check that unlogged table files was not backed up
+        2. restore FULL, DELTA, PAGE to empty db,
+        ensure unlogged table exist and is epmty
         """
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@@ -220,6 +222,8 @@ class ExcludeTest(ProbackupTest, unittest.TestCase):
         self.set_archiving(backup_dir, 'node', node)
         node.slow_start()
 
+        backup_ids = []
+
         for backup_type in ['full', 'delta', 'page']:
 
             if backup_type == 'full':
@@ -231,14 +235,16 @@ class ExcludeTest(ProbackupTest, unittest.TestCase):
                 'postgres',
                 'insert into test select generate_series(0,20050000)::text')
 
-            rel_path = node.safe_psql(
+            rel_path = node.execute(
                 'postgres',
-                "select pg_relation_filepath('test')").decode('utf-8').rstrip()
+                "select pg_relation_filepath('test')")[0][0]
 
             backup_id = self.backup_node(
                 backup_dir, 'node', node,
                 backup_type=backup_type, options=['--stream'])
 
+            backup_ids.append(backup_id)
+
             filelist = self.get_backup_filelist(
                 backup_dir, 'node', backup_id)
 
@@ -258,9 +264,25 @@ class ExcludeTest(ProbackupTest, unittest.TestCase):
                 rel_path + '.3', filelist,
                 "Unlogged table was not excluded")
 
+        # ensure restoring retrieves back only empty unlogged table
+        for backup_id in backup_ids:
+            node.stop()
+            node.cleanup()
+
+            self.restore_node(backup_dir, 'node', node, backup_id=backup_id)
+
+            node.slow_start()
+
+            self.assertEqual(
+                node.execute(
+                    'postgres',
+                    'select count(*) from test')[0][0],
+                0)
+
         # Clean after yourself
         self.del_test_dir(module_name, fname)
 
+
     # @unittest.skip("skip")
     def test_exclude_log_dir(self):
         """
@@ -86,6 +86,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
                 [-T OLDDIR=NEWDIR] [--progress]
                 [--external-mapping=OLDDIR=NEWDIR]
                 [--skip-external-dirs] [--no-sync]
+                [-X WALDIR | --waldir=WALDIR]
                 [-I | --incremental-mode=none|checksum|lsn]
                 [--db-include | --db-exclude]
                 [--remote-proto] [--remote-host]
@@ -178,7 +179,8 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
                 [--remote-proto] [--remote-host]
                 [--remote-port] [--remote-path] [--remote-user]
                 [--ssh-options]
+                [--dry-run]
                 [--help]
 
-Read the website for details. <https://github.com/postgrespro/pg_probackup>
+Read the website for details <https://github.com/postgrespro/pg_probackup>.
 Report bugs to <https://github.com/postgrespro/pg_probackup/issues>.
tests/expected/option_help_ru.out (new file, 186 lines)
@@ -0,0 +1,186 @@
+
+pg_probackup - утилита для управления резервным копированием/восстановлением базы данных PostgreSQL.
+
+ pg_probackup help [COMMAND]
+
+ pg_probackup version
+
+ pg_probackup init -B backup-path
+
+ pg_probackup set-config -B backup-path --instance=instance_name
+                [-D pgdata-path]
+                [--external-dirs=external-directories-paths]
+                [--log-level-console=log-level-console]
+                [--log-level-file=log-level-file]
+                [--log-filename=log-filename]
+                [--error-log-filename=error-log-filename]
+                [--log-directory=log-directory]
+                [--log-rotation-size=log-rotation-size]
+                [--log-rotation-age=log-rotation-age]
+                [--retention-redundancy=retention-redundancy]
+                [--retention-window=retention-window]
+                [--wal-depth=wal-depth]
+                [--compress-algorithm=compress-algorithm]
+                [--compress-level=compress-level]
+                [--archive-timeout=timeout]
+                [-d dbname] [-h host] [-p port] [-U username]
+                [--remote-proto] [--remote-host]
+                [--remote-port] [--remote-path] [--remote-user]
+                [--ssh-options]
+                [--restore-command=cmdline] [--archive-host=destination]
+                [--archive-port=port] [--archive-user=username]
+                [--help]
+
+ pg_probackup set-backup -B backup-path --instance=instance_name
+                -i backup-id [--ttl=interval] [--expire-time=timestamp]
+                [--note=text]
+                [--help]
+
+ pg_probackup show-config -B backup-path --instance=instance_name
+                [--format=format]
+                [--help]
+
+ pg_probackup backup -B backup-path -b backup-mode --instance=instance_name
+                [-D pgdata-path] [-C]
+                [--stream [-S slot-name] [--temp-slot]]
+                [--backup-pg-log] [-j num-threads] [--progress]
+                [--no-validate] [--skip-block-validation]
+                [--external-dirs=external-directories-paths]
+                [--no-sync]
+                [--log-level-console=log-level-console]
+                [--log-level-file=log-level-file]
+                [--log-filename=log-filename]
+                [--error-log-filename=error-log-filename]
+                [--log-directory=log-directory]
+                [--log-rotation-size=log-rotation-size]
+                [--log-rotation-age=log-rotation-age] [--no-color]
+                [--delete-expired] [--delete-wal] [--merge-expired]
+                [--retention-redundancy=retention-redundancy]
+                [--retention-window=retention-window]
+                [--wal-depth=wal-depth]
+                [--compress]
+                [--compress-algorithm=compress-algorithm]
+                [--compress-level=compress-level]
+                [--archive-timeout=archive-timeout]
+                [-d dbname] [-h host] [-p port] [-U username]
+                [-w --no-password] [-W --password]
+                [--remote-proto] [--remote-host]
+                [--remote-port] [--remote-path] [--remote-user]
+                [--ssh-options]
+                [--ttl=interval] [--expire-time=timestamp] [--note=text]
+                [--help]
+
+ pg_probackup restore -B backup-path --instance=instance_name
+                [-D pgdata-path] [-i backup-id] [-j num-threads]
+                [--recovery-target-time=time|--recovery-target-xid=xid
+                 |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]]
+                [--recovery-target-timeline=timeline]
+                [--recovery-target=immediate|latest]
+                [--recovery-target-name=target-name]
+                [--recovery-target-action=pause|promote|shutdown]
+                [--restore-command=cmdline]
+                [-R | --restore-as-replica] [--force]
+                [--primary-conninfo=primary_conninfo]
+                [-S | --primary-slot-name=slotname]
+                [--no-validate] [--skip-block-validation]
+                [-T OLDDIR=NEWDIR] [--progress]
+                [--external-mapping=OLDDIR=NEWDIR]
+                [--skip-external-dirs] [--no-sync]
+                [-X WALDIR | --waldir=WALDIR]
+                [-I | --incremental-mode=none|checksum|lsn]
+                [--db-include | --db-exclude]
+                [--remote-proto] [--remote-host]
+                [--remote-port] [--remote-path] [--remote-user]
+                [--ssh-options]
+                [--archive-host=hostname]
+                [--archive-port=port] [--archive-user=username]
+                [--help]
+
+ pg_probackup validate -B backup-path [--instance=instance_name]
+                [-i backup-id] [--progress] [-j num-threads]
+                [--recovery-target-time=time|--recovery-target-xid=xid
+                 |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]]
+                [--recovery-target-timeline=timeline]
+                [--recovery-target-name=target-name]
+                [--skip-block-validation]
+                [--help]
+
+ pg_probackup checkdb [-B backup-path] [--instance=instance_name]
+                [-D pgdata-path] [--progress] [-j num-threads]
+                [--amcheck] [--skip-block-validation]
+                [--heapallindexed] [--checkunique]
+                [--help]
+
+ pg_probackup show -B backup-path
+                [--instance=instance_name [-i backup-id]]
+                [--format=format] [--archive]
+                [--no-color] [--help]
+
+ pg_probackup delete -B backup-path --instance=instance_name
+                [-j num-threads] [--progress]
+                [--retention-redundancy=retention-redundancy]
+                [--retention-window=retention-window]
+                [--wal-depth=wal-depth]
+                [-i backup-id | --delete-expired | --merge-expired | --status=backup_status]
+                [--delete-wal]
+                [--dry-run] [--no-validate] [--no-sync]
+                [--help]
+
+ pg_probackup merge -B backup-path --instance=instance_name
+                -i backup-id [--progress] [-j num-threads]
+                [--no-validate] [--no-sync]
+                [--help]
+
+ pg_probackup add-instance -B backup-path -D pgdata-path
+                --instance=instance_name
+                [--external-dirs=external-directories-paths]
+                [--remote-proto] [--remote-host]
+                [--remote-port] [--remote-path] [--remote-user]
+                [--ssh-options]
+                [--help]
+
+ pg_probackup del-instance -B backup-path
+                --instance=instance_name
+                [--help]
+
+ pg_probackup archive-push -B backup-path --instance=instance_name
+                --wal-file-name=wal-file-name
+                [--wal-file-path=wal-file-path]
+                [-j num-threads] [--batch-size=batch_size]
+                [--archive-timeout=timeout]
+                [--no-ready-rename] [--no-sync]
+                [--overwrite] [--compress]
+                [--compress-algorithm=compress-algorithm]
+                [--compress-level=compress-level]
+                [--remote-proto] [--remote-host]
+                [--remote-port] [--remote-path] [--remote-user]
+                [--ssh-options]
+                [--help]
+
+ pg_probackup archive-get -B backup-path --instance=instance_name
+                --wal-file-path=wal-file-path
+                --wal-file-name=wal-file-name
+                [-j num-threads] [--batch-size=batch_size]
+                [--no-validate-wal]
+                [--remote-proto] [--remote-host]
+                [--remote-port] [--remote-path] [--remote-user]
+                [--ssh-options]
+                [--help]
+
+ pg_probackup catchup -b catchup-mode
+                --source-pgdata=path_to_pgdata_on_remote_server
+                --destination-pgdata=path_to_local_dir
+                [--stream [-S slot-name] [--temp-slot | --perm-slot]]
+                [-j num-threads]
+                [-T OLDDIR=NEWDIR]
+                [--exclude-path=path_prefix]
+                [-d dbname] [-h host] [-p port] [-U username]
+                [-w --no-password] [-W --password]
+                [--remote-proto] [--remote-host]
+                [--remote-port] [--remote-path] [--remote-user]
+                [--ssh-options]
+                [--dry-run]
+                [--help]
+
+Подробнее читайте на сайте <https://github.com/postgrespro/pg_probackup>.
+Сообщайте об ошибках в <https://github.com/postgrespro/pg_probackup/issues>.
@@ -1 +1 @@
-pg_probackup 2.5.5
+pg_probackup 2.5.6
@@ -89,11 +89,7 @@ def dir_files(base_dir):
 
 def is_enterprise():
     # pg_config --help
-    if os.name == 'posix':
-        cmd = [os.environ['PG_CONFIG'], '--pgpro-edition']
-
-    elif os.name == 'nt':
-        cmd = [[os.environ['PG_CONFIG']], ['--pgpro-edition']]
+    cmd = [os.environ['PG_CONFIG'], '--pgpro-edition']
 
     p = subprocess.Popen(
         cmd,
@@ -102,6 +98,18 @@ def is_enterprise():
     )
     return b'postgrespro.ru' in p.communicate()[0]
 
+
+def is_nls_enabled():
+    cmd = [os.environ['PG_CONFIG'], '--configure']
+
+    p = subprocess.Popen(
+        cmd,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE
+    )
+    return b'enable-nls' in p.communicate()[0]
+
+
 class ProbackupException(Exception):
     def __init__(self, message, cmd):
         self.message = message
@@ -147,6 +155,7 @@ def slow_start(self, replica=False):
 class ProbackupTest(object):
     # Class attributes
     enterprise = is_enterprise()
+    enable_nls = is_nls_enabled()
 
     def __init__(self, *args, **kwargs):
         super(ProbackupTest, self).__init__(*args, **kwargs)
@@ -180,8 +189,8 @@ class ProbackupTest(object):
         self.test_env['LC_MESSAGES'] = 'C'
         self.test_env['LC_TIME'] = 'C'
 
-        self.gdb = 'PGPROBACKUP_GDB' in os.environ and \
-            os.environ['PGPROBACKUP_GDB'] == 'ON'
+        self.gdb = 'PGPROBACKUP_GDB' in self.test_env and \
+            self.test_env['PGPROBACKUP_GDB'] == 'ON'
 
         self.paranoia = 'PG_PROBACKUP_PARANOIA' in self.test_env and \
             self.test_env['PG_PROBACKUP_PARANOIA'] == 'ON'
@@ -810,7 +819,7 @@ class ProbackupTest(object):
         if self.verbose:
             print(self.cmd)
         if gdb:
-            return GDBobj([binary_path] + command, self.verbose)
+            return GDBobj([binary_path] + command, self)
         if asynchronous:
             return subprocess.Popen(
                 [binary_path] + command,
@@ -1861,22 +1870,34 @@ class ProbackupTest(object):
         self.assertFalse(fail, error_message)
 
     def gdb_attach(self, pid):
-        return GDBobj([str(pid)], self.verbose, attach=True)
+        return GDBobj([str(pid)], self, attach=True)
+
+    def _check_gdb_flag_or_skip_test(self):
+        if not self.gdb:
+            self.skipTest(
+                "Specify PGPROBACKUP_GDB and build without "
+                "optimizations for run this test"
+            )
 
 
 class GdbException(Exception):
-    def __init__(self, message=False):
+    def __init__(self, message="False"):
         self.message = message
 
     def __str__(self):
         return '\n ERROR: {0}\n'.format(repr(self.message))
 
 
-class GDBobj(ProbackupTest):
-    def __init__(self, cmd, verbose, attach=False):
-        self.verbose = verbose
+class GDBobj:
+    def __init__(self, cmd, env, attach=False):
+        self.verbose = env.verbose
         self.output = ''
 
+        # Check gdb flag is set up
+        if not env.gdb:
+            raise GdbException("No `PGPROBACKUP_GDB=on` is set, "
+                  "test should call ProbackupTest::check_gdb_flag_or_skip_test() on its start "
+                  "and be skipped")
         # Check gdb presense
         try:
             gdb_version, _ = subprocess.Popen(
@@ -17,6 +17,8 @@ class LockingTest(ProbackupTest, unittest.TestCase):
         run validate, expect it to successfully executed,
         concurrent RUNNING backup with pid file and active process is legal
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         node = self.make_simple_node(
             base_dir=os.path.join(module_name, fname, 'node'),
@@ -72,6 +74,8 @@ class LockingTest(ProbackupTest, unittest.TestCase):
         RUNNING backup with pid file AND without active pid is legal,
         but his status must be changed to ERROR and pid file is deleted
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         node = self.make_simple_node(
             base_dir=os.path.join(module_name, fname, 'node'),
@@ -142,6 +146,8 @@ class LockingTest(ProbackupTest, unittest.TestCase):
         RUNNING backup with pid file AND without active pid is legal,
         but his status must be changed to ERROR and pid file is deleted
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         node = self.make_simple_node(
             base_dir=os.path.join(module_name, fname, 'node'),
@@ -240,6 +246,8 @@ class LockingTest(ProbackupTest, unittest.TestCase):
         RUNNING backup without pid file AND without active pid is legal,
         his status must be changed to ERROR
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         node = self.make_simple_node(
             base_dir=os.path.join(module_name, fname, 'node'),
@@ -310,6 +318,8 @@ class LockingTest(ProbackupTest, unittest.TestCase):
         Expect restore to sucseed because read-only locks
         do not conflict
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         node = self.make_simple_node(
             base_dir=os.path.join(module_name, fname, 'node'),
@@ -352,6 +362,8 @@ class LockingTest(ProbackupTest, unittest.TestCase):
         Expect restore to fail because validation of
         intermediate backup is impossible
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         node = self.make_simple_node(
             base_dir=os.path.join(module_name, fname, 'node'),
@@ -443,6 +455,8 @@ class LockingTest(ProbackupTest, unittest.TestCase):
         and stop it in the middle, delete full backup.
         Expect it to fail.
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         node = self.make_simple_node(
             base_dir=os.path.join(module_name, fname, 'node'),
@@ -585,6 +599,8 @@ class LockingTest(ProbackupTest, unittest.TestCase):
         """
         Make sure that shared lock leaves no files with pids
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         node = self.make_simple_node(
             base_dir=os.path.join(module_name, fname, 'node'),
@@ -12,6 +12,10 @@ class LogTest(ProbackupTest, unittest.TestCase):
     # @unittest.expectedFailure
     # PGPRO-2154
     def test_log_rotation(self):
+        """
+        """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         node = self.make_simple_node(
             base_dir=os.path.join(module_name, fname, 'node'),
@@ -975,6 +975,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         """
         Check that failed MERGE can be continued
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -1051,6 +1053,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         """
         Fail merge via gdb, corrupt DELTA backup, try to continue merge
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -1148,6 +1152,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         """
         Check that failed MERGE on delete can be continued
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -1219,6 +1225,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         Check that failed MERGE cannot be continued if intermediate
         backup is missing.
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -1409,6 +1417,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         check that crashing after opening backup.control
         for writing will not result in losing backup metadata
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -1461,6 +1471,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         for writing will not result in losing metadata about backup files
         TODO: rewrite
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -1552,6 +1564,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         for writing will not result in losing metadata about backup files
         TODO: rewrite
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -1639,6 +1653,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
     def test_failed_merge_after_delete(self):
         """
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -1720,6 +1736,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
     def test_failed_merge_after_delete_1(self):
         """
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -1796,6 +1814,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
     def test_failed_merge_after_delete_2(self):
         """
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -1858,6 +1878,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
     def test_failed_merge_after_delete_3(self):
         """
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -2281,6 +2303,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
     def test_idempotent_merge(self):
         """
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -2580,6 +2604,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         page header map cannot be trusted when
         running retry
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -2626,6 +2652,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
     def test_missing_data_file(self):
         """
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -2684,6 +2712,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
     def test_missing_non_data_file(self):
         """
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -2741,6 +2771,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
     def test_merge_remote_mode(self):
         """
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -1,6 +1,7 @@
 import unittest
 import os
 from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
+import locale
 
 
 module_name = 'option'
@@ -23,7 +24,7 @@ class OptionTest(ProbackupTest, unittest.TestCase):
         """help options"""
         with open(os.path.join(self.dir_path, "expected/option_version.out"), "rb") as version_out:
             self.assertIn(
-                version_out.read().decode("utf-8"),
+                version_out.read().decode("utf-8").strip(),
                 self.run_pb(["--version"])
             )
 
@ -226,3 +227,17 @@ class OptionTest(ProbackupTest, unittest.TestCase):
|
|||||||
|
|
||||||
# Clean after yourself
|
# Clean after yourself
|
||||||
self.del_test_dir(module_name, fname)
|
self.del_test_dir(module_name, fname)
|
||||||
|
|
||||||
|
# @unittest.skip("skip")
|
||||||
|
def test_help_6(self):
|
||||||
|
"""help options"""
|
||||||
|
if ProbackupTest.enable_nls:
|
||||||
|
self.test_env['LC_ALL'] = 'ru_RU.utf-8'
|
||||||
|
with open(os.path.join(self.dir_path, "expected/option_help_ru.out"), "rb") as help_out:
|
||||||
|
self.assertEqual(
|
||||||
|
self.run_pb(["--help"]),
|
||||||
|
help_out.read().decode("utf-8")
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
return unittest.skip(
|
||||||
|
'You need configure PostgreSQL with --enabled-nls option for this test')
|
||||||
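
Two separate fixes in the option tests: the --version check now strips the expected file content before the substring match, and the new test_help_6 compares localized --help output against expected/option_help_ru.out when the build carries NLS. Why the strip() matters, in a self-contained sketch (both strings are hypothetical, not taken from the repository):

    expected = "pg_probackup 2.5.8\n"   # hypothetical option_version.out content
    actual = "pg_probackup 2.5.8"       # hypothetical run_pb(["--version"]) output
    assert expected not in actual       # raw read() trips over the trailing newline
    assert expected.strip() in actual   # stripped comparison matches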
@@ -18,11 +18,8 @@ class BugTest(ProbackupTest, unittest.TestCase):
         """
        https://jira.postgrespro.ru/browse/PGPRO-2068
        """
-        if not self.gdb:
-            self.skipTest(
-                "Specify PGPROBACKUP_GDB and build without "
-                "optimizations for run this test"
-            )
+        self._check_gdb_flag_or_skip_test()
+
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
@@ -822,6 +822,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
     def test_ptrack_vacuum_full(self):
         """make node, make full and ptrack stream backups,
            restore them and check data correctness"""
+        self._check_gdb_flag_or_skip_test()
+
         backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
         node = self.make_simple_node(
             base_dir=os.path.join(module_name, self.fname, 'node'),
@@ -539,11 +539,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
         start backup from replica, during backup promote replica
        check that backup is failed
        """
-        if not self.gdb:
-            self.skipTest(
-                "Specify PGPROBACKUP_GDB and build without "
-                "optimizations for run this test"
-            )
+        self._check_gdb_flag_or_skip_test()
+
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        master = self.make_simple_node(
@@ -634,11 +631,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
     def test_replica_stop_lsn_null_offset(self):
         """
         """
-        if not self.gdb:
-            self.skipTest(
-                "Specify PGPROBACKUP_GDB and build without "
-                "optimizations for run this test"
-            )
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         master = self.make_simple_node(
@@ -722,11 +716,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
     def test_replica_stop_lsn_null_offset_next_record(self):
         """
         """
-        if not self.gdb:
-            self.skipTest(
-                "Specify PGPROBACKUP_GDB and build without "
-                "optimizations for run this test"
-            )
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         master = self.make_simple_node(
@@ -749,7 +740,6 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):

         # freeze bgwriter to get rid of RUNNING XACTS records
         bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0]
-        gdb_checkpointer = self.gdb_attach(bgwriter_pid)

         self.backup_node(backup_dir, 'master', master)

@@ -828,6 +818,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
     def test_archive_replica_null_offset(self):
         """
         """
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         master = self.make_simple_node(
@@ -998,11 +990,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
         make archive master, take full and page archive backups from master,
        set replica, make archive backup from replica
        """
-        if not self.gdb:
-            self.skipTest(
-                "Specify PGPROBACKUP_GDB and build without "
-                "optimizations for run this test"
-            )
+        self._check_gdb_flag_or_skip_test()
+
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        master = self.make_simple_node(
@@ -1104,11 +1093,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
     def test_start_stop_lsn_in_the_same_segno(self):
         """
         """
-        if not self.gdb:
-            self.skipTest(
-                "Specify PGPROBACKUP_GDB and build without "
-                "optimizations for run this test"
-            )
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         master = self.make_simple_node(
@@ -1131,7 +1116,6 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):

         # freeze bgwriter to get rid of RUNNING XACTS records
         bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0]
-        gdb_checkpointer = self.gdb_attach(bgwriter_pid)

         self.backup_node(backup_dir, 'master', master, options=['--stream'])

@@ -2379,6 +2379,8 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
     # @unittest.skip("skip")
     def test_restore_concurrent_drop_table(self):
         """"""
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -3796,6 +3798,8 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
     # @unittest.skip("skip")
     def test_concurrent_restore(self):
         """"""
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
@@ -3915,3 +3919,59 @@ class RestoreTest(ProbackupTest, unittest.TestCase):

         # Clean after yourself
         self.del_test_dir(module_name, fname)
+
+    # @unittest.skip("skip")
+    def test_restore_with_waldir(self):
+        """recovery using tablespace-mapping option and page backup"""
+        fname = self.id().split('.')[3]
+        node = self.make_simple_node(
+            base_dir=os.path.join(module_name, fname, 'node'),
+            initdb_params=['--data-checksums'])
+
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+
+        with node.connect("postgres") as con:
+            con.execute(
+                "CREATE TABLE tbl AS SELECT * "
+                "FROM generate_series(0,3) AS integer")
+            con.commit()
+
+        # Full backup
+        backup_id = self.backup_node(backup_dir, 'node', node)
+
+        node.stop()
+        node.cleanup()
+
+        # Create waldir
+        waldir_path = os.path.join(node.base_dir, "waldir")
+        os.makedirs(waldir_path)
+
+        # Test recovery from latest
+        self.assertIn(
+            "INFO: Restore of backup {0} completed.".format(backup_id),
+            self.restore_node(
+                backup_dir, 'node', node,
+                options=[
+                    "-X", "%s" % (waldir_path)]),
+            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                repr(self.output), self.cmd))
+        node.slow_start()
+
+        count = node.execute("postgres", "SELECT count(*) FROM tbl")
+        self.assertEqual(count[0][0], 4)
+
+        # check pg_wal is symlink
+        if node.major_version >= 10:
+            wal_path=os.path.join(node.data_dir, "pg_wal")
+        else:
+            wal_path=os.path.join(node.data_dir, "pg_xlog")
+
+        self.assertEqual(os.path.islink(wal_path), True)
+
+        # Clean after yourself
+        self.del_test_dir(module_name, fname)
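
The new test_restore_with_waldir exercises restoring with WAL relocated outside the data directory: after restore_node with -X <waldir>, the restored cluster keeps pg_wal (pg_xlog before PostgreSQL 10) as a symlink into the external directory. A standalone sketch of that post-restore check, with a hypothetical data directory path:

    import os

    data_dir = "/tmp/restored_node/data"         # hypothetical restored data directory
    wal_path = os.path.join(data_dir, "pg_wal")  # "pg_xlog" on PostgreSQL < 10

    # After restore with -X, pg_wal should be a symlink into the external waldir.
    print(os.path.islink(wal_path), os.path.realpath(wal_path))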
@@ -1499,6 +1499,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
         FULL
        -------window
        """
+        self._check_gdb_flag_or_skip_test()
+
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
@@ -1546,6 +1548,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
         FULL
        -------window
        """
+        self._check_gdb_flag_or_skip_test()
+
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
@@ -1588,6 +1592,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase):

     def test_retention_redundancy_overlapping_chains(self):
         """"""
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         node = self.make_simple_node(
             base_dir=os.path.join(module_name, fname, 'node'),
@@ -1636,6 +1642,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase):

     def test_retention_redundancy_overlapping_chains_1(self):
         """"""
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         node = self.make_simple_node(
             base_dir=os.path.join(module_name, fname, 'node'),
@@ -1744,6 +1752,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
         """
        Check that retention purge works correctly with MERGING backups
        """
+        self._check_gdb_flag_or_skip_test()
+
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
@@ -2536,6 +2546,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
         """
        https://github.com/postgrespro/pg_probackup/issues/328
        """
+        self._check_gdb_flag_or_skip_test()
+
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
76 tests/time_consuming.py Normal file
@@ -0,0 +1,76 @@
+import os
+import unittest
+from .helpers.ptrack_helpers import ProbackupTest
+import subprocess
+from time import sleep
+
+module_name = 'time_consuming'
+
+class TimeConsumingTests(ProbackupTest, unittest.TestCase):
+    def test_pbckp150(self):
+        """
+        https://jira.postgrespro.ru/browse/PBCKP-150
+        create a node filled with pgbench
+        create FULL backup followed by PTRACK backup
+        run pgbench, vacuum VERBOSE FULL and ptrack backups in parallel
+        """
+        # init node
+        fname = self.id().split('.')[3]
+        node = self.make_simple_node(
+            base_dir=os.path.join(module_name, fname, 'node'),
+            set_replication=True,
+            initdb_params=['--data-checksums'])
+        node.append_conf('postgresql.conf',
+            """
+            max_connections = 100
+            wal_keep_size = 16000
+            ptrack.map_size = 1
+            shared_preload_libraries='ptrack'
+            log_statement = 'none'
+            fsync = off
+            log_checkpoints = on
+            autovacuum = off
+            """)
+
+        # init probackup and add an instance
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+
+        # run the node and init ptrack
+        node.slow_start()
+        node.safe_psql("postgres", "CREATE EXTENSION ptrack")
+        # populate it with pgbench
+        node.pgbench_init(scale=5)
+
+        # FULL backup followed by PTRACK backup
+        self.backup_node(backup_dir, 'node', node, options=['--stream'])
+        self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream'])
+
+        # run ordinary pgbench scenario to imitate some activity and another pgbench for vacuuming in parallel
+        nBenchDuration = 30
+        pgbench = node.pgbench(options=['-c', '20', '-j', '8', '-T', str(nBenchDuration)])
+        with open('/tmp/pbckp150vacuum.sql', 'w') as f:
+            f.write('VACUUM (FULL) pgbench_accounts, pgbench_tellers, pgbench_history; SELECT pg_sleep(1);\n')
+        pgbenchval = node.pgbench(options=['-c', '1', '-f', '/tmp/pbckp150vacuum.sql', '-T', str(nBenchDuration)])
+
+        # several PTRACK backups
+        for i in range(nBenchDuration):
+            print("[{}] backing up PTRACK diff...".format(i+1))
+            self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream', '--log-level-console', 'VERBOSE'])
+            sleep(0.1)
+            # if the activity pgbench has finished, stop backing up
+            if pgbench.poll() is not None:
+                break
+
+        pgbench.kill()
+        pgbenchval.kill()
+        pgbench.wait()
+        pgbenchval.wait()
+
+        backups = self.show_pb(backup_dir, 'node')
+        for b in backups:
+            self.assertEqual("OK", b['status'])
+
+        # Clean after yourself
+        self.del_test_dir(module_name, fname)
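
The new module is slow by design (a 30-second pgbench run driving a loop of PTRACK backups), so it is convenient to run on its own while iterating; a sketch, assuming the module is importable as tests.time_consuming per the path above:

    import unittest

    # Load and run only the long-running module added by this commit.
    suite = unittest.defaultTestLoader.loadTestsFromName('tests.time_consuming')
    unittest.TextTestRunner(verbosity=2).run(suite)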
@@ -2,6 +2,7 @@ import os
 import unittest
 from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
 from datetime import datetime, timedelta
+from pathlib import Path
 import subprocess
 from sys import exit
 import time
@@ -58,7 +59,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
             with open(log_file_path) as f:
                 log_content = f.read()
                 self.assertIn(
-                    'File: "{0}" blknum 1, empty page'.format(file),
+                    'File: "{0}" blknum 1, empty page'.format(Path(file).as_posix()),
                     log_content,
                     'Failed to detect nullified block')

@@ -1088,11 +1089,8 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
         """
        check that interrupt during validation is handled correctly
        """
-        if not self.gdb:
-            self.skipTest(
-                "Specify PGPROBACKUP_GDB and build without "
-                "optimizations for run this test"
-            )
+        self._check_gdb_flag_or_skip_test()
+
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
@@ -3564,6 +3562,8 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
     # @unittest.skip("skip")
     def test_validation_after_backup(self):
         """"""
+        self._check_gdb_flag_or_skip_test()
+
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(
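
The Path(file).as_posix() change makes the expected log line platform-neutral: on Windows the relative file path is assembled with backslashes, while pg_probackup logs forward slashes. A quick illustration (the sample path is hypothetical; PureWindowsPath keeps the demo runnable on any OS):

    from pathlib import PureWindowsPath

    # as_posix() turns Windows backslashes into the forward slashes
    # that appear in the pg_probackup log output.
    print(PureWindowsPath(r"base\1\16384").as_posix())  # -> base/1/16384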
@@ -47,7 +47,7 @@ cd postgres # Go to postgres dir
 if [ "$PG_PROBACKUP_PTRACK" = "ON" ]; then
     git apply -3 ../ptrack/patches/${PTRACK_PATCH_PG_BRANCH}-ptrack-core.diff
 fi
-CFLAGS="-O0" ./configure --prefix=$PGHOME --enable-debug --enable-cassert --enable-depend --enable-tap-tests
+CFLAGS="-O0" ./configure --prefix=$PGHOME --enable-debug --enable-cassert --enable-depend --enable-tap-tests --enable-nls
 make -s -j$(nproc) install
 #make -s -j$(nproc) -C 'src/common' install
 #make -s -j$(nproc) -C 'src/port' install
@@ -100,11 +100,20 @@ source pyenv/bin/activate
 pip3 install testgres

 echo "############### Testing:"
+echo PG_PROBACKUP_PARANOIA=${PG_PROBACKUP_PARANOIA}
+echo ARCHIVE_COMPRESSION=${ARCHIVE_COMPRESSION}
+echo PGPROBACKUPBIN_OLD=${PGPROBACKUPBIN_OLD}
+echo PGPROBACKUPBIN=${PGPROBACKUPBIN}
+echo PGPROBACKUP_SSH_REMOTE=${PGPROBACKUP_SSH_REMOTE}
+echo PGPROBACKUP_GDB=${PGPROBACKUP_GDB}
+echo PG_PROBACKUP_PTRACK=${PG_PROBACKUP_PTRACK}
 if [ "$MODE" = "basic" ]; then
     export PG_PROBACKUP_TEST_BASIC=ON
+    echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC}
     python3 -m unittest -v tests
     python3 -m unittest -v tests.init
 else
+    echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC}
     python3 -m unittest -v tests.$MODE
 fi

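
Adding --enable-nls to the configure line is what gives the new Russian --help test something to exercise: ProbackupTest.enable_nls (used in the option tests above) has to reflect whether the server build carries NLS. One way such a flag could be derived, shown purely as an assumption, is to grep the configure options reported by pg_config:

    import subprocess

    # Hypothetical detection sketch: pg_config --configure echoes the
    # options the PostgreSQL build was configured with.
    configure = subprocess.run(
        ['pg_config', '--configure'], capture_output=True, text=True).stdout
    enable_nls = '--enable-nls' in configure
    print(enable_nls)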