mirror of https://github.com/postgrespro/pg_probackup.git synced 2025-07-14 06:54:15 +02:00

Merge branch 'REL_2_5' into PBCKP-155

Authored by dlepikhova on 2022-08-04 13:22:29 +05:00; committed by GitHub.
47 changed files with 2993 additions and 222 deletions

.github/workflows/build.yml (new file, 94 lines)

@ -0,0 +1,94 @@
name: Build Probackup
on:
push:
branches:
- "**"
# Runs triggered by pull requests are disabled to prevent executing potentially unsafe code from public pull requests
# pull_request:
# branches:
# - main
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
jobs:
build-win2019:
runs-on:
- windows-2019
env:
zlib_dir: C:\dep\zlib
steps:
- uses: actions/checkout@v2
- name: Install pacman packages
run: |
$env:PATH += ";C:\msys64\usr\bin"
pacman -S --noconfirm --needed bison flex
- name: Make zlib
run: |
git clone -b v1.2.11 --depth 1 https://github.com/madler/zlib.git
cd zlib
cmake -DCMAKE_INSTALL_PREFIX:PATH=C:\dep\zlib -G "Visual Studio 16 2019" .
cmake --build . --config Release --target ALL_BUILD
cmake --build . --config Release --target INSTALL
copy C:\dep\zlib\lib\zlibstatic.lib C:\dep\zlib\lib\zdll.lib
copy C:\dep\zlib\bin\zlib.dll C:\dep\zlib\lib
- name: Get Postgres sources
run: git clone -b REL_14_STABLE https://github.com/postgres/postgres.git
# Copy ptrack to contrib to build the ptrack extension
# Convert line breaks in the patch file to LF otherwise the patch doesn't apply
- name: Get Ptrack sources
run: |
git clone -b master --depth 1 https://github.com/postgrespro/ptrack.git
Copy-Item -Path ptrack -Destination postgres\contrib -Recurse
(Get-Content ptrack\patches\REL_14_STABLE-ptrack-core.diff -Raw).Replace("`r`n","`n") | Set-Content ptrack\patches\REL_14_STABLE-ptrack-core.diff -Force -NoNewline
cd postgres
git apply -3 ../ptrack/patches/REL_14_STABLE-ptrack-core.diff
- name: Build Postgres
run: |
$env:PATH += ";C:\msys64\usr\bin"
cd postgres\src\tools\msvc
(Get-Content config_default.pl) -Replace "zlib *=>(.*?)(?=,? *#)", "zlib => '${{ env.zlib_dir }}'" | Set-Content config.pl
cmd.exe /s /c "`"C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat`" amd64 && .\build.bat"
- name: Build Probackup
run: cmd.exe /s /c "`"C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat`" amd64 && perl .\gen_probackup_project.pl `"${{ github.workspace }}`"\postgres"
- name: Install Postgres
run: |
cd postgres
src\tools\msvc\install.bat postgres_install
- name: Install Testgres
run: |
git clone -b no-port-for --single-branch --depth 1 https://github.com/postgrespro/testgres.git
cd testgres
python setup.py install
# Grant the GitHub runner user full control of the workspace for initdb to successfully process the data folder
- name: Test Probackup
run: |
icacls.exe "${{ github.workspace }}" /grant "${env:USERNAME}:(OI)(CI)F"
$env:PATH += ";${{ github.workspace }}\postgres\postgres_install\lib;${{ env.zlib_dir }}\lib"
$Env:LC_MESSAGES = "English"
$Env:PG_CONFIG = "${{ github.workspace }}\postgres\postgres_install\bin\pg_config.exe"
$Env:PGPROBACKUPBIN = "${{ github.workspace }}\postgres\Release\pg_probackup\pg_probackup.exe"
$Env:PG_PROBACKUP_PTRACK = "ON"
If (!$Env:MODE -Or $Env:MODE -Eq "basic") {
$Env:PG_PROBACKUP_TEST_BASIC = "ON"
python -m unittest -v tests
python -m unittest -v tests.init
} else {
python -m unittest -v tests.$Env:MODE
}

.gitignore (1 line changed)

@ -50,7 +50,6 @@
/docker-compose.yml
/Dockerfile
/Dockerfile.in
/run_tests.sh
/make_dockerfile.sh
/backup_restore.sh

.travis.yml

@ -41,11 +41,13 @@ env:
# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=delta
# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=locking
# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=merge
# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=option
# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=page
# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=ptrack
# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=replica
# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=retention
# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=restore
# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=time_consuming
jobs:
allow_failures:

README.md

@ -224,3 +224,17 @@ Postgres Professional, Moscow, Russia.
## Credits
`pg_probackup` utility is based on `pg_arman`, which was originally written by NTT and then developed and maintained by Michael Paquier.
### Localization files (*.po)
How to add a new translation language:
1. Add the --enable-nls flag to configure.
2. Build postgres.
3. Add the required source files to GETTEXT_FILES in the pg_probackup nls.mk.
4. Run 'make update-po' in the pg_probackup folder.
5. This generates the progname.pot template file. Copy its content into a new .po file for the desired language and translate it.
6. Add the new language to AVAIL_LANGUAGES in the pg_probackup nls.mk.
A command sketch of these steps follows below. For more information, see:
https://postgrespro.ru/docs/postgresql/12/nls-translator
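A minimal command sketch of steps 3-6, assuming a hypothetical new French translation (the fr language code is illustrative; with CATALOG_NAME = pg_probackup in nls.mk, the generated template is po/pg_probackup.pot):
```commandline
cd contrib/pg_probackup
# steps 3 and 6: nls.mk lists the translatable sources and the languages, e.g.
#   GETTEXT_FILES = src/help.c
#   AVAIL_LANGUAGES = ru fr
make update-po                    # step 4: regenerate the .pot template
cp po/pg_probackup.pot po/fr.po   # step 5: then translate the msgstr entries in fr.po
```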

doc/pgprobackup.xml

@ -3563,6 +3563,14 @@ pg_probackup catchup -b <replaceable>catchup_mode</replaceable> --source-pgdata=
of threads with the <option>--threads</option> option:
<programlisting>
pg_probackup catchup -b <replaceable>catchup_mode</replaceable> --source-pgdata=<replaceable>path_to_pgdata_on_remote_server</replaceable> --destination-pgdata=<replaceable>path_to_local_dir</replaceable> --stream --threads=<replaceable>num_threads</replaceable>
</programlisting>
</para>
<para>
Before cloning/synchronising a <productname>PostgreSQL</productname> instance, you can run the
<command>catchup</command> command with the <option>--dry-run</option> flag
to estimate the size of the data files to be transferred without making any changes on disk:
<programlisting>
pg_probackup catchup -b <replaceable>catchup_mode</replaceable> --source-pgdata=<replaceable>path_to_pgdata_on_remote_server</replaceable> --destination-pgdata=<replaceable>path_to_local_dir</replaceable> --stream --dry-run
</programlisting>
</para>
<para>
@ -3576,7 +3584,7 @@ pg_probackup catchup --source-pgdata=/master-pgdata --destination-pgdata=/replic
<para>
Another example shows how you can add a new remote standby server with the <productname>PostgreSQL</productname> data directory <filename>/replica-pgdata</filename> by running the <command>catchup</command> command in the <literal>FULL</literal> mode
on four parallel threads:
<programlisting>
pg_probackup catchup --source-pgdata=/master-pgdata --destination-pgdata=/replica-pgdata -p 5432 -d postgres -U remote-postgres-user --stream --backup-mode=FULL --remote-host=remote-hostname --remote-user=remote-unix-username -j 4
</programlisting>
</para>
@ -4482,7 +4490,7 @@ pg_probackup archive-get -B <replaceable>backup_dir</replaceable> --instance <re
pg_probackup catchup -b <replaceable>catchup_mode</replaceable>
--source-pgdata=<replaceable>path_to_pgdata_on_remote_server</replaceable>
--destination-pgdata=<replaceable>path_to_local_dir</replaceable>
[--help] [-j | --threads=<replaceable>num_threads</replaceable>] [--stream]
[--help] [-j | --threads=<replaceable>num_threads</replaceable>] [--stream] [--dry-run]
[--temp-slot] [-P | --perm-slot] [-S | --slot=<replaceable>slot_name</replaceable>]
[--exclude-path=<replaceable>PATHNAME</replaceable>]
[-T <replaceable>OLDDIR</replaceable>=<replaceable>NEWDIR</replaceable>]
@ -4571,6 +4579,19 @@ pg_probackup catchup -b <replaceable>catchup_mode</replaceable>
</listitem>
</varlistentry>
<varlistentry>
<term><option>--dry-run</option></term>
<listitem>
<para>
Displays the total size of the files to be transferred by <command>catchup</command>.
This flag initiates a trial run of <command>catchup</command> that does not
actually create, delete, or move any files on disk; WAL streaming is skipped as well.
Use this flag to check that all the options are correct and that
cloning/synchronising is ready to run.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term><option>-x</option>=<replaceable>path_prefix</replaceable></term>
<term><option>--exclude-path</option>=<replaceable>path_prefix</replaceable></term>
@ -4591,17 +4612,6 @@ pg_probackup catchup -b <replaceable>catchup_mode</replaceable>
</listitem>
</varlistentry>
<varlistentry>
<term><option>--stream</option></term>
<listitem>
<para>
Copies the instance in <link linkend="pbk-stream-mode">STREAM</link> WAL delivery mode,
including all the necessary WAL files by streaming them from
the instance server via replication protocol.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term><option>--temp-slot</option></term>
<listitem>

gen_probackup_project.pl

@ -13,11 +13,11 @@ if (($#ARGV+1)==1)
{
$pgsrc = shift @ARGV;
if($pgsrc eq "--help"){
print STDERR "Usage $0 pg-source-dir \n";
print STDERR "Like this: \n";
print STDERR "$0 C:/PgProject/postgresql.10dev/postgrespro \n";
print STDERR "May be need input this before: \n";
print STDERR "CALL \"C:\\Program Files (x86)\\Microsoft Visual Studio 12.0\\VC\\vcvarsall\" amd64\n";
print STDERR "Usage $0 pg-source-dir\n";
print STDERR "Like this:\n";
print STDERR "$0 C:/PgProject/postgresql.10dev/postgrespro\n";
print STDERR "May need to run this first:\n";
print STDERR "CALL \"C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Community\\VC\\Auxiliary\\Build\\vcvarsall.bat\" amd64\n";
exit 1;
}
}
@ -133,7 +133,7 @@ sub build_pgprobackup
unless (-d 'src/tools/msvc' && -d 'src');
# my $vsVersion = DetermineVisualStudioVersion();
my $vsVersion = '12.00';
my $vsVersion = '16.00';
$solution = CreateSolution($vsVersion, $config);

nls.mk (new file, 6 lines)

@ -0,0 +1,6 @@
# contrib/pg_probackup/nls.mk
CATALOG_NAME = pg_probackup
AVAIL_LANGUAGES = ru
GETTEXT_FILES = src/help.c
GETTEXT_TRIGGERS = $(FRONTEND_COMMON_GETTEXT_TRIGGERS)
GETTEXT_FLAGS = $(FRONTEND_COMMON_GETTEXT_FLAGS)


@ -20,7 +20,15 @@ ulimit -n 1024
if [ ${DISTRIB} = 'centos' ] ; then
sed -i 's|^baseurl=http://|baseurl=https://|g' /etc/yum.repos.d/*.repo
if [ ${DISTRIB_VERSION} = '8' ]; then
sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo
sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo
fi
yum update -y
if [ ${DISTRIB_VERSION} = '8' ]; then
sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo
sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo
fi
fi
# PACKAGES NEEDED


@ -15,7 +15,16 @@ PG_TOG=$(echo $PG_VERSION | sed 's|\.||g')
if [ ${DISTRIB} != 'rhel' -o ${DISTRIB_VERSION} != '7' ]; then
# update of rpm package is broken in rhel-7 (26/12/2022)
yum update -y
#yum update -y
if [ ${DISTRIB} = 'centos' -a ${DISTRIB_VERSION} = '8' ]; then
sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo
sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo
fi
yum update -y
if [ ${DISTRIB} = 'centos' -a ${DISTRIB_VERSION} = '8' ]; then
sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo
sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo
fi
fi
# yum upgrade -y || echo 'some packages in docker failed to upgrade'
# yum install -y sudo


@ -15,7 +15,15 @@ PG_TOG=$(echo $PG_VERSION | sed 's|\.||g')
if [ ${DISTRIB} != 'rhel' -o ${DISTRIB_VERSION} != '7' ]; then
# update of rpm package is broken in rhel-7 (26/12/2022)
if [ ${DISTRIB} = 'centos' -a ${DISTRIB_VERSION} = '8' ]; then
sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo
sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo
fi
yum update -y
if [ ${DISTRIB} = 'centos' -a ${DISTRIB_VERSION} = '8' ]; then
sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo
sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo
fi
fi
if [ ${PBK_EDITION} == 'ent' ]; then
@ -80,11 +88,13 @@ if [ $PBK_EDITION == 'std' ] ; then
# install POSTGRESQL
# rpm -ivh https://download.postgresql.org/pub/repos/yum/reporpms/EL-${DISTRIB_VERSION}-x86_64/pgdg-redhat-repo-latest.noarch.rpm
if [[ ${PG_VERSION} == '11' ]] || [[ ${PG_VERSION} == '12' ]]; then
rpm -ivh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/postgrespro-std-${PG_VERSION}.${DISTRIB}.yum-${PG_VERSION}-0.3.noarch.rpm
else
rpm -ivh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/postgrespro-std-${PG_VERSION}.${DISTRIB}.yum-${PG_VERSION}-0.3.noarch.rpm
fi
#if [[ ${PG_VERSION} == '11' ]] || [[ ${PG_VERSION} == '12' ]]; then
# rpm -ivh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/postgrespro-std-${PG_VERSION}.${DISTRIB}.yum-${PG_VERSION}-0.3.noarch.rpm
#else
# rpm -ivh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/postgrespro-std-${PG_VERSION}.${DISTRIB}.yum-${PG_VERSION}-0.3.noarch.rpm
#fi
curl -o pgpro-repo-add.sh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/pgpro-repo-add.sh
sh pgpro-repo-add.sh
if [[ ${PG_VERSION} == '9.6' ]]; then
yum install -y postgrespro${PG_TOG}-server.x86_64

po/ru.po (new file, 1880 lines; diff suppressed because it is too large)

src/catalog.c

@ -1084,15 +1084,15 @@ get_backup_filelist(pgBackup *backup, bool strict)
COMP_FILE_CRC32(true, content_crc, buf, strlen(buf));
get_control_value(buf, "path", path, NULL, true);
get_control_value(buf, "size", NULL, &write_size, true);
get_control_value(buf, "mode", NULL, &mode, true);
get_control_value(buf, "is_datafile", NULL, &is_datafile, true);
get_control_value(buf, "is_cfs", NULL, &is_cfs, false);
get_control_value(buf, "crc", NULL, &crc, true);
get_control_value(buf, "compress_alg", compress_alg_string, NULL, false);
get_control_value(buf, "external_dir_num", NULL, &external_dir_num, false);
get_control_value(buf, "dbOid", NULL, &dbOid, false);
get_control_value_str(buf, "path", path, sizeof(path),true);
get_control_value_int64(buf, "size", &write_size, true);
get_control_value_int64(buf, "mode", &mode, true);
get_control_value_int64(buf, "is_datafile", &is_datafile, true);
get_control_value_int64(buf, "is_cfs", &is_cfs, false);
get_control_value_int64(buf, "crc", &crc, true);
get_control_value_str(buf, "compress_alg", compress_alg_string, sizeof(compress_alg_string), false);
get_control_value_int64(buf, "external_dir_num", &external_dir_num, false);
get_control_value_int64(buf, "dbOid", &dbOid, false);
file = pgFileInit(path);
file->write_size = (int64) write_size;
@ -1107,28 +1107,28 @@ get_backup_filelist(pgBackup *backup, bool strict)
/*
* Optional fields
*/
if (get_control_value(buf, "linked", linked, NULL, false) && linked[0])
if (get_control_value_str(buf, "linked", linked, sizeof(linked), false) && linked[0])
{
file->linked = pgut_strdup(linked);
canonicalize_path(file->linked);
}
if (get_control_value(buf, "segno", NULL, &segno, false))
if (get_control_value_int64(buf, "segno", &segno, false))
file->segno = (int) segno;
if (get_control_value(buf, "n_blocks", NULL, &n_blocks, false))
if (get_control_value_int64(buf, "n_blocks", &n_blocks, false))
file->n_blocks = (int) n_blocks;
if (get_control_value(buf, "n_headers", NULL, &n_headers, false))
if (get_control_value_int64(buf, "n_headers", &n_headers, false))
file->n_headers = (int) n_headers;
if (get_control_value(buf, "hdr_crc", NULL, &hdr_crc, false))
if (get_control_value_int64(buf, "hdr_crc", &hdr_crc, false))
file->hdr_crc = (pg_crc32) hdr_crc;
if (get_control_value(buf, "hdr_off", NULL, &hdr_off, false))
if (get_control_value_int64(buf, "hdr_off", &hdr_off, false))
file->hdr_off = hdr_off;
if (get_control_value(buf, "hdr_size", NULL, &hdr_size, false))
if (get_control_value_int64(buf, "hdr_size", &hdr_size, false))
file->hdr_size = (int) hdr_size;
parray_append(files, file);

src/catchup.c

@ -2,7 +2,7 @@
*
* catchup.c: sync DB cluster
*
* Copyright (c) 2021, Postgres Professional
* Copyright (c) 2022, Postgres Professional
*
*-------------------------------------------------------------------------
*/
@ -507,16 +507,20 @@ catchup_multithreaded_copy(int num_threads,
/* Run threads */
thread_interrupted = false;
threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads);
for (i = 0; i < num_threads; i++)
if (!dry_run)
{
elog(VERBOSE, "Start thread num: %i", i);
pthread_create(&threads[i], NULL, &catchup_thread_runner, &(threads_args[i]));
for (i = 0; i < num_threads; i++)
{
elog(VERBOSE, "Start thread num: %i", i);
pthread_create(&threads[i], NULL, &catchup_thread_runner, &(threads_args[i]));
}
}
/* Wait threads */
for (i = 0; i < num_threads; i++)
{
pthread_join(threads[i], NULL);
if (!dry_run)
pthread_join(threads[i], NULL);
all_threads_successful &= threads_args[i].completed;
transfered_bytes_result += threads_args[i].transfered_bytes;
}
@ -706,9 +710,14 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
/* Start stream replication */
join_path_components(dest_xlog_path, dest_pgdata, PG_XLOG_DIR);
fio_mkdir(dest_xlog_path, DIR_PERMISSION, FIO_LOCAL_HOST);
start_WAL_streaming(source_conn, dest_xlog_path, &instance_config.conn_opt,
current.start_lsn, current.tli, false);
if (!dry_run)
{
fio_mkdir(dest_xlog_path, DIR_PERMISSION, FIO_LOCAL_HOST);
start_WAL_streaming(source_conn, dest_xlog_path, &instance_config.conn_opt,
current.start_lsn, current.tli, false);
}
else
elog(INFO, "WAL streaming skipping with --dry-run option");
source_filelist = parray_new();
@ -779,9 +788,9 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
/* Build the page map from ptrack information */
make_pagemap_from_ptrack_2(source_filelist, source_conn,
source_node_info.ptrack_schema,
source_node_info.ptrack_version_num,
dest_redo.lsn);
time(&end_time);
elog(INFO, "Pagemap successfully extracted, time elapsed: %.0f sec",
difftime(end_time, start_time));
@ -820,9 +829,9 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
char dirpath[MAXPGPATH];
join_path_components(dirpath, dest_pgdata, file->rel_path);
elog(VERBOSE, "Create directory '%s'", dirpath);
fio_mkdir(dirpath, DIR_PERMISSION, FIO_LOCAL_HOST);
if (!dry_run)
fio_mkdir(dirpath, DIR_PERMISSION, FIO_LOCAL_HOST);
}
else
{
@ -853,15 +862,18 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
elog(VERBOSE, "Create directory \"%s\" and symbolic link \"%s\"",
linked_path, to_path);
/* create tablespace directory */
if (fio_mkdir(linked_path, file->mode, FIO_LOCAL_HOST) != 0)
elog(ERROR, "Could not create tablespace directory \"%s\": %s",
linked_path, strerror(errno));
if (!dry_run)
{
/* create tablespace directory */
if (fio_mkdir(linked_path, file->mode, FIO_LOCAL_HOST) != 0)
elog(ERROR, "Could not create tablespace directory \"%s\": %s",
linked_path, strerror(errno));
/* create link to linked_path */
if (fio_symlink(linked_path, to_path, true, FIO_LOCAL_HOST) < 0)
elog(ERROR, "Could not create symbolic link \"%s\" -> \"%s\": %s",
linked_path, to_path, strerror(errno));
/* create link to linked_path */
if (fio_symlink(linked_path, to_path, true, FIO_LOCAL_HOST) < 0)
elog(ERROR, "Could not create symbolic link \"%s\" -> \"%s\": %s",
linked_path, to_path, strerror(errno));
}
}
}
@ -930,7 +942,10 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
char fullpath[MAXPGPATH];
join_path_components(fullpath, dest_pgdata, file->rel_path);
fio_delete(file->mode, fullpath, FIO_LOCAL_HOST);
if (!dry_run)
{
fio_delete(file->mode, fullpath, FIO_LOCAL_HOST);
}
elog(VERBOSE, "Deleted file \"%s\"", fullpath);
/* shrink dest pgdata list */
@ -961,7 +976,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
catchup_isok = transfered_datafiles_bytes != -1;
/* at last copy control file */
if (catchup_isok)
if (catchup_isok && !dry_run)
{
char from_fullpath[MAXPGPATH];
char to_fullpath[MAXPGPATH];
@ -972,7 +987,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
transfered_datafiles_bytes += source_pg_control_file->size;
}
if (!catchup_isok)
if (!catchup_isok && !dry_run)
{
char pretty_time[20];
char pretty_transfered_data_bytes[20];
@ -1010,14 +1025,18 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
pg_free(stop_backup_query_text);
}
wait_wal_and_calculate_stop_lsn(dest_xlog_path, stop_backup_result.lsn, &current);
if (!dry_run)
wait_wal_and_calculate_stop_lsn(dest_xlog_path, stop_backup_result.lsn, &current);
#if PG_VERSION_NUM >= 90600
/* Write backup_label */
Assert(stop_backup_result.backup_label_content != NULL);
pg_stop_backup_write_file_helper(dest_pgdata, PG_BACKUP_LABEL_FILE, "backup label",
stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len,
NULL);
if (!dry_run)
{
pg_stop_backup_write_file_helper(dest_pgdata, PG_BACKUP_LABEL_FILE, "backup label",
stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len,
NULL);
}
free(stop_backup_result.backup_label_content);
stop_backup_result.backup_label_content = NULL;
stop_backup_result.backup_label_content_len = 0;
@ -1040,6 +1059,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
#endif
/* wait for end of wal streaming and calculate wal size transfered */
if (!dry_run)
{
parray *wal_files_list = NULL;
wal_files_list = parray_new();
@ -1091,17 +1111,17 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
}
/* Sync all copied files unless '--no-sync' flag is used */
if (sync_dest_files)
if (sync_dest_files && !dry_run)
catchup_sync_destination_files(dest_pgdata, FIO_LOCAL_HOST, source_filelist, source_pg_control_file);
else
elog(WARNING, "Files are not synced to disk");
/* Cleanup */
if (dest_filelist)
if (dest_filelist && !dry_run)
{
parray_walk(dest_filelist, pgFileFree);
parray_free(dest_filelist);
}
parray_free(dest_filelist);
parray_walk(source_filelist, pgFileFree);
parray_free(source_filelist);
pgFileFree(source_pg_control_file);

src/data.c

@ -349,6 +349,8 @@ prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn,
Assert(false);
}
}
/* discard buffered data so that further read attempts re-read the page from disk, see PBCKP-150 */
fflush(in);
}
/*
@ -2030,10 +2032,10 @@ get_page_header(FILE *in, const char *fullpath, BackupPageHeader* bph,
return false; /* EOF found */
else if (read_len != 0 && feof(in))
elog(ERROR,
"Odd size page found at offset %lu of \"%s\"",
"Odd size page found at offset %ld of \"%s\"",
ftello(in), fullpath);
else
elog(ERROR, "Cannot read header at offset %lu of \"%s\": %s",
elog(ERROR, "Cannot read header at offset %ld of \"%s\": %s",
ftello(in), fullpath, strerror(errno));
}
@ -2321,7 +2323,7 @@ copy_pages(const char *to_fullpath, const char *from_fullpath,
elog(ERROR, "Cannot seek to end of file position in destination file \"%s\": %s",
to_fullpath, strerror(errno));
{
size_t pos = ftell(out);
long pos = ftell(out);
if (pos < 0)
elog(ERROR, "Cannot get position in destination file \"%s\": %s",

src/delete.c

@ -36,7 +36,7 @@ do_delete(InstanceState *instanceState, time_t backup_id)
parray *backup_list,
*delete_list;
pgBackup *target_backup = NULL;
size_t size_to_delete = 0;
int64 size_to_delete = 0;
char size_to_delete_pretty[20];
/* Get complete list of backups */
@ -682,12 +682,11 @@ do_retention_wal(InstanceState *instanceState, bool dry_run)
* at least one backup and no file should be removed.
* Unless wal-depth is enabled.
*/
if ((tlinfo->closest_backup) && instance_config.wal_depth <= 0)
if ((tlinfo->closest_backup) && instance_config.wal_depth == 0)
continue;
/* WAL retention keeps this timeline from purge */
if (instance_config.wal_depth >= 0 && tlinfo->anchor_tli > 0 &&
tlinfo->anchor_tli != tlinfo->tli)
if (tlinfo->anchor_tli > 0 && tlinfo->anchor_tli != tlinfo->tli)
continue;
/*
@ -701,7 +700,7 @@ do_retention_wal(InstanceState *instanceState, bool dry_run)
*/
if (tlinfo->oldest_backup)
{
if (instance_config.wal_depth >= 0 && !(XLogRecPtrIsInvalid(tlinfo->anchor_lsn)))
if (!(XLogRecPtrIsInvalid(tlinfo->anchor_lsn)))
{
delete_walfiles_in_tli(instanceState, tlinfo->anchor_lsn,
tlinfo, instance_config.xlog_seg_size, dry_run);
@ -714,7 +713,7 @@ do_retention_wal(InstanceState *instanceState, bool dry_run)
}
else
{
if (instance_config.wal_depth >= 0 && !(XLogRecPtrIsInvalid(tlinfo->anchor_lsn)))
if (!(XLogRecPtrIsInvalid(tlinfo->anchor_lsn)))
delete_walfiles_in_tli(instanceState, tlinfo->anchor_lsn,
tlinfo, instance_config.xlog_seg_size, dry_run);
else
@ -942,7 +941,7 @@ delete_walfiles_in_tli(InstanceState *instanceState, XLogRecPtr keep_lsn, timeli
join_path_components(wal_fullpath, instanceState->instance_wal_subdir_path, wal_file->file.name);
/* save segment from purging */
if (instance_config.wal_depth >= 0 && wal_file->keep)
if (wal_file->keep)
{
elog(VERBOSE, "Retain WAL segment \"%s\"", wal_fullpath);
continue;
@ -1027,7 +1026,7 @@ do_delete_status(InstanceState *instanceState, InstanceConfig *instance_config,
parray *backup_list, *delete_list;
const char *pretty_status;
int n_deleted = 0, n_found = 0;
size_t size_to_delete = 0;
int64 size_to_delete = 0;
char size_to_delete_pretty[20];
pgBackup *backup;

src/dir.c (155 lines changed)

@ -8,6 +8,7 @@
*-------------------------------------------------------------------------
*/
#include <assert.h>
#include "pg_probackup.h"
#include "utils/file.h"
@ -130,6 +131,9 @@ static void opt_path_map(ConfigOption *opt, const char *arg,
TablespaceList *list, const char *type);
static void cleanup_tablespace(const char *path);
static void control_string_bad_format(const char* str);
/* Tablespace mapping */
static TablespaceList tablespace_dirs = {NULL, NULL};
/* Extra directories mapping */
@ -1036,13 +1040,20 @@ opt_externaldir_map(ConfigOption *opt, const char *arg)
*/
void
create_data_directories(parray *dest_files, const char *data_dir, const char *backup_dir,
bool extract_tablespaces, bool incremental, fio_location location)
bool extract_tablespaces, bool incremental, fio_location location,
const char* waldir_path)
{
int i;
parray *links = NULL;
mode_t pg_tablespace_mode = DIR_PERMISSION;
char to_path[MAXPGPATH];
if (waldir_path && !dir_is_empty(waldir_path, location))
{
elog(ERROR, "WAL directory location is not empty: \"%s\"", waldir_path);
}
/* get tablespace map */
if (extract_tablespaces)
{
@ -1107,6 +1118,27 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba
/* skip external directory content */
if (dir->external_dir_num != 0)
continue;
/* Create the WAL directory and symlink if waldir_path is set */
if (waldir_path && strcmp(dir->rel_path, PG_XLOG_DIR) == 0) {
/* get full path to PG_XLOG_DIR */
join_path_components(to_path, data_dir, PG_XLOG_DIR);
elog(VERBOSE, "Create directory \"%s\" and symbolic link \"%s\"",
waldir_path, to_path);
/* create the WAL directory at waldir_path */
fio_mkdir(waldir_path, pg_tablespace_mode, location);
/* create a symbolic link from to_path to waldir_path */
if (fio_symlink(waldir_path, to_path, incremental, location) < 0)
elog(ERROR, "Could not create symbolic link \"%s\": %s",
to_path, strerror(errno));
continue;
}
/* tablespace_map exists */
if (links)
@ -1467,7 +1499,7 @@ get_external_remap(char *current_dir)
return current_dir;
}
/* Parsing states for get_control_value() */
/* Parsing states for get_control_value_str() */
#define CONTROL_WAIT_NAME 1
#define CONTROL_INNAME 2
#define CONTROL_WAIT_COLON 3
@ -1481,26 +1513,62 @@ get_external_remap(char *current_dir)
* The line has the following format:
* {"name1":"value1", "name2":"value2"}
*
* The value will be returned to "value_str" as string if it is not NULL. If it
* is NULL the value will be returned to "value_int64" as int64.
* The value will be returned in "value_int64" as int64.
*
* Returns true if the value was found in the line and parsed.
*/
bool
get_control_value_int64(const char *str, const char *name, int64 *value_int64, bool is_mandatory)
{
char buf_int64[32];
assert(value_int64);
/* Set default value */
*value_int64 = 0;
if (!get_control_value_str(str, name, buf_int64, sizeof(buf_int64), is_mandatory))
return false;
if (!parse_int64(buf_int64, value_int64, 0))
{
/* Treat an out-of-range value as -1 (BYTES_INVALID) */
if (errno == ERANGE)
*value_int64 = BYTES_INVALID;
else
control_string_bad_format(str);
return false;
}
return true;
}
/*
* Get value from json-like line "str" of backup_content.control file.
*
* The line has the following format:
* {"name1":"value1", "name2":"value2"}
*
* The value will be returned to "value_str" as string.
*
* Returns true if the value was found in the line.
*/
bool
get_control_value(const char *str, const char *name,
char *value_str, int64 *value_int64, bool is_mandatory)
get_control_value_str(const char *str, const char *name,
char *value_str, size_t value_str_size, bool is_mandatory)
{
int state = CONTROL_WAIT_NAME;
char *name_ptr = (char *) name;
char *buf = (char *) str;
char buf_int64[32], /* Buffer for "value_int64" */
*buf_int64_ptr = buf_int64;
char *const value_str_start = value_str;
/* Set default values */
if (value_str)
*value_str = '\0';
else if (value_int64)
*value_int64 = 0;
assert(value_str);
assert(value_str_size > 0);
/* Set default value */
*value_str = '\0';
while (*buf)
{
@ -1510,7 +1578,7 @@ get_control_value(const char *str, const char *name,
if (*buf == '"')
state = CONTROL_INNAME;
else if (IsAlpha(*buf))
goto bad_format;
control_string_bad_format(str);
break;
case CONTROL_INNAME:
/* Found target field. Parse value. */
@ -1529,57 +1597,32 @@ get_control_value(const char *str, const char *name,
if (*buf == ':')
state = CONTROL_WAIT_VALUE;
else if (!IsSpace(*buf))
goto bad_format;
control_string_bad_format(str);
break;
case CONTROL_WAIT_VALUE:
if (*buf == '"')
{
state = CONTROL_INVALUE;
buf_int64_ptr = buf_int64;
}
else if (IsAlpha(*buf))
goto bad_format;
control_string_bad_format(str);
break;
case CONTROL_INVALUE:
/* Value was parsed, exit */
if (*buf == '"')
{
if (value_str)
{
*value_str = '\0';
}
else if (value_int64)
{
/* Length of buf_uint64 should not be greater than 31 */
if (buf_int64_ptr - buf_int64 >= 32)
elog(ERROR, "field \"%s\" is out of range in the line %s of the file %s",
name, str, DATABASE_FILE_LIST);
*buf_int64_ptr = '\0';
if (!parse_int64(buf_int64, value_int64, 0))
{
/* We assume that too big value is -1 */
if (errno == ERANGE)
*value_int64 = BYTES_INVALID;
else
goto bad_format;
}
}
*value_str = '\0';
return true;
}
else
{
if (value_str)
{
*value_str = *buf;
value_str++;
}
else
{
*buf_int64_ptr = *buf;
buf_int64_ptr++;
/* verify that value_str does not exceed the value_str_size limit */
if (value_str - value_str_start >= value_str_size - 1) {
elog(ERROR, "field \"%s\" is out of range in the line %s of the file %s",
name, str, DATABASE_FILE_LIST);
}
*value_str = *buf;
value_str++;
}
break;
case CONTROL_WAIT_NEXT_NAME:
@ -1596,18 +1639,20 @@ get_control_value(const char *str, const char *name,
/* There is no close quotes */
if (state == CONTROL_INNAME || state == CONTROL_INVALUE)
goto bad_format;
control_string_bad_format(str);
/* Did not find target field */
if (is_mandatory)
elog(ERROR, "field \"%s\" is not found in the line %s of the file %s",
name, str, DATABASE_FILE_LIST);
return false;
}
bad_format:
elog(ERROR, "%s file has invalid format in line %s",
DATABASE_FILE_LIST, str);
return false; /* Make compiler happy */
static void
control_string_bad_format(const char* str)
{
elog(ERROR, "%s file has invalid format in line %s",
DATABASE_FILE_LIST, str);
}
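For illustration, a hedged sketch of how a caller uses the two split accessors to parse one backup_content.control line (the sample line is made up; the declarations are the ones shown above):
```c
/* hypothetical caller, assuming pg_probackup.h is included */
const char *line = "{\"path\":\"base/1/1259\", \"size\":\"8192\", \"segno\":\"0\"}";
char   path[MAXPGPATH];
int64  size;
int64  segno;

/* mandatory fields: elog(ERROR) is raised if they are missing */
get_control_value_str(line, "path", path, sizeof(path), true);
get_control_value_int64(line, "size", &size, true);

/* optional field: returns false (and leaves the default 0) when absent */
if (get_control_value_int64(line, "segno", &segno, false))
    elog(VERBOSE, "segno: %d", (int) segno);
```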
/*
@ -1841,8 +1886,8 @@ read_database_map(pgBackup *backup)
db_map_entry *db_entry = (db_map_entry *) pgut_malloc(sizeof(db_map_entry));
get_control_value(buf, "dbOid", NULL, &dbOid, true);
get_control_value(buf, "datname", datname, NULL, true);
get_control_value_int64(buf, "dbOid", &dbOid, true);
get_control_value_str(buf, "datname", datname, sizeof(datname), true);
db_entry->dbOid = dbOid;
db_entry->datname = pgut_strdup(datname);

src/help.c

@ -169,6 +169,7 @@ help_pg_probackup(void)
printf(_(" [-T OLDDIR=NEWDIR] [--progress]\n"));
printf(_(" [--external-mapping=OLDDIR=NEWDIR]\n"));
printf(_(" [--skip-external-dirs] [--no-sync]\n"));
printf(_(" [-X WALDIR | --waldir=WALDIR]\n"));
printf(_(" [-I | --incremental-mode=none|checksum|lsn]\n"));
printf(_(" [--db-include | --db-exclude]\n"));
printf(_(" [--remote-proto] [--remote-host]\n"));
@ -261,15 +262,16 @@ help_pg_probackup(void)
printf(_(" [--remote-proto] [--remote-host]\n"));
printf(_(" [--remote-port] [--remote-path] [--remote-user]\n"));
printf(_(" [--ssh-options]\n"));
printf(_(" [--dry-run]\n"));
printf(_(" [--help]\n"));
if ((PROGRAM_URL || PROGRAM_EMAIL))
{
printf("\n");
if (PROGRAM_URL)
printf("Read the website for details. <%s>\n", PROGRAM_URL);
printf(_("Read the website for details <%s>.\n"), PROGRAM_URL);
if (PROGRAM_EMAIL)
printf("Report bugs to <%s>.\n", PROGRAM_EMAIL);
printf(_("Report bugs to <%s>.\n"), PROGRAM_EMAIL);
}
}
@ -434,6 +436,7 @@ help_restore(void)
printf(_(" [-T OLDDIR=NEWDIR]\n"));
printf(_(" [--external-mapping=OLDDIR=NEWDIR]\n"));
printf(_(" [--skip-external-dirs]\n"));
printf(_(" [-X WALDIR | --waldir=WALDIR]\n"));
printf(_(" [-I | --incremental-mode=none|checksum|lsn]\n"));
printf(_(" [--db-include dbname | --db-exclude dbname]\n"));
printf(_(" [--recovery-target-time=time|--recovery-target-xid=xid\n"));
@ -471,6 +474,10 @@ help_restore(void)
printf(_(" relocate the external directory from OLDDIR to NEWDIR\n"));
printf(_(" --skip-external-dirs do not restore all external directories\n"));
printf(_(" -X, --waldir=WALDIR location for the write-ahead log directory\n"));
printf(_("\n Incremental restore options:\n"));
printf(_(" -I, --incremental-mode=none|checksum|lsn\n"));
printf(_(" reuse valid pages available in PGDATA if they have not changed\n"));
@ -1047,6 +1054,7 @@ help_catchup(void)
printf(_(" [--remote-proto] [--remote-host]\n"));
printf(_(" [--remote-port] [--remote-path] [--remote-user]\n"));
printf(_(" [--ssh-options]\n"));
printf(_(" [--dry-run]\n"));
printf(_(" [--help]\n\n"));
printf(_(" -b, --backup-mode=catchup-mode catchup mode=FULL|DELTA|PTRACK\n"));
@ -1081,4 +1089,6 @@ help_catchup(void)
printf(_(" --remote-user=username user name for ssh connection (default: current user)\n"));
printf(_(" --ssh-options=ssh_options additional ssh options (default: none)\n"));
printf(_(" (example: --ssh-options='-c cipher_spec -F configfile')\n\n"));
printf(_(" --dry-run perform a trial run without any changes\n\n"));
}

src/merge.c

@ -614,7 +614,7 @@ merge_chain(InstanceState *instanceState,
/* Create directories */
create_data_directories(dest_backup->files, full_database_dir,
dest_backup->root_dir, false, false, FIO_BACKUP_HOST);
dest_backup->root_dir, false, false, FIO_BACKUP_HOST, NULL);
/* External directories stuff */
if (dest_backup->external_dir_str)

src/pg_probackup.c

@ -122,6 +122,7 @@ static parray *datname_include_list = NULL;
/* arrays for --exclude-path's */
static parray *exclude_absolute_paths_list = NULL;
static parray *exclude_relative_paths_list = NULL;
static char* gl_waldir_path = NULL;
/* checkdb options */
bool need_amcheck = false;
@ -238,6 +239,7 @@ static ConfigOption cmd_options[] =
{ 's', 160, "primary-conninfo", &primary_conninfo, SOURCE_CMD_STRICT },
{ 's', 'S', "primary-slot-name",&replication_slot, SOURCE_CMD_STRICT },
{ 'f', 'I', "incremental-mode", opt_incr_restore_mode, SOURCE_CMD_STRICT },
{ 's', 'X', "waldir", &gl_waldir_path, SOURCE_CMD_STRICT },
/* checkdb options */
{ 'b', 195, "amcheck", &need_amcheck, SOURCE_CMD_STRICT },
{ 'b', 196, "heapallindexed", &heapallindexed, SOURCE_CMD_STRICT },
@ -308,6 +310,7 @@ main(int argc, char *argv[])
init_config(&instance_config, instance_name);
PROGRAM_NAME = get_progname(argv[0]);
set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_probackup"));
PROGRAM_FULL_PATH = palloc0(MAXPGPATH);
/* Get current time */
@ -753,6 +756,21 @@ main(int argc, char *argv[])
restore_params->partial_restore_type = INCLUDE;
restore_params->partial_db_list = datname_include_list;
}
if (gl_waldir_path)
{
/* clean up xlog directory name, check it's absolute */
canonicalize_path(gl_waldir_path);
if (!is_absolute_path(gl_waldir_path))
{
elog(ERROR, "WAL directory location must be an absolute path");
}
if (strlen(gl_waldir_path) > MAXPGPATH)
elog(ERROR, "Value specified to --waldir is too long");
}
restore_params->waldir = gl_waldir_path;
}
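A hedged usage sketch of the new option (the backup catalog, instance name, and WAL location below are illustrative): restoring with -X places pg_wal at the given absolute path and symlinks it from the data directory, per the create_data_directories() change above.
```commandline
pg_probackup restore -B /mnt/backups --instance=node -D /var/lib/pgdata \
    -X /mnt/fast_ssd/pg_wal
```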
/*

src/pg_probackup.h

@ -338,7 +338,7 @@ typedef enum ShowFormat
#define BYTES_INVALID (-1) /* the file did not change since the previous backup; DELTA backups do not rely on it */
#define FILE_NOT_FOUND (-2) /* file disappeared during backup */
#define BLOCKNUM_INVALID (-1)
#define PROGRAM_VERSION "2.5.5"
#define PROGRAM_VERSION "2.5.6"
/* update when remote agent API or behaviour changes */
#define AGENT_PROTOCOL_VERSION 20501
@ -566,6 +566,8 @@ typedef struct pgRestoreParams
/* options for partial restore */
PartialRestoreType partial_restore_type;
parray *partial_db_list;
char* waldir;
} pgRestoreParams;
/* Options needed for set-backup command */
@ -1010,8 +1012,9 @@ extern CompressAlg parse_compress_alg(const char *arg);
extern const char* deparse_compress_alg(int alg);
/* in dir.c */
extern bool get_control_value(const char *str, const char *name,
char *value_str, int64 *value_int64, bool is_mandatory);
extern bool get_control_value_int64(const char *str, const char *name, int64 *value_int64, bool is_mandatory);
extern bool get_control_value_str(const char *str, const char *name,
char *value_str, size_t value_str_size, bool is_mandatory);
extern void dir_list_file(parray *files, const char *root, bool exclude,
bool follow_symlink, bool add_root, bool backup_logs,
bool skip_hidden, int external_dir_num, fio_location location);
@ -1022,7 +1025,8 @@ extern void create_data_directories(parray *dest_files,
const char *backup_dir,
bool extract_tablespaces,
bool incremental,
fio_location location);
fio_location location,
const char *waldir_path);
extern void read_tablespace_map(parray *links, const char *backup_dir);
extern void opt_tablespace_map(ConfigOption *opt, const char *arg);

src/restore.c

@ -801,7 +801,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain,
create_data_directories(dest_files, instance_config.pgdata,
dest_backup->root_dir, backup_has_tblspc,
params->incremental_mode != INCR_NONE,
FIO_DB_HOST);
FIO_DB_HOST, params->waldir);
/*
* Restore dest_backup external directories.

src/utils/file.c

@ -489,8 +489,10 @@ fio_disconnect(void)
Assert(hdr.cop == FIO_DISCONNECTED);
SYS_CHECK(close(fio_stdin));
SYS_CHECK(close(fio_stdout));
SYS_CHECK(close(fio_stderr));
fio_stdin = 0;
fio_stdout = 0;
fio_stderr = 0;
wait_ssh();
}
}
@ -3403,7 +3405,8 @@ fio_communicate(int in, int out)
case FIO_DISCONNECT:
hdr.cop = FIO_DISCONNECTED;
IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr));
break;
free(buf);
return;
case FIO_GET_ASYNC_ERROR:
fio_get_async_error_impl(out);
break;

src/utils/remote.c

@ -147,6 +147,9 @@ bool launch_agent(void)
ssh_argv[ssh_argc++] = "-o";
ssh_argv[ssh_argc++] = "Compression=no";
ssh_argv[ssh_argc++] = "-o";
ssh_argv[ssh_argc++] = "ControlMaster=no";
ssh_argv[ssh_argc++] = "-o";
ssh_argv[ssh_argc++] = "LogLevel=error";

tests/Readme.md

@ -1,4 +1,4 @@
[see wiki](https://confluence.postgrespro.ru/display/DEV/pg_probackup)
```
Note: For now these tests work on Linux and "kinda" work on Windows
@ -31,7 +31,7 @@ Remote backup depends on key authentication to local machine via ssh as current
export PGPROBACKUP_SSH_REMOTE=ON
Run tests that rely on advanced debugging features. For this mode, pg_probackup should be compiled without optimizations. For example:
CFLAGS="-O0" ./configure --prefix=/path/to/prefix --enable-debug --enable-cassert --enable-depend --enable-tap-tests
CFLAGS="-O0" ./configure --prefix=/path/to/prefix --enable-debug --enable-cassert --enable-depend --enable-tap-tests --enable-nls
export PGPROBACKUP_GDB=ON
@ -41,6 +41,8 @@ Run suit of basic simple tests:
Run ptrack tests:
export PG_PROBACKUP_PTRACK=ON
Run long (time consuming) tests:
export PG_PROBACKUP_LONG=ON
Usage:
sudo echo 0 > /proc/sys/kernel/yama/ptrace_scope
@ -48,3 +50,20 @@ Usage:
export PG_CONFIG=/path/to/pg_config
python -m unittest [-v] tests[.specific_module][.class.test]
```
# Troubleshooting FAQ
## Python tests failure
### 1. Could not open extension "..."
```
testgres.exceptions.QueryException ERROR: could not open extension control file "<postgres_build_dir>/share/extension/amcheck.control": No such file or directory
```
#### Solution:
The required extension from `<postgres_src_root>/contrib/...` is not installed; build and install the contrib modules:
```commandline
cd <postgres_src_root>
make install-world
```

tests/__init__.py

@ -7,7 +7,7 @@ from . import init, merge, option, show, compatibility, \
compression, page, ptrack, archive, exclude, cfs_backup, cfs_restore, \
cfs_validate_backup, auth_test, time_stamp, logging, \
locking, remote, external, config, checkdb, set_backup, incr_restore, \
catchup, CVE_2018_1058
catchup, CVE_2018_1058, time_consuming
def load_tests(loader, tests, pattern):
@ -21,6 +21,12 @@ def load_tests(loader, tests, pattern):
if os.environ['PG_PROBACKUP_PTRACK'] == 'ON':
suite.addTests(loader.loadTestsFromModule(ptrack))
# PG_PROBACKUP_LONG section for tests that are long
# by design e.g. they contain loops, sleeps and so on
if 'PG_PROBACKUP_LONG' in os.environ:
if os.environ['PG_PROBACKUP_LONG'] == 'ON':
suite.addTests(loader.loadTestsFromModule(time_consuming))
# suite.addTests(loader.loadTestsFromModule(auth_test))
suite.addTests(loader.loadTestsFromModule(archive))
suite.addTests(loader.loadTestsFromModule(backup))

tests/archive.py

@ -228,6 +228,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
Check pg_stop_backup_timeout, needed backup_timeout
Fixed in commit d84d79668b0c139 and assert fixed by ptrack 1.7
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -290,6 +292,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
Check pg_stop_backup_timeout, libpq-timeout requested.
Fixed in commit d84d79668b0c139 and assert fixed by ptrack 1.7
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -313,7 +317,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
gdb.set_breakpoint('pg_stop_backup')
gdb.run_until_break()
self.set_auto_conf(node, {'archive_command': "'exit 1'"})
self.set_auto_conf(node, {'archive_command': 'exit 1'})
node.reload()
os.environ["PGAPPNAME"] = "foo"

tests/backup.py

@ -1095,6 +1095,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_drop_rel_during_full_backup(self):
""""""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -1244,6 +1246,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_drop_rel_during_backup_delta(self):
""""""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -1313,6 +1317,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_drop_rel_during_backup_page(self):
""""""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -1445,6 +1451,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_backup_concurrent_drop_table(self):
""""""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -1579,6 +1587,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_sigint_handling(self):
""""""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -1618,6 +1628,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_sigterm_handling(self):
""""""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -1656,6 +1668,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_sigquit_handling(self):
""""""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -2905,6 +2919,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_missing_wal_segment(self):
""""""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -3295,6 +3311,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_backup_atexit(self):
""""""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(

tests/catchup.py

@ -1455,3 +1455,157 @@ class CatchupTest(ProbackupTest, unittest.TestCase):
dst_pg.stop()
#self.assertEqual(1, 0, 'Stop test')
self.del_test_dir(module_name, self.fname)
#########################################
# --dry-run
#########################################
def test_dry_run_catchup_full(self):
"""
Test dry-run option for full catchup
"""
# preparation 1: source
src_pg = self.make_simple_node(
base_dir = os.path.join(module_name, self.fname, 'src'),
set_replication = True
)
src_pg.slow_start()
# preparation 2: make a cleanly shut down replica lagging behind the source
dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
src_pg.pgbench_init(scale = 10)
pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum'])
pgbench.wait()
# save the data directory state before the dry run
content_before = self.pgdata_content(dst_pg.data_dir)
# do full catchup
self.catchup_node(
backup_mode = 'FULL',
source_pgdata = src_pg.data_dir,
destination_node = dst_pg,
options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--dry-run']
)
# compare data dirs before and after catchup
self.compare_pgdata(
content_before,
self.pgdata_content(dst_pg.data_dir)
)
# Cleanup
src_pg.stop()
self.del_test_dir(module_name, self.fname)
def test_dry_run_catchup_ptrack(self):
"""
Test dry-run option for catchup in incremental ptrack mode
"""
if not self.ptrack:
return unittest.skip('Skipped because ptrack support is disabled')
# preparation 1: source
src_pg = self.make_simple_node(
base_dir = os.path.join(module_name, self.fname, 'src'),
set_replication = True,
ptrack_enable = True,
initdb_params = ['--data-checksums']
)
src_pg.slow_start()
src_pg.safe_psql("postgres", "CREATE EXTENSION ptrack")
src_pg.pgbench_init(scale = 10)
pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum'])
pgbench.wait()
# preparation 2: make a cleanly shut down replica lagging behind the source
dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
self.catchup_node(
backup_mode = 'FULL',
source_pgdata = src_pg.data_dir,
destination_node = dst_pg,
options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
)
self.set_replica(src_pg, dst_pg)
dst_options = {}
dst_options['port'] = str(dst_pg.port)
self.set_auto_conf(dst_pg, dst_options)
dst_pg.slow_start(replica = True)
dst_pg.stop()
# save the data directory state before the dry run
content_before = self.pgdata_content(dst_pg.data_dir)
# do incremental catchup
self.catchup_node(
backup_mode = 'PTRACK',
source_pgdata = src_pg.data_dir,
destination_node = dst_pg,
options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--dry-run']
)
# compare data dirs before and after catchup
self.compare_pgdata(
content_before,
self.pgdata_content(dst_pg.data_dir)
)
# Cleanup
src_pg.stop()
self.del_test_dir(module_name, self.fname)
def test_dry_run_catchup_delta(self):
"""
Test dry-run option for catchup in incremental delta mode
"""
# preparation 1: source
src_pg = self.make_simple_node(
base_dir = os.path.join(module_name, self.fname, 'src'),
set_replication = True,
initdb_params = ['--data-checksums'],
pg_options = { 'wal_log_hints': 'on' }
)
src_pg.slow_start()
src_pg.pgbench_init(scale = 10)
pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum'])
pgbench.wait()
# preparation 2: make a cleanly shut down replica lagging behind the source
dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst'))
self.catchup_node(
backup_mode = 'FULL',
source_pgdata = src_pg.data_dir,
destination_node = dst_pg,
options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream']
)
self.set_replica(src_pg, dst_pg)
dst_options = {}
dst_options['port'] = str(dst_pg.port)
self.set_auto_conf(dst_pg, dst_options)
dst_pg.slow_start(replica = True)
dst_pg.stop()
# save the data directory state before the dry run
content_before = self.pgdata_content(dst_pg.data_dir)
# do delta catchup
self.catchup_node(
backup_mode = 'DELTA',
source_pgdata = src_pg.data_dir,
destination_node = dst_pg,
options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', "--dry-run"]
)
# compare data dirs before and after catchup
self.compare_pgdata(
content_before,
self.pgdata_content(dst_pg.data_dir)
)
# Cleanup
src_pg.stop()
self.del_test_dir(module_name, self.fname)

tests/checkdb.py

@ -17,6 +17,8 @@ class CheckdbTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_checkdb_amcheck_only_sanity(self):
""""""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -544,11 +546,8 @@ class CheckdbTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_checkdb_sigint_handling(self):
""""""
if not self.gdb:
self.skipTest(
"Specify PGPROBACKUP_GDB and build without "
"optimizations for run this test"
)
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(

tests/delta.py

@ -472,11 +472,8 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
make node, make full and delta stream backups,
restore them and check data correctness
"""
if not self.gdb:
self.skipTest(
"Specify PGPROBACKUP_GDB and build without "
"optimizations for run this test"
)
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(

tests/exclude.py

@ -203,8 +203,10 @@ class ExcludeTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_exclude_unlogged_tables_2(self):
"""
make node, create unlogged, take FULL, check
that unlogged was not backed up
1. make node, create unlogged table, take FULL, DELTA, PAGE backups,
check that unlogged table files were not backed up
2. restore FULL, DELTA, PAGE to an empty db,
ensure the unlogged table exists and is empty
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@ -220,6 +222,8 @@ class ExcludeTest(ProbackupTest, unittest.TestCase):
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
backup_ids = []
for backup_type in ['full', 'delta', 'page']:
if backup_type == 'full':
@ -231,14 +235,16 @@ class ExcludeTest(ProbackupTest, unittest.TestCase):
'postgres',
'insert into test select generate_series(0,20050000)::text')
rel_path = node.safe_psql(
rel_path = node.execute(
'postgres',
"select pg_relation_filepath('test')").decode('utf-8').rstrip()
"select pg_relation_filepath('test')")[0][0]
backup_id = self.backup_node(
backup_dir, 'node', node,
backup_type=backup_type, options=['--stream'])
backup_ids.append(backup_id)
filelist = self.get_backup_filelist(
backup_dir, 'node', backup_id)
@ -258,9 +264,25 @@ class ExcludeTest(ProbackupTest, unittest.TestCase):
rel_path + '.3', filelist,
"Unlogged table was not excluded")
# ensure restore brings back only an empty unlogged table
for backup_id in backup_ids:
node.stop()
node.cleanup()
self.restore_node(backup_dir, 'node', node, backup_id=backup_id)
node.slow_start()
self.assertEqual(
node.execute(
'postgres',
'select count(*) from test')[0][0],
0)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_exclude_log_dir(self):
"""

tests/expected/option_help.out

@ -86,6 +86,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
[-T OLDDIR=NEWDIR] [--progress]
[--external-mapping=OLDDIR=NEWDIR]
[--skip-external-dirs] [--no-sync]
[-X WALDIR | --waldir=WALDIR]
[-I | --incremental-mode=none|checksum|lsn]
[--db-include | --db-exclude]
[--remote-proto] [--remote-host]
@ -178,7 +179,8 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
[--remote-proto] [--remote-host]
[--remote-port] [--remote-path] [--remote-user]
[--ssh-options]
[--dry-run]
[--help]
Read the website for details. <https://github.com/postgrespro/pg_probackup>
Read the website for details <https://github.com/postgrespro/pg_probackup>.
Report bugs to <https://github.com/postgrespro/pg_probackup/issues>.

tests/expected/option_help_ru.out

@ -0,0 +1,186 @@
pg_probackup - утилита для управления резервным копированием/восстановлением базы данных PostgreSQL.
pg_probackup help [COMMAND]
pg_probackup version
pg_probackup init -B backup-path
pg_probackup set-config -B backup-path --instance=instance_name
[-D pgdata-path]
[--external-dirs=external-directories-paths]
[--log-level-console=log-level-console]
[--log-level-file=log-level-file]
[--log-filename=log-filename]
[--error-log-filename=error-log-filename]
[--log-directory=log-directory]
[--log-rotation-size=log-rotation-size]
[--log-rotation-age=log-rotation-age]
[--retention-redundancy=retention-redundancy]
[--retention-window=retention-window]
[--wal-depth=wal-depth]
[--compress-algorithm=compress-algorithm]
[--compress-level=compress-level]
[--archive-timeout=timeout]
[-d dbname] [-h host] [-p port] [-U username]
[--remote-proto] [--remote-host]
[--remote-port] [--remote-path] [--remote-user]
[--ssh-options]
[--restore-command=cmdline] [--archive-host=destination]
[--archive-port=port] [--archive-user=username]
[--help]
pg_probackup set-backup -B backup-path --instance=instance_name
-i backup-id [--ttl=interval] [--expire-time=timestamp]
[--note=text]
[--help]
pg_probackup show-config -B backup-path --instance=instance_name
[--format=format]
[--help]
pg_probackup backup -B backup-path -b backup-mode --instance=instance_name
[-D pgdata-path] [-C]
[--stream [-S slot-name] [--temp-slot]]
[--backup-pg-log] [-j num-threads] [--progress]
[--no-validate] [--skip-block-validation]
[--external-dirs=external-directories-paths]
[--no-sync]
[--log-level-console=log-level-console]
[--log-level-file=log-level-file]
[--log-filename=log-filename]
[--error-log-filename=error-log-filename]
[--log-directory=log-directory]
[--log-rotation-size=log-rotation-size]
[--log-rotation-age=log-rotation-age] [--no-color]
[--delete-expired] [--delete-wal] [--merge-expired]
[--retention-redundancy=retention-redundancy]
[--retention-window=retention-window]
[--wal-depth=wal-depth]
[--compress]
[--compress-algorithm=compress-algorithm]
[--compress-level=compress-level]
[--archive-timeout=archive-timeout]
[-d dbname] [-h host] [-p port] [-U username]
[-w --no-password] [-W --password]
[--remote-proto] [--remote-host]
[--remote-port] [--remote-path] [--remote-user]
[--ssh-options]
[--ttl=interval] [--expire-time=timestamp] [--note=text]
[--help]
pg_probackup restore -B backup-path --instance=instance_name
[-D pgdata-path] [-i backup-id] [-j num-threads]
[--recovery-target-time=time|--recovery-target-xid=xid
|--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]]
[--recovery-target-timeline=timeline]
[--recovery-target=immediate|latest]
[--recovery-target-name=target-name]
[--recovery-target-action=pause|promote|shutdown]
[--restore-command=cmdline]
[-R | --restore-as-replica] [--force]
[--primary-conninfo=primary_conninfo]
[-S | --primary-slot-name=slotname]
[--no-validate] [--skip-block-validation]
[-T OLDDIR=NEWDIR] [--progress]
[--external-mapping=OLDDIR=NEWDIR]
[--skip-external-dirs] [--no-sync]
[-X WALDIR | --waldir=WALDIR]
[-I | --incremental-mode=none|checksum|lsn]
[--db-include | --db-exclude]
[--remote-proto] [--remote-host]
[--remote-port] [--remote-path] [--remote-user]
[--ssh-options]
[--archive-host=hostname]
[--archive-port=port] [--archive-user=username]
[--help]
pg_probackup validate -B backup-path [--instance=instance_name]
[-i backup-id] [--progress] [-j num-threads]
[--recovery-target-time=time|--recovery-target-xid=xid
|--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]]
[--recovery-target-timeline=timeline]
[--recovery-target-name=target-name]
[--skip-block-validation]
[--help]
pg_probackup checkdb [-B backup-path] [--instance=instance_name]
[-D pgdata-path] [--progress] [-j num-threads]
[--amcheck] [--skip-block-validation]
[--heapallindexed] [--checkunique]
[--help]
pg_probackup show -B backup-path
[--instance=instance_name [-i backup-id]]
[--format=format] [--archive]
[--no-color] [--help]
pg_probackup delete -B backup-path --instance=instance_name
[-j num-threads] [--progress]
[--retention-redundancy=retention-redundancy]
[--retention-window=retention-window]
[--wal-depth=wal-depth]
[-i backup-id | --delete-expired | --merge-expired | --status=backup_status]
[--delete-wal]
[--dry-run] [--no-validate] [--no-sync]
[--help]
pg_probackup merge -B backup-path --instance=instance_name
-i backup-id [--progress] [-j num-threads]
[--no-validate] [--no-sync]
[--help]
pg_probackup add-instance -B backup-path -D pgdata-path
--instance=instance_name
[--external-dirs=external-directories-paths]
[--remote-proto] [--remote-host]
[--remote-port] [--remote-path] [--remote-user]
[--ssh-options]
[--help]
pg_probackup del-instance -B backup-path
--instance=instance_name
[--help]
pg_probackup archive-push -B backup-path --instance=instance_name
--wal-file-name=wal-file-name
[--wal-file-path=wal-file-path]
[-j num-threads] [--batch-size=batch_size]
[--archive-timeout=timeout]
[--no-ready-rename] [--no-sync]
[--overwrite] [--compress]
[--compress-algorithm=compress-algorithm]
[--compress-level=compress-level]
[--remote-proto] [--remote-host]
[--remote-port] [--remote-path] [--remote-user]
[--ssh-options]
[--help]
pg_probackup archive-get -B backup-path --instance=instance_name
--wal-file-path=wal-file-path
--wal-file-name=wal-file-name
[-j num-threads] [--batch-size=batch_size]
[--no-validate-wal]
[--remote-proto] [--remote-host]
[--remote-port] [--remote-path] [--remote-user]
[--ssh-options]
[--help]
pg_probackup catchup -b catchup-mode
--source-pgdata=path_to_pgdata_on_remote_server
--destination-pgdata=path_to_local_dir
[--stream [-S slot-name] [--temp-slot | --perm-slot]]
[-j num-threads]
[-T OLDDIR=NEWDIR]
[--exclude-path=path_prefix]
[-d dbname] [-h host] [-p port] [-U username]
[-w --no-password] [-W --password]
[--remote-proto] [--remote-host]
[--remote-port] [--remote-path] [--remote-user]
[--ssh-options]
[--dry-run]
[--help]
Read the website for details <https://github.com/postgrespro/pg_probackup>.
Report bugs to <https://github.com/postgrespro/pg_probackup/issues>.


@ -1 +1 @@
pg_probackup 2.5.5
pg_probackup 2.5.6


@ -89,11 +89,7 @@ def dir_files(base_dir):
def is_enterprise():
# pg_config --help
if os.name == 'posix':
cmd = [os.environ['PG_CONFIG'], '--pgpro-edition']
elif os.name == 'nt':
cmd = [[os.environ['PG_CONFIG']], ['--pgpro-edition']]
cmd = [os.environ['PG_CONFIG'], '--help']
p = subprocess.Popen(
cmd,
@ -102,6 +98,18 @@ def is_enterprise():
)
return b'postgrespro.ru' in p.communicate()[0]
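# Presumably the edition probe was switched from the PostgresPro-only
# `pg_config --pgpro-edition` flag to plain `pg_config --help`, whose
# support-contact line mentions postgrespro.ru only on Postgres Pro builds;
# the same check therefore also runs cleanly against vanilla PostgreSQL.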
def is_nls_enabled():
cmd = [os.environ['PG_CONFIG'], '--configure']
p = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
return b'enable-nls' in p.communicate()[0]
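# For reference: `pg_config --configure` echoes the configure flags the
# server was built with, e.g. '--prefix=/usr/local/pgsql' '--enable-nls',
# so a plain substring test for enable-nls is sufficient here.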
class ProbackupException(Exception):
def __init__(self, message, cmd):
self.message = message
@ -147,6 +155,7 @@ def slow_start(self, replica=False):
class ProbackupTest(object):
# Class attributes
enterprise = is_enterprise()
enable_nls = is_nls_enabled()
def __init__(self, *args, **kwargs):
super(ProbackupTest, self).__init__(*args, **kwargs)
@ -180,8 +189,8 @@ class ProbackupTest(object):
self.test_env['LC_MESSAGES'] = 'C'
self.test_env['LC_TIME'] = 'C'
self.gdb = 'PGPROBACKUP_GDB' in os.environ and \
os.environ['PGPROBACKUP_GDB'] == 'ON'
self.gdb = 'PGPROBACKUP_GDB' in self.test_env and \
self.test_env['PGPROBACKUP_GDB'] == 'ON'
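# Presumably the flag is now read from self.test_env rather than os.environ
# so that it is taken from the same copied environment as the LC_* overrides
# set up above.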
self.paranoia = 'PG_PROBACKUP_PARANOIA' in self.test_env and \
self.test_env['PG_PROBACKUP_PARANOIA'] == 'ON'
@ -810,7 +819,7 @@ class ProbackupTest(object):
if self.verbose:
print(self.cmd)
if gdb:
return GDBobj([binary_path] + command, self.verbose)
return GDBobj([binary_path] + command, self)
if asynchronous:
return subprocess.Popen(
[binary_path] + command,
@ -1861,22 +1870,34 @@ class ProbackupTest(object):
self.assertFalse(fail, error_message)
def gdb_attach(self, pid):
return GDBobj([str(pid)], self.verbose, attach=True)
return GDBobj([str(pid)], self, attach=True)
def _check_gdb_flag_or_skip_test(self):
if not self.gdb:
self.skipTest(
"Specify PGPROBACKUP_GDB and build without "
"optimizations for run this test"
)
class GdbException(Exception):
def __init__(self, message=False):
def __init__(self, message="False"):
self.message = message
def __str__(self):
return '\n ERROR: {0}\n'.format(repr(self.message))
class GDBobj(ProbackupTest):
def __init__(self, cmd, verbose, attach=False):
self.verbose = verbose
class GDBobj:
def __init__(self, cmd, env, attach=False):
self.verbose = env.verbose
self.output = ''
# Check that the gdb flag is set
if not env.gdb:
raise GdbException("PGPROBACKUP_GDB=ON is not set; the test "
"should call ProbackupTest._check_gdb_flag_or_skip_test() at its start "
"and be skipped")
# Check gdb presence
try:
gdb_version, _ = subprocess.Popen(
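# (The hunk is truncated above. A plausible continuation, shown here only as
# a hypothetical sketch rather than the committed code, probes for gdb by
# invoking `gdb --version`:)
#
#     gdb_version, _ = subprocess.Popen(
#         ['gdb', '--version'], stdout=subprocess.PIPE
#     ).communicate()
#     if not gdb_version:
#         raise GdbException('Cannot find gdb executable')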


@ -17,6 +17,8 @@ class LockingTest(ProbackupTest, unittest.TestCase):
run validate, expect it to execute successfully,
concurrent RUNNING backup with pid file and active process is legal
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@ -72,6 +74,8 @@ class LockingTest(ProbackupTest, unittest.TestCase):
RUNNING backup with pid file AND without active pid is legal,
but its status must be changed to ERROR and the pid file deleted
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@ -142,6 +146,8 @@ class LockingTest(ProbackupTest, unittest.TestCase):
RUNNING backup with pid file AND without active pid is legal,
but its status must be changed to ERROR and the pid file deleted
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@ -240,6 +246,8 @@ class LockingTest(ProbackupTest, unittest.TestCase):
RUNNING backup without pid file AND without active pid is legal,
its status must be changed to ERROR
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@ -310,6 +318,8 @@ class LockingTest(ProbackupTest, unittest.TestCase):
Expect restore to succeed because read-only locks
do not conflict
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@ -352,6 +362,8 @@ class LockingTest(ProbackupTest, unittest.TestCase):
Expect restore to fail because validation of
intermediate backup is impossible
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@ -443,6 +455,8 @@ class LockingTest(ProbackupTest, unittest.TestCase):
and stop it in the middle, delete full backup.
Expect it to fail.
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@ -585,6 +599,8 @@ class LockingTest(ProbackupTest, unittest.TestCase):
"""
Make sure that shared lock leaves no files with pids
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),


@ -12,6 +12,10 @@ class LogTest(ProbackupTest, unittest.TestCase):
# @unittest.expectedFailure
# PGPRO-2154
def test_log_rotation(self):
"""
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),


@ -975,6 +975,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
"""
Check that failed MERGE can be continued
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -1051,6 +1053,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
"""
Fail merge via gdb, corrupt DELTA backup, try to continue merge
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -1148,6 +1152,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
"""
Check that failed MERGE on delete can be continued
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -1219,6 +1225,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
Check that failed MERGE cannot be continued if intermediate
backup is missing.
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -1409,6 +1417,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
check that crashing after opening backup.control
for writing will not result in losing backup metadata
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -1461,6 +1471,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
for writing will not result in losing metadata about backup files
TODO: rewrite
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -1552,6 +1564,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
for writing will not result in losing metadata about backup files
TODO: rewrite
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -1639,6 +1653,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
def test_failed_merge_after_delete(self):
"""
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -1720,6 +1736,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
def test_failed_merge_after_delete_1(self):
"""
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -1796,6 +1814,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
def test_failed_merge_after_delete_2(self):
"""
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -1858,6 +1878,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
def test_failed_merge_after_delete_3(self):
"""
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -2281,6 +2303,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
def test_idempotent_merge(self):
"""
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -2580,6 +2604,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
page header map cannot be trusted when
running retry
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -2626,6 +2652,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
def test_missing_data_file(self):
"""
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -2684,6 +2712,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
def test_missing_non_data_file(self):
"""
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -2741,6 +2771,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
def test_merge_remote_mode(self):
"""
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(


@ -1,6 +1,7 @@
import unittest
import os
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
import locale
module_name = 'option'
@ -23,7 +24,7 @@ class OptionTest(ProbackupTest, unittest.TestCase):
"""help options"""
with open(os.path.join(self.dir_path, "expected/option_version.out"), "rb") as version_out:
self.assertIn(
version_out.read().decode("utf-8"),
version_out.read().decode("utf-8").strip(),
self.run_pb(["--version"])
)
@ -226,3 +227,17 @@ class OptionTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_help_6(self):
"""help options"""
if ProbackupTest.enable_nls:
self.test_env['LC_ALL'] = 'ru_RU.utf-8'
with open(os.path.join(self.dir_path, "expected/option_help_ru.out"), "rb") as help_out:
self.assertEqual(
self.run_pb(["--help"]),
help_out.read().decode("utf-8")
)
else:
self.skipTest(
'You need to configure PostgreSQL with the --enable-nls option to run this test')


@ -18,11 +18,8 @@ class BugTest(ProbackupTest, unittest.TestCase):
"""
https://jira.postgrespro.ru/browse/PGPRO-2068
"""
if not self.gdb:
self.skipTest(
"Specify PGPROBACKUP_GDB and build without "
"optimizations for run this test"
)
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),


@ -822,6 +822,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
def test_ptrack_vacuum_full(self):
"""make node, make full and ptrack stream backups,
restore them and check data correctness"""
self._check_gdb_flag_or_skip_test()
backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, self.fname, 'node'),


@ -539,11 +539,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
start backup from replica, during backup promote replica
check that backup is failed
"""
if not self.gdb:
self.skipTest(
"Specify PGPROBACKUP_GDB and build without "
"optimizations for run this test"
)
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
@ -634,11 +631,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
def test_replica_stop_lsn_null_offset(self):
"""
"""
if not self.gdb:
self.skipTest(
"Specify PGPROBACKUP_GDB and build without "
"optimizations for run this test"
)
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
@ -722,11 +716,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
def test_replica_stop_lsn_null_offset_next_record(self):
"""
"""
if not self.gdb:
self.skipTest(
"Specify PGPROBACKUP_GDB and build without "
"optimizations for run this test"
)
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
@ -749,7 +740,6 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
# freeze bgwriter to get rid of RUNNING XACTS records
bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0]
gdb_checkpointer = self.gdb_attach(bgwriter_pid)
self.backup_node(backup_dir, 'master', master)
@ -828,6 +818,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
def test_archive_replica_null_offset(self):
"""
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
@ -998,11 +990,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
make archive master, take full and page archive backups from master,
set replica, make archive backup from replica
"""
if not self.gdb:
self.skipTest(
"Specify PGPROBACKUP_GDB and build without "
"optimizations for run this test"
)
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
@ -1104,11 +1093,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
def test_start_stop_lsn_in_the_same_segno(self):
"""
"""
if not self.gdb:
self.skipTest(
"Specify PGPROBACKUP_GDB and build without "
"optimizations for run this test"
)
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
@ -1131,7 +1116,6 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
# freeze bgwriter to get rid of RUNNING XACTS records
bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0]
gdb_checkpointer = self.gdb_attach(bgwriter_pid)
self.backup_node(backup_dir, 'master', master, options=['--stream'])


@ -2379,6 +2379,8 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_restore_concurrent_drop_table(self):
""""""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -3796,6 +3798,8 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_concurrent_restore(self):
""""""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -3915,3 +3919,59 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_restore_with_waldir(self):
"""recovery using tablespace-mapping option and page backup"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'])
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
with node.connect("postgres") as con:
con.execute(
"CREATE TABLE tbl AS SELECT * "
"FROM generate_series(0,3) AS integer")
con.commit()
# Full backup
backup_id = self.backup_node(backup_dir, 'node', node)
node.stop()
node.cleanup()
# Create waldir
waldir_path = os.path.join(node.base_dir, "waldir")
os.makedirs(waldir_path)
# Test recovery from latest
self.assertIn(
"INFO: Restore of backup {0} completed.".format(backup_id),
self.restore_node(
backup_dir, 'node', node,
options=[
"-X", "%s" % (waldir_path)]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.slow_start()
count = node.execute("postgres", "SELECT count(*) FROM tbl")
self.assertEqual(count[0][0], 4)
# check pg_wal is symlink
if node.major_version >= 10:
wal_path=os.path.join(node.data_dir, "pg_wal")
else:
wal_path=os.path.join(node.data_dir, "pg_xlog")
self.assertEqual(os.path.islink(wal_path), True)
# Clean after yourself
self.del_test_dir(module_name, fname)
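# For reference, the option exercised above corresponds to a command line
# such as (paths illustrative):
#     pg_probackup restore -B /backups --instance=node -X /mnt/waldir
# after which <pgdata>/pg_wal (pg_xlog before PostgreSQL 10) is a symlink
# into the given WAL directory.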


@ -1499,6 +1499,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
FULL
-------window
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@ -1546,6 +1548,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
FULL
-------window
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@ -1588,6 +1592,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
def test_retention_redundancy_overlapping_chains(self):
""""""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@ -1636,6 +1642,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
def test_retention_redundancy_overlapping_chains_1(self):
""""""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@ -1744,6 +1752,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
"""
Check that retention purge works correctly with MERGING backups
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -2536,6 +2546,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
"""
https://github.com/postgrespro/pg_probackup/issues/328
"""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(

tests/time_consuming.py Normal file

@ -0,0 +1,76 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest
import subprocess
from time import sleep
module_name = 'time_consuming'
class TimeConsumingTests(ProbackupTest, unittest.TestCase):
def test_pbckp150(self):
"""
https://jira.postgrespro.ru/browse/PBCKP-150
create a node filled with pgbench
create FULL backup followed by PTRACK backup
run pgbench, VACUUM FULL, and PTRACK backups in parallel
"""
# init node
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'])
node.append_conf('postgresql.conf',
"""
max_connections = 100
wal_keep_size = 16000
ptrack.map_size = 1
shared_preload_libraries='ptrack'
log_statement = 'none'
fsync = off
log_checkpoints = on
autovacuum = off
""")
# init probackup and add an instance
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
# run the node and init ptrack
node.slow_start()
node.safe_psql("postgres", "CREATE EXTENSION ptrack")
# populate it with pgbench
node.pgbench_init(scale=5)
# FULL backup followed by PTRACK backup
self.backup_node(backup_dir, 'node', node, options=['--stream'])
self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream'])
# run an ordinary pgbench scenario to imitate activity, and a second pgbench doing VACUUM FULL in parallel
nBenchDuration = 30
pgbench = node.pgbench(options=['-c', '20', '-j', '8', '-T', str(nBenchDuration)])
with open('/tmp/pbckp150vacuum.sql', 'w') as f:
f.write('VACUUM (FULL) pgbench_accounts, pgbench_tellers, pgbench_history; SELECT pg_sleep(1);\n')
pgbenchval = node.pgbench(options=['-c', '1', '-f', '/tmp/pbckp150vacuum.sql', '-T', str(nBenchDuration)])
# several PTRACK backups
for i in range(nBenchDuration):
print("[{}] backing up PTRACK diff...".format(i+1))
self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream', '--log-level-console', 'VERBOSE'])
sleep(0.1)
# if the activity pgbench has finished, stop backing up
if pgbench.poll() is not None:
break
pgbench.kill()
pgbenchval.kill()
pgbench.wait()
pgbenchval.wait()
backups = self.show_pb(backup_dir, 'node')
for b in backups:
self.assertEqual("OK", b['status'])
# Clean after yourself
self.del_test_dir(module_name, fname)


@ -2,6 +2,7 @@ import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
from datetime import datetime, timedelta
from pathlib import Path
import subprocess
from sys import exit
import time
@ -58,7 +59,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
with open(log_file_path) as f:
log_content = f.read()
self.assertIn(
'File: "{0}" blknum 1, empty page'.format(file),
'File: "{0}" blknum 1, empty page'.format(Path(file).as_posix()),
log_content,
'Failed to detect nullified block')
@ -1088,11 +1089,8 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
"""
check that interrupt during validation is handled correctly
"""
if not self.gdb:
self.skipTest(
"Specify PGPROBACKUP_GDB and build without "
"optimizations for run this test"
)
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
@ -3564,6 +3562,8 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_validation_after_backup(self):
""""""
self._check_gdb_flag_or_skip_test()
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -4247,4 +4247,4 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
# 715 MAXALIGN(header.compressed_size), in);
# 716 if (read_len != MAXALIGN(header.compressed_size))
# -> 717 elog(ERROR, "cannot read block %u of \"%s\" read %lu of %d",
# 718 blknum, file->path, read_len, header.compressed_size);
# 718 blknum, file->path, read_len, header.compressed_size);


@ -47,7 +47,7 @@ cd postgres # Go to postgres dir
if [ "$PG_PROBACKUP_PTRACK" = "ON" ]; then
git apply -3 ../ptrack/patches/${PTRACK_PATCH_PG_BRANCH}-ptrack-core.diff
fi
CFLAGS="-O0" ./configure --prefix=$PGHOME --enable-debug --enable-cassert --enable-depend --enable-tap-tests
CFLAGS="-O0" ./configure --prefix=$PGHOME --enable-debug --enable-cassert --enable-depend --enable-tap-tests --enable-nls
make -s -j$(nproc) install
#make -s -j$(nproc) -C 'src/common' install
#make -s -j$(nproc) -C 'src/port' install
@ -100,11 +100,20 @@ source pyenv/bin/activate
pip3 install testgres
echo "############### Testing:"
echo PG_PROBACKUP_PARANOIA=${PG_PROBACKUP_PARANOIA}
echo ARCHIVE_COMPRESSION=${ARCHIVE_COMPRESSION}
echo PGPROBACKUPBIN_OLD=${PGPROBACKUPBIN_OLD}
echo PGPROBACKUPBIN=${PGPROBACKUPBIN}
echo PGPROBACKUP_SSH_REMOTE=${PGPROBACKUP_SSH_REMOTE}
echo PGPROBACKUP_GDB=${PGPROBACKUP_GDB}
echo PG_PROBACKUP_PTRACK=${PG_PROBACKUP_PTRACK}
if [ "$MODE" = "basic" ]; then
export PG_PROBACKUP_TEST_BASIC=ON
echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC}
python3 -m unittest -v tests
python3 -m unittest -v tests.init
else
echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC}
python3 -m unittest -v tests.$MODE
fi
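# Illustrative invocations of this script (script name assumed): with
# MODE=basic only the quick suite (tests plus tests.init) runs; any other
# MODE value selects a single module, e.g. tests/restore.py:
#
#   MODE=basic ./run_tests.sh
#   MODE=restore ./run_tests.sh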