mirror of https://github.com/postgrespro/pg_probackup.git synced 2024-11-28 09:33:54 +02:00

ICU: fix: run default collation tests during make (install)check-world

Thanks to Alexander Lakhin for reporting this.
Commit 96b4aa791d by Marina Polyakova, 2018-10-31 13:44:16 +03:00
86 changed files with 34878 additions and 0 deletions

.gitignore (new file, 45 lines)

@@ -0,0 +1,45 @@
# Object files
*.o
# Libraries
*.lib
*.a
# Shared objects (inc. Windows DLLs)
*.dll
*.so
*.so.*
*.dylib
# Executables
*.exe
*.app
# Dependencies
.deps
# Binaries
/pg_probackup
# Generated by test suite
/regression.diffs
/regression.out
/results
/env
/tests/__pycache__/
/tests/helpers/__pycache__/
/tests/tmp_dirs/
/tests/*pyc
/tests/helpers/*pyc
# Extra files
/src/datapagemap.c
/src/datapagemap.h
/src/logging.h
/src/receivelog.c
/src/receivelog.h
/src/streamutil.c
/src/streamutil.h
/src/xlogreader.c
/src/walmethods.c
/src/walmethods.h

.travis.yml (new file, 7 lines)

@@ -0,0 +1,7 @@
sudo: required
services:
- docker
script:
- docker run -v $(pwd):/tests --rm centos:7 /tests/travis/backup_restore.sh

COPYRIGHT (new file, 29 lines)

@@ -0,0 +1,29 @@
Copyright (c) 2015-2017, Postgres Professional
Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
Portions Copyright (c) 1994, The Regents of the University of California
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the NIPPON TELEGRAPH AND TELEPHONE CORPORATION
(NTT) nor the names of its contributors may be used to endorse or
promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Makefile (new file, 87 lines)

@@ -0,0 +1,87 @@
PROGRAM = pg_probackup
OBJS = src/backup.o src/catalog.o src/configure.o src/data.o \
src/delete.o src/dir.o src/fetch.o src/help.o src/init.o \
src/pg_probackup.o src/restore.o src/show.o src/status.o \
src/util.o src/validate.o src/datapagemap.o src/parsexlog.o \
src/xlogreader.o src/streamutil.o src/receivelog.o \
src/archive.o src/utils/parray.o src/utils/pgut.o src/utils/logger.o \
src/utils/json.o src/utils/thread.o src/merge.o
EXTRA_CLEAN = src/datapagemap.c src/datapagemap.h src/xlogreader.c \
src/receivelog.c src/receivelog.h src/streamutil.c src/streamutil.h src/logging.h
INCLUDES = src/datapagemap.h src/logging.h src/receivelog.h src/streamutil.h
ifdef USE_PGXS
PG_CONFIG = pg_config
PGXS := $(shell $(PG_CONFIG) --pgxs)
include $(PGXS)
# !USE_PGXS
else
subdir=contrib/pg_probackup
top_builddir=../..
include $(top_builddir)/src/Makefile.global
include $(top_srcdir)/contrib/contrib-global.mk
endif # USE_PGXS
ifeq ($(top_srcdir),../..)
ifeq ($(LN_S),ln -s)
srchome=$(top_srcdir)/..
endif
else
srchome=$(top_srcdir)
endif
ifneq (,$(filter 10 11 12,$(MAJORVERSION)))
OBJS += src/walmethods.o
EXTRA_CLEAN += src/walmethods.c src/walmethods.h
INCLUDES += src/walmethods.h
endif
PG_CPPFLAGS = -I$(libpq_srcdir) ${PTHREAD_CFLAGS} -Isrc -I$(top_srcdir)/$(subdir)/src
override CPPFLAGS := -DFRONTEND $(CPPFLAGS) $(PG_CPPFLAGS)
PG_LIBS = $(libpq_pgport) ${PTHREAD_CFLAGS}
all: checksrcdir $(INCLUDES);
$(PROGRAM): $(OBJS)
src/xlogreader.c: $(top_srcdir)/src/backend/access/transam/xlogreader.c
rm -f $@ && $(LN_S) $(srchome)/src/backend/access/transam/xlogreader.c $@
src/datapagemap.c: $(top_srcdir)/src/bin/pg_rewind/datapagemap.c
rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_rewind/datapagemap.c $@
src/datapagemap.h: $(top_srcdir)/src/bin/pg_rewind/datapagemap.h
rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_rewind/datapagemap.h $@
src/logging.h: $(top_srcdir)/src/bin/pg_rewind/logging.h
rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_rewind/logging.h $@
src/receivelog.c: $(top_srcdir)/src/bin/pg_basebackup/receivelog.c
rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_basebackup/receivelog.c $@
src/receivelog.h: $(top_srcdir)/src/bin/pg_basebackup/receivelog.h
rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_basebackup/receivelog.h $@
src/streamutil.c: $(top_srcdir)/src/bin/pg_basebackup/streamutil.c
rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_basebackup/streamutil.c $@
src/streamutil.h: $(top_srcdir)/src/bin/pg_basebackup/streamutil.h
rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_basebackup/streamutil.h $@
ifneq (,$(filter 10 11 12,$(MAJORVERSION)))
src/walmethods.c: $(top_srcdir)/src/bin/pg_basebackup/walmethods.c
rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_basebackup/walmethods.c $@
src/walmethods.h: $(top_srcdir)/src/bin/pg_basebackup/walmethods.h
rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_basebackup/walmethods.h $@
endif
ifeq ($(PORTNAME), aix)
CC=xlc_r
endif
# This rule's only purpose is to give the user instructions on how to pass
# the path to PostgreSQL source tree to the makefile.
.PHONY: checksrcdir
checksrcdir:
ifndef top_srcdir
@echo "You must have PostgreSQL source tree available to compile."
@echo "Pass the path to the PostgreSQL source tree to make, in the top_srcdir"
@echo "variable: \"make top_srcdir=<path to PostgreSQL source tree>\""
@exit 1
endif
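
As a quick illustration, these are the two build paths this Makefile supports (all paths below are placeholders):

```shell
# In-tree build: place the sources under contrib/ of a PostgreSQL source tree
cd /path/to/postgresql-src/contrib/pg_probackup
make

# PGXS build against an installed PostgreSQL, still pointing at a matching source tree
make USE_PGXS=1 PG_CONFIG=/usr/local/pgsql/bin/pg_config top_srcdir=/path/to/postgresql-src
```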

README.md (new file, 100 lines)

@@ -0,0 +1,100 @@
# pg_probackup
`pg_probackup` is a utility to manage backup and recovery of PostgreSQL database clusters. It is designed to perform periodic backups of the PostgreSQL instance that enable you to restore the server in case of a failure.
The utility is compatible with:
* PostgreSQL 9.5, 9.6, 10;
`PTRACK` backup support is provided via the following options:
* vanilla PostgreSQL compiled with the ptrack patch. Patches are currently available for [PostgreSQL 9.6](https://gist.githubusercontent.com/gsmol/5b615c971dfd461c76ef41a118ff4d97/raw/e471251983f14e980041f43bea7709b8246f4178/ptrack_9.6.6_v1.5.patch) and [PostgreSQL 10](https://gist.githubusercontent.com/gsmol/be8ee2a132b88463821021fd910d960e/raw/de24f9499f4f314a4a3e5fae5ed4edb945964df8/ptrack_10.1_v1.5.patch)
* Postgres Pro Standard 9.5, 9.6
* Postgres Pro Enterprise
Compared to other backup solutions, `pg_probackup` offers the following benefits, which can help you implement different backup strategies and deal with large amounts of data:
* Choosing between full and page-level incremental backups to speed up backup and recovery
* Implementing a single backup strategy for multi-server PostgreSQL clusters
* Automatic data consistency checks and on-demand backup validation without actual data recovery
* Managing backups in accordance with retention policy
* Running backup, restore, and validation processes on multiple parallel threads
* Storing backup data in a compressed state to save disk space
* Taking backups from a standby server to avoid extra load on the master server
* Extended logging settings
* Custom commands to simplify WAL log archiving
To manage backup data, `pg_probackup` creates a backup catalog. This directory stores all backup files with additional meta information, as well as WAL archives required for [point-in-time recovery](https://postgrespro.com/docs/postgresql/current/continuous-archiving.html). You can store backups for different instances in separate subdirectories of a single backup catalog.
Using `pg_probackup`, you can take full or incremental backups:
* `Full` backups contain all the data files required to restore the database cluster from scratch.
* `Incremental` backups only store the data that has changed since the previous backup. This decreases the backup size and speeds up backup operations. `pg_probackup` supports the following modes of incremental backups (a sketch of taking each kind follows this list):
* `PAGE` backup. In this mode, `pg_probackup` scans all WAL files in the archive from the moment the previous full or incremental backup was taken. Newly created backups contain only the pages that were mentioned in WAL records. This requires all the WAL files since the previous backup to be present in the WAL archive. If the size of these files is comparable to the total size of the database cluster files, the speedup is smaller, but the backup still takes less space.
* `DELTA` backup. In this mode, `pg_probackup` reads all data files in the PGDATA directory and copies only the pages that have changed since the previous backup. Continuous archiving is not required for this mode to operate. Note that this mode can impose read-only I/O pressure equal to a `Full` backup.
* `PTRACK` backup. In this mode, PostgreSQL tracks page changes on the fly. Continuous archiving is not necessary for it to operate. Each time a relation page is updated, this page is marked in a special `PTRACK` bitmap for this relation. As one page requires just one bit in the `PTRACK` fork, such bitmaps are quite small. Tracking implies some minor overhead on the database server operation, but speeds up incremental backups significantly.
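A minimal sketch of taking a backup in each mode, assuming a backup catalog at `/backup` and an already-configured instance named `node` (both are placeholders):

```shell
# Full backup: a self-sufficient copy of the whole cluster
pg_probackup backup -B /backup --instance node -b FULL

# Incremental backups: PAGE scans the WAL archive, DELTA scans the data files,
# PTRACK uses the on-the-fly change bitmaps (requires ptrack support)
pg_probackup backup -B /backup --instance node -b PAGE
pg_probackup backup -B /backup --instance node -b DELTA
pg_probackup backup -B /backup --instance node -b PTRACK
```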
Regardless of the chosen backup type, all backups taken with `pg_probackup` support the following archiving strategies:
* `Autonomous backups` include all the files required to restore the cluster to a consistent state at the time the backup was taken. Even if continuous archiving is not set up, the required WAL segments are included in the backup.
* `Archive backups` rely on continuous archiving. Such backups enable cluster recovery to an arbitrary point after the backup was taken (point-in-time recovery).
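The archiving strategy is chosen per backup; a sketch with the same placeholder catalog and instance as above:

```shell
# Autonomous backup: the required WAL segments are streamed and stored inside the backup
pg_probackup backup -B /backup --instance node -b FULL --stream

# Archive backup: relies on WAL segments already shipped by continuous archiving
pg_probackup backup -B /backup --instance node -b FULL
```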
## Limitations
`pg_probackup` currently has the following limitations:
* Creating backups from a remote server is currently not supported.
* The server from which the backup was taken and the restored server must have compatible [block_size](https://postgrespro.com/docs/postgresql/current/runtime-config-preset#guc-block-size) and [wal_block_size](https://postgrespro.com/docs/postgresql/current/runtime-config-preset#guc-wal-block-size) parameters and the same major release number.
* The Microsoft Windows operating system is not supported.
* Configuration files outside of the PostgreSQL data directory are not included in the backup and should be backed up separately.
## Installation and Setup
### Linux Installation
```shell
#DEB Ubuntu|Debian Packages
echo "deb [arch=amd64] http://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup.list
wget -O - http://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | apt-key add - && apt-get update
apt-get install pg-probackup-{10,9.6,9.5}
#DEB-SRC Packages
echo "deb-src [arch=amd64] http://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" >>\
/etc/apt/sources.list.d/pg_probackup.list
apt-get source pg-probackup-{10,9.6,9.5}
#RPM Centos Packages
rpm -ivh http://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-centos.noarch.rpm
yum install pg_probackup-{10,9.6,9.5}
#RPM RHEL Packages
rpm -ivh http://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-rhel.noarch.rpm
yum install pg_probackup-{10,9.6,9.5}
#RPM Oracle Linux Packages
rpm -ivh http://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-oraclelinux.noarch.rpm
yum install pg_probackup-{10,9.6,9.5}
#SRPM Packages
yumdownloader --source pg_probackup-{10,9.6,9.5}
```
To compile `pg_probackup`, you must have a PostgreSQL installation and a raw source tree. To install `pg_probackup`, execute this in the module's directory:
```shell
make USE_PGXS=1 PG_CONFIG=<path_to_pg_config> top_srcdir=<path_to_PostgreSQL_source_tree>
```
Once you have `pg_probackup` installed, complete [the setup](https://postgrespro.com/docs/postgrespro/current/app-pgprobackup.html#pg-probackup-install-and-setup).
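For reference, a condensed sketch of that setup (the catalog path, data directory, and instance name are placeholders):

```shell
# Initialize the backup catalog, then register a PostgreSQL instance in it
pg_probackup init -B /backup
pg_probackup add-instance -B /backup -D /var/lib/postgresql/10/main --instance node
```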
## Documentation
Currently the latest documentation can be found at [Postgres Pro Enterprise documentation](https://postgrespro.com/docs/postgrespro/current/app-pgprobackup).
## Licence
This module is available under the same license as [PostgreSQL](https://www.postgresql.org/about/licence/).
## Feedback
Do not hesitate to post your issues, questions and new ideas at the [issues](https://github.com/postgrespro/pg_probackup/issues) page.
## Authors
Postgres Professional, Moscow, Russia.
## Credits
The `pg_probackup` utility is based on `pg_arman`, which was originally written by NTT and later developed and maintained by Michael Paquier.

doit.cmd (new file, 1 line)

@@ -0,0 +1 @@
perl win32build.pl "C:\PgProject\pgwininstall-ee\builddir\distr_X64_10.4.1\postgresql" "C:\PgProject\pgwininstall-ee\builddir\postgresql\postgrespro-enterprise-10.4.1\src"

doit96.cmd (new file, 1 line)

@@ -0,0 +1 @@
perl win32build96.pl "C:\PgPro96" "C:\PgProject\pg96ee\postgrespro\src"
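
The `win32build.pl` and `win32build96.pl` scripts invoked above are not part of this diff; judging by the `@PGSRC@`, `@PGROOT@`, `@ADDINCLUDE@` and `@ADDLIBS@` tokens in the `.vcxproj` templates below, they presumably substitute those placeholders with the paths passed on the command line. A purely hypothetical sketch of that expansion step (file names and paths are assumptions):

```shell
# Hypothetical illustration only: expand template tokens into a concrete project file
sed -e 's|@PGSRC@|C:\\PgPro96\\src|g' \
    -e 's|@PGROOT@|C:\\PgPro96|g' \
    -e 's|@ADDINCLUDE@||g' \
    -e 's|@ADDLIBS@||g' \
    -e 's|@ADDLIBS32@||g' \
    pg_probackup.vcxproj.template > pg_probackup.vcxproj
```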

gen_probackup_project.pl (new file, 190 lines)

@@ -0,0 +1,190 @@
# -*-perl-*- hey - emacs - this is a perl file
BEGIN{
use Cwd;
use File::Basename;
my $pgsrc="";
if (@ARGV==1)
{
$pgsrc = shift @ARGV;
if ($pgsrc eq "--help"){
print STDERR "Usage: $0 pg-source-dir\n";
print STDERR "Like this:\n";
print STDERR "$0 C:/PgProject/postgresql.10dev/postgrespro\n";
print STDERR "You may need to run this first:\n";
print STDERR "CALL \"C:\\Program Files (x86)\\Microsoft Visual Studio 12.0\\VC\\vcvarsall\" amd64\n";
exit 1;
}
}
else
{
use Cwd qw(abs_path);
my $path = dirname(abs_path($0));
chdir($path);
chdir("../..");
$pgsrc = cwd();
}
chdir("$pgsrc/src/tools/msvc");
push(@INC, "$pgsrc/src/tools/msvc");
chdir("../../..") if (-d "../msvc" && -d "../../../src");
}
use Win32;
use Carp;
use strict;
use warnings;
use Project;
use Solution;
use File::Copy;
use Config;
use VSObjectFactory;
use List::Util qw(first);
use Exporter;
our (@ISA, @EXPORT_OK);
@ISA = qw(Exporter);
@EXPORT_OK = qw(Mkvcbuild);
my $solution;
my $libpgport;
my $libpgcommon;
my $libpgfeutils;
my $postgres;
my $libpq;
my @unlink_on_exit;
use lib "src/tools/msvc";
use Mkvcbuild;
# if (-e "src/tools/msvc/buildenv.pl")
# {
# do "src/tools/msvc/buildenv.pl";
# }
# elsif (-e "./buildenv.pl")
# {
# do "./buildenv.pl";
# }
# set up the project
our $config;
do "config_default.pl";
do "config.pl" if (-f "src/tools/msvc/config.pl");
# my $vcver = Mkvcbuild::mkvcbuild($config);
my $vcver = build_pgprobackup($config);
# check what sort of build we are doing
my $bconf = $ENV{CONFIG} || "Release";
my $msbflags = $ENV{MSBFLAGS} || "";
my $buildwhat = $ARGV[1] || "";
if (uc($ARGV[0]) eq 'DEBUG')
{
$bconf = "Debug";
}
elsif (uc($ARGV[0]) ne "RELEASE")
{
$buildwhat = $ARGV[0] || "";
}
# ... and do it
system("msbuild pg_probackup.vcxproj /verbosity:normal $msbflags /p:Configuration=$bconf" );
# report status
my $status = $? >> 8;
exit $status;
sub build_pgprobackup
{
our $config = shift;
chdir('../../..') if (-d '../msvc' && -d '../../../src');
die 'Must run from root or msvc directory'
unless (-d 'src/tools/msvc' && -d 'src');
# my $vsVersion = DetermineVisualStudioVersion();
my $vsVersion = '12.00';
$solution = CreateSolution($vsVersion, $config);
$libpq = $solution->AddProject('libpq', 'dll', 'interfaces',
'src/interfaces/libpq');
$libpgfeutils = $solution->AddProject('libpgfeutils', 'lib', 'misc');
$libpgcommon = $solution->AddProject('libpgcommon', 'lib', 'misc');
$libpgport = $solution->AddProject('libpgport', 'lib', 'misc');
#vvs test
my $probackup =
$solution->AddProject('pg_probackup', 'exe', 'pg_probackup'); #, 'contrib/pg_probackup'
$probackup->AddFiles(
'contrib/pg_probackup/src',
'archive.c',
'backup.c',
'catalog.c',
'configure.c',
'data.c',
'delete.c',
'dir.c',
'fetch.c',
'help.c',
'init.c',
'parsexlog.c',
'pg_probackup.c',
'restore.c',
'show.c',
'status.c',
'util.c',
'validate.c'
);
$probackup->AddFiles(
'contrib/pg_probackup/src/utils',
'json.c',
'logger.c',
'parray.c',
'pgut.c',
'thread.c'
);
$probackup->AddFile('src/backend/access/transam/xlogreader.c');
$probackup->AddFiles(
'src/bin/pg_basebackup',
'receivelog.c',
'streamutil.c'
);
if (-e 'src/bin/pg_basebackup/walmethods.c')
{
$probackup->AddFile('src/bin/pg_basebackup/walmethods.c');
}
$probackup->AddFile('src/bin/pg_rewind/datapagemap.c');
$probackup->AddFile('src/interfaces/libpq/pthread-win32.c');
$probackup->AddIncludeDir('src/bin/pg_basebackup');
$probackup->AddIncludeDir('src/bin/pg_rewind');
$probackup->AddIncludeDir('src/interfaces/libpq');
$probackup->AddIncludeDir('src');
$probackup->AddIncludeDir('src/port');
$probackup->AddIncludeDir('contrib/pg_probackup');
$probackup->AddIncludeDir('contrib/pg_probackup/src');
$probackup->AddIncludeDir('contrib/pg_probackup/src/utils');
$probackup->AddReference($libpq, $libpgfeutils, $libpgcommon, $libpgport);
$probackup->AddLibrary('ws2_32.lib');
$probackup->Save();
return $solution->{vcver};
}
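
Usage, per the script's own help text above (run from a Visual Studio 2013 command prompt; the paths are the examples the script itself prints):

```shell
CALL "C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall" amd64
perl gen_probackup_project.pl C:/PgProject/postgresql.10dev/postgrespro
```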

msvs/pg_probackup.sln (new file, 28 lines)

@@ -0,0 +1,28 @@

Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio Express 2013 for Windows Desktop
VisualStudioVersion = 12.0.31101.0
MinimumVisualStudioVersion = 10.0.40219.1
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "pg_probackup", "pg_probackup.vcxproj", "{4886B21A-D8CA-4A03-BADF-743B24C88327}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Win32 = Debug|Win32
Debug|x64 = Debug|x64
Release|Win32 = Release|Win32
Release|x64 = Release|x64
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{4886B21A-D8CA-4A03-BADF-743B24C88327}.Debug|Win32.ActiveCfg = Debug|Win32
{4886B21A-D8CA-4A03-BADF-743B24C88327}.Debug|Win32.Build.0 = Debug|Win32
{4886B21A-D8CA-4A03-BADF-743B24C88327}.Debug|x64.ActiveCfg = Debug|x64
{4886B21A-D8CA-4A03-BADF-743B24C88327}.Debug|x64.Build.0 = Debug|x64
{4886B21A-D8CA-4A03-BADF-743B24C88327}.Release|Win32.ActiveCfg = Release|Win32
{4886B21A-D8CA-4A03-BADF-743B24C88327}.Release|Win32.Build.0 = Release|Win32
{4886B21A-D8CA-4A03-BADF-743B24C88327}.Release|x64.ActiveCfg = Release|x64
{4886B21A-D8CA-4A03-BADF-743B24C88327}.Release|x64.Build.0 = Release|x64
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
EndGlobal

@@ -0,0 +1,212 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{4886B21A-D8CA-4A03-BADF-743B24C88327}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>pg_probackup</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;@PGSRC@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
<ExcludePath />
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;@PGSRC@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
<ExcludePath />
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;@PGSRC@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
<ExcludePath />
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;@PGSRC@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
<ExcludePath />
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_CRT_NONSTDC_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>@ADDLIBS32@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_CRT_NONSTDC_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>@ADDLIBS@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;_CRT_NONSTDC_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>@ADDLIBS32@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
<IgnoreSpecificDefaultLibraries>libc;%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;_CRT_NONSTDC_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>@ADDLIBS@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
<IgnoreSpecificDefaultLibraries>libc;%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="@PGSRC@\backend\access\transam\xlogreader.c" />
<ClCompile Include="@PGSRC@\bin\pg_basebackup\receivelog.c" />
<ClCompile Include="@PGSRC@\bin\pg_basebackup\streamutil.c" />
<ClCompile Include="@PGSRC@\bin\pg_basebackup\walmethods.c" />
<ClCompile Include="@PGSRC@\bin\pg_rewind\datapagemap.c" />
<ClCompile Include="@PGSRC@\interfaces\libpq\pthread-win32.c" />
<ClCompile Include="..\src\archive.c" />
<ClCompile Include="..\src\backup.c" />
<ClCompile Include="..\src\catalog.c" />
<ClCompile Include="..\src\configure.c" />
<ClCompile Include="..\src\data.c" />
<ClCompile Include="..\src\delete.c" />
<ClCompile Include="..\src\dir.c" />
<ClCompile Include="..\src\fetch.c" />
<ClCompile Include="..\src\help.c" />
<ClCompile Include="..\src\init.c" />
<ClCompile Include="..\src\parsexlog.c" />
<ClCompile Include="..\src\pg_probackup.c" />
<ClCompile Include="..\src\restore.c" />
<ClCompile Include="..\src\show.c" />
<ClCompile Include="..\src\status.c" />
<ClCompile Include="..\src\util.c" />
<ClCompile Include="..\src\utils\logger.c" />
<ClCompile Include="..\src\utils\parray.c" />
<ClCompile Include="..\src\utils\pgut.c" />
<ClCompile Include="..\src\validate.c" />
<ClCompile Include="..\src\utils\json.c" />
<ClCompile Include="..\src\utils\thread.c" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="@PGSRC@\bin\pg_basebackup\receivelog.h" />
<ClInclude Include="@PGSRC@\bin\pg_basebackup\streamutil.h" />
<ClInclude Include="@PGSRC@\bin\pg_basebackup\walmethods.h" />
<ClInclude Include="@PGSRC@\bin\pg_rewind\datapagemap.h" />
<ClInclude Include="@PGSRC@\bin\pg_rewind\logging.h" />
<ClInclude Include="..\src\pg_probackup.h" />
<ClInclude Include="..\src\utils\logger.h" />
<ClInclude Include="..\src\utils\parray.h" />
<ClInclude Include="..\src\utils\pgut.h" />
<ClInclude Include="..\src\utils\json.h" />
<ClInclude Include="..\src\utils\thread.h" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>

@@ -0,0 +1,210 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{4886B21A-D8CA-4A03-BADF-743B24C88327}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>pg_probackup</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;@PGSRC@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
<ExcludePath />
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;@PGSRC@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
<ExcludePath />
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;@PGSRC@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
<ExcludePath />
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;@PGSRC@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
<ExcludePath />
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_CRT_NONSTDC_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>@ADDLIBS32@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_CRT_NONSTDC_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>@ADDLIBS@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;_CRT_NONSTDC_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>@ADDLIBS32@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
<IgnoreSpecificDefaultLibraries>libc;%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;_CRT_NONSTDC_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>@ADDLIBS@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
<IgnoreSpecificDefaultLibraries>libc;%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="@PGSRC@\backend\access\transam\xlogreader.c" />
<ClCompile Include="@PGSRC@\bin\pg_basebackup\receivelog.c" />
<ClCompile Include="@PGSRC@\bin\pg_basebackup\streamutil.c" />
<ClCompile Include="@PGSRC@\bin\pg_rewind\datapagemap.c" />
<ClCompile Include="@PGSRC@\interfaces\libpq\pthread-win32.c" />
<ClCompile Include="..\src\archive.c" />
<ClCompile Include="..\src\backup.c" />
<ClCompile Include="..\src\catalog.c" />
<ClCompile Include="..\src\configure.c" />
<ClCompile Include="..\src\data.c" />
<ClCompile Include="..\src\delete.c" />
<ClCompile Include="..\src\dir.c" />
<ClCompile Include="..\src\fetch.c" />
<ClCompile Include="..\src\help.c" />
<ClCompile Include="..\src\init.c" />
<ClCompile Include="..\src\parsexlog.c" />
<ClCompile Include="..\src\pg_probackup.c" />
<ClCompile Include="..\src\restore.c" />
<ClCompile Include="..\src\show.c" />
<ClCompile Include="..\src\status.c" />
<ClCompile Include="..\src\util.c" />
<ClCompile Include="..\src\utils\logger.c" />
<ClCompile Include="..\src\utils\parray.c" />
<ClCompile Include="..\src\utils\pgut.c" />
<ClCompile Include="..\src\validate.c" />
<ClCompile Include="..\src\utils\json.c" />
<ClCompile Include="..\src\utils\thread.c" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="@PGSRC@\bin\pg_basebackup\receivelog.h" />
<ClInclude Include="@PGSRC@\bin\pg_basebackup\streamutil.h" />
<ClInclude Include="@PGSRC@\bin\pg_rewind\datapagemap.h" />
<ClInclude Include="@PGSRC@\bin\pg_rewind\logging.h" />
<ClInclude Include="..\src\pg_probackup.h" />
<ClInclude Include="..\src\utils\logger.h" />
<ClInclude Include="..\src\utils\parray.h" />
<ClInclude Include="..\src\utils\pgut.h" />
<ClInclude Include="..\src\utils\json.h" />
<ClInclude Include="..\src\utils\thread.h" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>

@@ -0,0 +1,203 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{4886B21A-D8CA-4A03-BADF-743B24C88327}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>pg_probackup</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_CRT_NONSTDC_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>@ADDLIBS@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_CRT_NONSTDC_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>@ADDLIBS@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;_CRT_NONSTDC_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>@ADDLIBS@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<IgnoreSpecificDefaultLibraries>libc;%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;_CRT_NONSTDC_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>@ADDLIBS@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<IgnoreSpecificDefaultLibraries>libc;%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
</Link>
</ItemDefinitionGroup>
<!-- @PGROOT@\lib;@ADDLIBS@ @PGSRC@ @ADDINCLUDE@ -->
<ItemGroup>
<ClCompile Include="@PGSRC@\backend\access\transam\xlogreader.c" />
<ClCompile Include="@PGSRC@\bin\pg_basebackup\receivelog.c" />
<ClCompile Include="@PGSRC@\bin\pg_basebackup\streamutil.c" />
<ClCompile Include="@PGSRC@\bin\pg_basebackup\walmethods.c" />
<ClCompile Include="@PGSRC@\bin\pg_rewind\datapagemap.c" />
<ClCompile Include="@PGSRC@\interfaces\libpq\pthread-win32.c" />
<ClCompile Include="..\src\archive.c" />
<ClCompile Include="..\src\backup.c" />
<ClCompile Include="..\src\catalog.c" />
<ClCompile Include="..\src\configure.c" />
<ClCompile Include="..\src\data.c" />
<ClCompile Include="..\src\delete.c" />
<ClCompile Include="..\src\dir.c" />
<ClCompile Include="..\src\fetch.c" />
<ClCompile Include="..\src\help.c" />
<ClCompile Include="..\src\init.c" />
<ClCompile Include="..\src\parsexlog.c" />
<ClCompile Include="..\src\pg_probackup.c" />
<ClCompile Include="..\src\restore.c" />
<ClCompile Include="..\src\show.c" />
<ClCompile Include="..\src\status.c" />
<ClCompile Include="..\src\util.c" />
<ClCompile Include="..\src\utils\logger.c" />
<ClCompile Include="..\src\utils\parray.c" />
<ClCompile Include="..\src\utils\pgut.c" />
<ClCompile Include="..\src\validate.c" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="@PGSRC@\bin\pg_basebackup\receivelog.h" />
<ClInclude Include="@PGSRC@\bin\pg_basebackup\streamutil.h" />
<ClInclude Include="@PGSRC@\bin\pg_basebackup\walmethods.h" />
<ClInclude Include="@PGSRC@\bin\pg_rewind\datapagemap.h" />
<ClInclude Include="@PGSRC@\bin\pg_rewind\logging.h" />
<ClInclude Include="..\src\pg_probackup.h" />
<ClInclude Include="..\src\utils\logger.h" />
<ClInclude Include="..\src\utils\parray.h" />
<ClInclude Include="..\src\utils\pgut.h" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>

src/archive.c (new file, 113 lines)

@@ -0,0 +1,113 @@
/*-------------------------------------------------------------------------
*
* archive.c: - pg_probackup specific archive commands for archive backups.
*
*
* Portions Copyright (c) 2017, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#include "pg_probackup.h"
#include <unistd.h>
#include <sys/stat.h>
/*
* pg_probackup specific archive command for archive backups
* set archive_command = 'pg_probackup archive-push -B /home/anastasia/backup
* --wal-file-path %p --wal-file-name %f' to move WAL files into arclog_path,
* where arclog_path is $BACKUP_PATH/wal/system_id.
* Currently it just copies WAL files to the new location.
* TODO: Planned options: list the arclog content,
* compute and validate checksums.
*/
int
do_archive_push(char *wal_file_path, char *wal_file_name, bool overwrite)
{
char backup_wal_file_path[MAXPGPATH];
char absolute_wal_file_path[MAXPGPATH];
char current_dir[MAXPGPATH];
int64 system_id;
pgBackupConfig *config;
bool is_compress = false;
if (wal_file_name == NULL && wal_file_path == NULL)
elog(ERROR, "required parameters are not specified: --wal-file-name %%f --wal-file-path %%p");
if (wal_file_name == NULL)
elog(ERROR, "required parameter not specified: --wal-file-name %%f");
if (wal_file_path == NULL)
elog(ERROR, "required parameter not specified: --wal-file-path %%p");
if (!getcwd(current_dir, sizeof(current_dir)))
elog(ERROR, "getcwd() error");
/* verify that archive-push --instance parameter is valid */
config = readBackupCatalogConfigFile();
system_id = get_system_identifier(current_dir);
if (config->pgdata == NULL)
elog(ERROR, "cannot read pg_probackup.conf for this instance");
if (system_id != config->system_identifier)
elog(ERROR, "Refusing to push WAL segment %s into archive: instance parameters mismatch. "
"Instance '%s' should have SYSTEM_ID = " INT64_FORMAT " instead of " INT64_FORMAT,
wal_file_name, instance_name, config->system_identifier, system_id);
/* Create 'arclog_path' directory. Do nothing if it already exists. */
dir_create_dir(arclog_path, DIR_PERMISSION);
join_path_components(absolute_wal_file_path, current_dir, wal_file_path);
join_path_components(backup_wal_file_path, arclog_path, wal_file_name);
elog(INFO, "pg_probackup archive-push from %s to %s", absolute_wal_file_path, backup_wal_file_path);
if (compress_alg == PGLZ_COMPRESS)
elog(ERROR, "pglz compression is not supported");
#ifdef HAVE_LIBZ
if (compress_alg == ZLIB_COMPRESS)
is_compress = IsXLogFileName(wal_file_name);
#endif
push_wal_file(absolute_wal_file_path, backup_wal_file_path, is_compress,
overwrite);
elog(INFO, "pg_probackup archive-push completed successfully");
return 0;
}
/*
* pg_probackup specific restore command.
* Move files from arclog_path to pgdata/wal_file_path.
*/
int
do_archive_get(char *wal_file_path, char *wal_file_name)
{
char backup_wal_file_path[MAXPGPATH];
char absolute_wal_file_path[MAXPGPATH];
char current_dir[MAXPGPATH];
if (wal_file_name == NULL && wal_file_path == NULL)
elog(ERROR, "required parameters are not specified: --wal-file-name %%f --wal-file-path %%p");
if (wal_file_name == NULL)
elog(ERROR, "required parameter not specified: --wal-file-name %%f");
if (wal_file_path == NULL)
elog(ERROR, "required parameter not specified: --wal-file-path %%p");
if (!getcwd(current_dir, sizeof(current_dir)))
elog(ERROR, "getcwd() error");
join_path_components(absolute_wal_file_path, current_dir, wal_file_path);
join_path_components(backup_wal_file_path, arclog_path, wal_file_name);
elog(INFO, "pg_probackup archive-get from %s to %s",
backup_wal_file_path, absolute_wal_file_path);
get_wal_file(backup_wal_file_path, absolute_wal_file_path);
elog(INFO, "pg_probackup archive-get completed successfully");
return 0;
}
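
Taken together, `do_archive_push` and `do_archive_get` above back the two sides of WAL archiving. A hypothetical configuration pairing them (the catalog path and instance name are placeholders):

```shell
# postgresql.conf: ship completed WAL segments into the catalog's archive
archive_command = 'pg_probackup archive-push -B /backup --instance node --wal-file-path %p --wal-file-name %f'
# recovery.conf: fetch archived segments back during point-in-time recovery
restore_command = 'pg_probackup archive-get -B /backup --instance node --wal-file-path %p --wal-file-name %f'
```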

src/backup.c (new file, 2701 lines)

File diff suppressed because it is too large

src/catalog.c (new file, 915 lines)

@@ -0,0 +1,915 @@
/*-------------------------------------------------------------------------
*
* catalog.c: backup catalog operation
*
* Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
* Portions Copyright (c) 2015-2017, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#include "pg_probackup.h"
#include <dirent.h>
#include <fcntl.h>
#include <signal.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
static const char *backupModes[] = {"", "PAGE", "PTRACK", "DELTA", "FULL"};
static pgBackup *readBackupControlFile(const char *path);
static bool exit_hook_registered = false;
static char lock_file[MAXPGPATH];
static void
unlink_lock_atexit(void)
{
int res;
res = unlink(lock_file);
/* unlink() returns -1 on failure; a file that is already gone (ENOENT) is fine */
if (res != 0 && errno != ENOENT)
elog(WARNING, "%s: %s", lock_file, strerror(errno));
}
/*
* Create a lockfile.
*/
void
catalog_lock(void)
{
int fd;
char buffer[MAXPGPATH * 2 + 256];
int ntries;
int len;
int encoded_pid;
pid_t my_pid,
my_p_pid;
join_path_components(lock_file, backup_instance_path, BACKUP_CATALOG_PID);
/*
* If the PID in the lockfile is our own PID or our parent's or
* grandparent's PID, then the file must be stale (probably left over from
* a previous system boot cycle). We need to check this because of the
* likelihood that a reboot will assign exactly the same PID as we had in
* the previous reboot, or one that's only one or two counts larger and
* hence the lockfile's PID now refers to an ancestor shell process. We
* allow pg_ctl to pass down its parent shell PID (our grandparent PID)
* via the environment variable PG_GRANDPARENT_PID; this is so that
* launching the postmaster via pg_ctl can be just as reliable as
* launching it directly. There is no provision for detecting
* further-removed ancestor processes, but if the init script is written
* carefully then all but the immediate parent shell will be root-owned
* processes and so the kill test will fail with EPERM. Note that we
* cannot get a false negative this way, because an existing postmaster
* would surely never launch a competing postmaster or pg_ctl process
* directly.
*/
my_pid = getpid();
#ifndef WIN32
my_p_pid = getppid();
#else
/*
* Windows hasn't got getppid(), but doesn't need it since it's not using
* real kill() either...
*/
my_p_pid = 0;
#endif
/*
* We need a loop here because of race conditions. But don't loop forever
* (for example, a non-writable $backup_instance_path directory might cause a failure
* that won't go away). 100 tries seems like plenty.
*/
for (ntries = 0;; ntries++)
{
/*
* Try to create the lock file --- O_EXCL makes this atomic.
*
* Think not to make the file protection weaker than 0600. See
* comments below.
*/
fd = open(lock_file, O_RDWR | O_CREAT | O_EXCL, 0600);
if (fd >= 0)
break; /* Success; exit the retry loop */
/*
* Couldn't create the pid file. Probably it already exists.
*/
if ((errno != EEXIST && errno != EACCES) || ntries > 100)
elog(ERROR, "could not create lock file \"%s\": %s",
lock_file, strerror(errno));
/*
* Read the file to get the old owner's PID. Note race condition
* here: file might have been deleted since we tried to create it.
*/
fd = open(lock_file, O_RDONLY, 0600);
if (fd < 0)
{
if (errno == ENOENT)
continue; /* race condition; try again */
elog(ERROR, "could not open lock file \"%s\": %s",
lock_file, strerror(errno));
}
if ((len = read(fd, buffer, sizeof(buffer) - 1)) < 0)
elog(ERROR, "could not read lock file \"%s\": %s",
lock_file, strerror(errno));
close(fd);
if (len == 0)
elog(ERROR, "lock file \"%s\" is empty", lock_file);
buffer[len] = '\0';
encoded_pid = atoi(buffer);
if (encoded_pid <= 0)
elog(ERROR, "bogus data in lock file \"%s\": \"%s\"",
lock_file, buffer);
/*
* Check to see if the other process still exists
*
* Per discussion above, my_pid, my_p_pid can be
* ignored as false matches.
*
* Normally kill() will fail with ESRCH if the given PID doesn't
* exist.
*/
if (encoded_pid != my_pid && encoded_pid != my_p_pid)
{
if (kill(encoded_pid, 0) == 0 ||
(errno != ESRCH && errno != EPERM))
elog(ERROR, "lock file \"%s\" already exists", lock_file);
}
/*
* Looks like nobody's home. Unlink the file and try again to create
* it. Need a loop because of possible race condition against other
* would-be creators.
*/
if (unlink(lock_file) < 0)
elog(ERROR, "could not remove old lock file \"%s\": %s",
lock_file, strerror(errno));
}
/*
* Successfully created the file, now fill it.
*/
	snprintf(buffer, sizeof(buffer), "%d\n", (int) my_pid);
errno = 0;
if (write(fd, buffer, strlen(buffer)) != strlen(buffer))
{
int save_errno = errno;
close(fd);
unlink(lock_file);
/* if write didn't set errno, assume problem is no disk space */
errno = save_errno ? save_errno : ENOSPC;
elog(ERROR, "could not write lock file \"%s\": %s",
lock_file, strerror(errno));
}
if (fsync(fd) != 0)
{
int save_errno = errno;
close(fd);
unlink(lock_file);
errno = save_errno;
elog(ERROR, "could not write lock file \"%s\": %s",
lock_file, strerror(errno));
}
if (close(fd) != 0)
{
int save_errno = errno;
unlink(lock_file);
errno = save_errno;
elog(ERROR, "could not write lock file \"%s\": %s",
lock_file, strerror(errno));
}
/*
* Arrange to unlink the lock file(s) at proc_exit.
*/
if (!exit_hook_registered)
{
atexit(unlink_lock_atexit);
exit_hook_registered = true;
}
}
/*
* Read backup meta information from BACKUP_CONTROL_FILE.
* If no backup matches, return NULL.
*/
pgBackup *
read_backup(time_t timestamp)
{
pgBackup tmp;
char conf_path[MAXPGPATH];
tmp.start_time = timestamp;
pgBackupGetPath(&tmp, conf_path, lengthof(conf_path), BACKUP_CONTROL_FILE);
return readBackupControlFile(conf_path);
}
/*
* Get backup_mode in string representation.
*/
const char *
pgBackupGetBackupMode(pgBackup *backup)
{
return backupModes[backup->backup_mode];
}
static bool
IsDir(const char *dirpath, const char *entry)
{
char path[MAXPGPATH];
struct stat st;
snprintf(path, MAXPGPATH, "%s/%s", dirpath, entry);
return stat(path, &st) == 0 && S_ISDIR(st.st_mode);
}
/*
* Create list of backups.
* If 'requested_backup_id' is INVALID_BACKUP_ID, return list of all backups.
* The list is sorted in order of descending start time.
 * If a valid backup id is passed, only the matching backup is added to the list.
*/
parray *
catalog_get_backup_list(time_t requested_backup_id)
{
DIR *data_dir = NULL;
struct dirent *data_ent = NULL;
parray *backups = NULL;
pgBackup *backup = NULL;
int i;
/* open backup instance backups directory */
data_dir = opendir(backup_instance_path);
if (data_dir == NULL)
{
elog(WARNING, "cannot open directory \"%s\": %s", backup_instance_path,
strerror(errno));
goto err_proc;
}
/* scan the directory and list backups */
backups = parray_new();
for (; (data_ent = readdir(data_dir)) != NULL; errno = 0)
{
char backup_conf_path[MAXPGPATH];
char data_path[MAXPGPATH];
/* skip not-directory entries and hidden entries */
if (!IsDir(backup_instance_path, data_ent->d_name)
|| data_ent->d_name[0] == '.')
continue;
/* open subdirectory of specific backup */
join_path_components(data_path, backup_instance_path, data_ent->d_name);
/* read backup information from BACKUP_CONTROL_FILE */
snprintf(backup_conf_path, MAXPGPATH, "%s/%s", data_path, BACKUP_CONTROL_FILE);
backup = readBackupControlFile(backup_conf_path);
/* ignore corrupted backups */
if (backup)
{
backup->backup_id = backup->start_time;
if (requested_backup_id != INVALID_BACKUP_ID
&& requested_backup_id != backup->start_time)
{
pgBackupFree(backup);
continue;
}
parray_append(backups, backup);
backup = NULL;
}
if (errno && errno != ENOENT)
{
elog(WARNING, "cannot read data directory \"%s\": %s",
data_ent->d_name, strerror(errno));
goto err_proc;
}
}
if (errno)
{
elog(WARNING, "cannot read backup root directory \"%s\": %s",
backup_instance_path, strerror(errno));
goto err_proc;
}
closedir(data_dir);
data_dir = NULL;
parray_qsort(backups, pgBackupCompareIdDesc);
	/* Link incremental backups with their ancestors. */
for (i = 0; i < parray_num(backups); i++)
{
pgBackup *curr = parray_get(backups, i);
int j;
if (curr->backup_mode == BACKUP_MODE_FULL)
continue;
for (j = i+1; j < parray_num(backups); j++)
{
pgBackup *ancestor = parray_get(backups, j);
if (ancestor->start_time == curr->parent_backup)
{
curr->parent_backup_link = ancestor;
/* elog(INFO, "curr %s, ancestor %s j=%d", base36enc_dup(curr->start_time),
base36enc_dup(ancestor->start_time), j); */
break;
}
}
}
return backups;
err_proc:
if (data_dir)
closedir(data_dir);
if (backup)
pgBackupFree(backup);
if (backups)
parray_walk(backups, pgBackupFree);
parray_free(backups);
elog(ERROR, "Failed to get backup list");
return NULL;
}
/*
* Find the last completed backup on given timeline
*/
pgBackup *
catalog_get_last_data_backup(parray *backup_list, TimeLineID tli)
{
int i;
pgBackup *backup = NULL;
/* backup_list is sorted in order of descending ID */
for (i = 0; i < parray_num(backup_list); i++)
{
backup = (pgBackup *) parray_get(backup_list, (size_t) i);
if (backup->status == BACKUP_STATUS_OK && backup->tli == tli)
return backup;
}
return NULL;
}
/* create backup directory in $BACKUP_PATH */
int
pgBackupCreateDir(pgBackup *backup)
{
int i;
char path[MAXPGPATH];
char *subdirs[] = { DATABASE_DIR, NULL };
pgBackupGetPath(backup, path, lengthof(path), NULL);
if (!dir_is_empty(path))
elog(ERROR, "backup destination is not empty \"%s\"", path);
dir_create_dir(path, DIR_PERMISSION);
/* create directories for actual backup files */
for (i = 0; subdirs[i]; i++)
{
pgBackupGetPath(backup, path, lengthof(path), subdirs[i]);
dir_create_dir(path, DIR_PERMISSION);
}
return 0;
}
/*
 * Write information about the backup to stream "out".
*/
void
pgBackupWriteControl(FILE *out, pgBackup *backup)
{
char timestamp[100];
fprintf(out, "#Configuration\n");
fprintf(out, "backup-mode = %s\n", pgBackupGetBackupMode(backup));
fprintf(out, "stream = %s\n", backup->stream ? "true" : "false");
fprintf(out, "compress-alg = %s\n",
deparse_compress_alg(backup->compress_alg));
fprintf(out, "compress-level = %d\n", backup->compress_level);
fprintf(out, "from-replica = %s\n", backup->from_replica ? "true" : "false");
fprintf(out, "\n#Compatibility\n");
fprintf(out, "block-size = %u\n", backup->block_size);
fprintf(out, "xlog-block-size = %u\n", backup->wal_block_size);
fprintf(out, "checksum-version = %u\n", backup->checksum_version);
fprintf(out, "program-version = %s\n", PROGRAM_VERSION);
if (backup->server_version[0] != '\0')
fprintf(out, "server-version = %s\n", backup->server_version);
fprintf(out, "\n#Result backup info\n");
fprintf(out, "timelineid = %d\n", backup->tli);
/* LSN returned by pg_start_backup */
fprintf(out, "start-lsn = %X/%X\n",
(uint32) (backup->start_lsn >> 32),
(uint32) backup->start_lsn);
/* LSN returned by pg_stop_backup */
fprintf(out, "stop-lsn = %X/%X\n",
(uint32) (backup->stop_lsn >> 32),
(uint32) backup->stop_lsn);
time2iso(timestamp, lengthof(timestamp), backup->start_time);
fprintf(out, "start-time = '%s'\n", timestamp);
if (backup->end_time > 0)
{
time2iso(timestamp, lengthof(timestamp), backup->end_time);
fprintf(out, "end-time = '%s'\n", timestamp);
}
fprintf(out, "recovery-xid = " XID_FMT "\n", backup->recovery_xid);
if (backup->recovery_time > 0)
{
time2iso(timestamp, lengthof(timestamp), backup->recovery_time);
fprintf(out, "recovery-time = '%s'\n", timestamp);
}
/*
* Size of PGDATA directory. The size does not include size of related
* WAL segments in archive 'wal' directory.
*/
if (backup->data_bytes != BYTES_INVALID)
fprintf(out, "data-bytes = " INT64_FORMAT "\n", backup->data_bytes);
if (backup->wal_bytes != BYTES_INVALID)
fprintf(out, "wal-bytes = " INT64_FORMAT "\n", backup->wal_bytes);
fprintf(out, "status = %s\n", status2str(backup->status));
/* 'parent_backup' is set if it is incremental backup */
if (backup->parent_backup != 0)
fprintf(out, "parent-backup-id = '%s'\n", base36enc(backup->parent_backup));
/* print connection info except password */
if (backup->primary_conninfo)
fprintf(out, "primary_conninfo = '%s'\n", backup->primary_conninfo);
}
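/*
 * For reference, a sketch of the control file this function emits; all
 * values below are illustrative, not taken from a real backup:
 *
 *     #Configuration
 *     backup-mode = FULL
 *     stream = false
 *     compress-alg = none
 *     compress-level = 1
 *     from-replica = false
 *
 *     #Compatibility
 *     block-size = 8192
 *     xlog-block-size = 8192
 *     checksum-version = 1
 *
 *     #Result backup info
 *     timelineid = 1
 *     start-lsn = 0/2000028
 *     stop-lsn = 0/2000130
 *     start-time = '2018-06-01 12:00:00+0300'
 *     status = OK
 */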
/* create BACKUP_CONTROL_FILE */
void
pgBackupWriteBackupControlFile(pgBackup *backup)
{
FILE *fp = NULL;
char ini_path[MAXPGPATH];
pgBackupGetPath(backup, ini_path, lengthof(ini_path), BACKUP_CONTROL_FILE);
fp = fopen(ini_path, "wt");
if (fp == NULL)
elog(ERROR, "cannot open configuration file \"%s\": %s", ini_path,
strerror(errno));
pgBackupWriteControl(fp, backup);
fclose(fp);
}
/*
* Output the list of files to backup catalog DATABASE_FILE_LIST
*/
void
pgBackupWriteFileList(pgBackup *backup, parray *files, const char *root)
{
FILE *fp;
char path[MAXPGPATH];
pgBackupGetPath(backup, path, lengthof(path), DATABASE_FILE_LIST);
fp = fopen(path, "wt");
if (fp == NULL)
elog(ERROR, "cannot open file list \"%s\": %s", path,
strerror(errno));
print_file_list(fp, files, root);
if (fflush(fp) != 0 ||
fsync(fileno(fp)) != 0 ||
fclose(fp))
elog(ERROR, "cannot write file list \"%s\": %s", path, strerror(errno));
}
/*
* Read BACKUP_CONTROL_FILE and create pgBackup.
* - Comment starts with ';'.
 * - Section headers are ignored.
*/
static pgBackup *
readBackupControlFile(const char *path)
{
pgBackup *backup = pgut_new(pgBackup);
char *backup_mode = NULL;
char *start_lsn = NULL;
char *stop_lsn = NULL;
char *status = NULL;
char *parent_backup = NULL;
char *program_version = NULL;
char *server_version = NULL;
char *compress_alg = NULL;
int parsed_options;
pgut_option options[] =
{
{'s', 0, "backup-mode", &backup_mode, SOURCE_FILE_STRICT},
{'u', 0, "timelineid", &backup->tli, SOURCE_FILE_STRICT},
{'s', 0, "start-lsn", &start_lsn, SOURCE_FILE_STRICT},
{'s', 0, "stop-lsn", &stop_lsn, SOURCE_FILE_STRICT},
{'t', 0, "start-time", &backup->start_time, SOURCE_FILE_STRICT},
{'t', 0, "end-time", &backup->end_time, SOURCE_FILE_STRICT},
{'U', 0, "recovery-xid", &backup->recovery_xid, SOURCE_FILE_STRICT},
{'t', 0, "recovery-time", &backup->recovery_time, SOURCE_FILE_STRICT},
{'I', 0, "data-bytes", &backup->data_bytes, SOURCE_FILE_STRICT},
{'I', 0, "wal-bytes", &backup->wal_bytes, SOURCE_FILE_STRICT},
{'u', 0, "block-size", &backup->block_size, SOURCE_FILE_STRICT},
{'u', 0, "xlog-block-size", &backup->wal_block_size, SOURCE_FILE_STRICT},
{'u', 0, "checksum-version", &backup->checksum_version, SOURCE_FILE_STRICT},
{'s', 0, "program-version", &program_version, SOURCE_FILE_STRICT},
{'s', 0, "server-version", &server_version, SOURCE_FILE_STRICT},
{'b', 0, "stream", &backup->stream, SOURCE_FILE_STRICT},
{'s', 0, "status", &status, SOURCE_FILE_STRICT},
{'s', 0, "parent-backup-id", &parent_backup, SOURCE_FILE_STRICT},
{'s', 0, "compress-alg", &compress_alg, SOURCE_FILE_STRICT},
{'u', 0, "compress-level", &backup->compress_level, SOURCE_FILE_STRICT},
{'b', 0, "from-replica", &backup->from_replica, SOURCE_FILE_STRICT},
{'s', 0, "primary-conninfo", &backup->primary_conninfo, SOURCE_FILE_STRICT},
{0}
};
if (access(path, F_OK) != 0)
{
elog(WARNING, "Control file \"%s\" doesn't exist", path);
pgBackupFree(backup);
return NULL;
}
pgBackupInit(backup);
parsed_options = pgut_readopt(path, options, WARNING, true);
if (parsed_options == 0)
{
elog(WARNING, "Control file \"%s\" is empty", path);
pgBackupFree(backup);
return NULL;
}
if (backup->start_time == 0)
{
elog(WARNING, "Invalid ID/start-time, control file \"%s\" is corrupted", path);
pgBackupFree(backup);
return NULL;
}
if (backup_mode)
{
backup->backup_mode = parse_backup_mode(backup_mode);
free(backup_mode);
}
if (start_lsn)
{
uint32 xlogid;
uint32 xrecoff;
if (sscanf(start_lsn, "%X/%X", &xlogid, &xrecoff) == 2)
backup->start_lsn = (XLogRecPtr) ((uint64) xlogid << 32) | xrecoff;
else
elog(WARNING, "Invalid START_LSN \"%s\"", start_lsn);
free(start_lsn);
}
if (stop_lsn)
{
uint32 xlogid;
uint32 xrecoff;
if (sscanf(stop_lsn, "%X/%X", &xlogid, &xrecoff) == 2)
backup->stop_lsn = (XLogRecPtr) ((uint64) xlogid << 32) | xrecoff;
else
elog(WARNING, "Invalid STOP_LSN \"%s\"", stop_lsn);
free(stop_lsn);
}
if (status)
{
if (strcmp(status, "OK") == 0)
backup->status = BACKUP_STATUS_OK;
else if (strcmp(status, "ERROR") == 0)
backup->status = BACKUP_STATUS_ERROR;
else if (strcmp(status, "RUNNING") == 0)
backup->status = BACKUP_STATUS_RUNNING;
else if (strcmp(status, "MERGING") == 0)
backup->status = BACKUP_STATUS_MERGING;
else if (strcmp(status, "DELETING") == 0)
backup->status = BACKUP_STATUS_DELETING;
else if (strcmp(status, "DELETED") == 0)
backup->status = BACKUP_STATUS_DELETED;
else if (strcmp(status, "DONE") == 0)
backup->status = BACKUP_STATUS_DONE;
else if (strcmp(status, "ORPHAN") == 0)
backup->status = BACKUP_STATUS_ORPHAN;
else if (strcmp(status, "CORRUPT") == 0)
backup->status = BACKUP_STATUS_CORRUPT;
else
elog(WARNING, "Invalid STATUS \"%s\"", status);
free(status);
}
if (parent_backup)
{
backup->parent_backup = base36dec(parent_backup);
free(parent_backup);
}
if (program_version)
{
StrNCpy(backup->program_version, program_version,
sizeof(backup->program_version));
pfree(program_version);
}
if (server_version)
{
StrNCpy(backup->server_version, server_version,
sizeof(backup->server_version));
pfree(server_version);
}
if (compress_alg)
backup->compress_alg = parse_compress_alg(compress_alg);
return backup;
}
BackupMode
parse_backup_mode(const char *value)
{
const char *v = value;
size_t len;
/* Skip all spaces detected */
while (IsSpace(*v))
v++;
len = strlen(v);
if (len > 0 && pg_strncasecmp("full", v, len) == 0)
return BACKUP_MODE_FULL;
else if (len > 0 && pg_strncasecmp("page", v, len) == 0)
return BACKUP_MODE_DIFF_PAGE;
else if (len > 0 && pg_strncasecmp("ptrack", v, len) == 0)
return BACKUP_MODE_DIFF_PTRACK;
else if (len > 0 && pg_strncasecmp("delta", v, len) == 0)
return BACKUP_MODE_DIFF_DELTA;
/* Backup mode is invalid, so leave with an error */
elog(ERROR, "invalid backup-mode \"%s\"", value);
return BACKUP_MODE_INVALID;
}
const char *
deparse_backup_mode(BackupMode mode)
{
switch (mode)
{
case BACKUP_MODE_FULL:
return "full";
case BACKUP_MODE_DIFF_PAGE:
return "page";
case BACKUP_MODE_DIFF_PTRACK:
return "ptrack";
case BACKUP_MODE_DIFF_DELTA:
return "delta";
case BACKUP_MODE_INVALID:
return "invalid";
}
return NULL;
}
CompressAlg
parse_compress_alg(const char *arg)
{
size_t len;
/* Skip all spaces detected */
while (isspace((unsigned char)*arg))
arg++;
len = strlen(arg);
if (len == 0)
elog(ERROR, "compress algrorithm is empty");
if (pg_strncasecmp("zlib", arg, len) == 0)
return ZLIB_COMPRESS;
else if (pg_strncasecmp("pglz", arg, len) == 0)
return PGLZ_COMPRESS;
else if (pg_strncasecmp("none", arg, len) == 0)
return NONE_COMPRESS;
else
elog(ERROR, "invalid compress algorithm value \"%s\"", arg);
return NOT_DEFINED_COMPRESS;
}
const char*
deparse_compress_alg(int alg)
{
switch (alg)
{
case NONE_COMPRESS:
case NOT_DEFINED_COMPRESS:
return "none";
case ZLIB_COMPRESS:
return "zlib";
case PGLZ_COMPRESS:
return "pglz";
}
return NULL;
}
/*
* Fill pgBackup struct with default values.
*/
void
pgBackupInit(pgBackup *backup)
{
backup->backup_id = INVALID_BACKUP_ID;
backup->backup_mode = BACKUP_MODE_INVALID;
backup->status = BACKUP_STATUS_INVALID;
backup->tli = 0;
backup->start_lsn = 0;
backup->stop_lsn = 0;
backup->start_time = (time_t) 0;
backup->end_time = (time_t) 0;
backup->recovery_xid = 0;
backup->recovery_time = (time_t) 0;
backup->data_bytes = BYTES_INVALID;
backup->wal_bytes = BYTES_INVALID;
backup->compress_alg = COMPRESS_ALG_DEFAULT;
backup->compress_level = COMPRESS_LEVEL_DEFAULT;
backup->block_size = BLCKSZ;
backup->wal_block_size = XLOG_BLCKSZ;
backup->checksum_version = 0;
backup->stream = false;
backup->from_replica = false;
backup->parent_backup = INVALID_BACKUP_ID;
backup->parent_backup_link = NULL;
backup->primary_conninfo = NULL;
backup->program_version[0] = '\0';
backup->server_version[0] = '\0';
}
/*
* Copy backup metadata from **src** into **dst**.
*/
void
pgBackupCopy(pgBackup *dst, pgBackup *src)
{
pfree(dst->primary_conninfo);
memcpy(dst, src, sizeof(pgBackup));
if (src->primary_conninfo)
dst->primary_conninfo = pstrdup(src->primary_conninfo);
}
/* free pgBackup object */
void
pgBackupFree(void *backup)
{
pgBackup *b = (pgBackup *) backup;
pfree(b->primary_conninfo);
pfree(backup);
}
/* Compare two pgBackup structs by their IDs (start time) in ascending order */
int
pgBackupCompareId(const void *l, const void *r)
{
pgBackup *lp = *(pgBackup **)l;
pgBackup *rp = *(pgBackup **)r;
if (lp->start_time > rp->start_time)
return 1;
else if (lp->start_time < rp->start_time)
return -1;
else
return 0;
}
/* Compare two pgBackup structs by their IDs in descending order */
int
pgBackupCompareIdDesc(const void *l, const void *r)
{
return -pgBackupCompareId(l, r);
}
/*
* Construct absolute path of the backup directory.
* If subdir is not NULL, it will be appended after the path.
*/
void
pgBackupGetPath(const pgBackup *backup, char *path, size_t len, const char *subdir)
{
pgBackupGetPath2(backup, path, len, subdir, NULL);
}
/*
* Construct absolute path of the backup directory.
* Append "subdir1" and "subdir2" to the backup directory.
*/
void
pgBackupGetPath2(const pgBackup *backup, char *path, size_t len,
const char *subdir1, const char *subdir2)
{
/* If "subdir1" is NULL do not check "subdir2" */
if (!subdir1)
snprintf(path, len, "%s/%s", backup_instance_path,
base36enc(backup->start_time));
else if (!subdir2)
snprintf(path, len, "%s/%s/%s", backup_instance_path,
base36enc(backup->start_time), subdir1);
/* "subdir1" and "subdir2" is not NULL */
else
snprintf(path, len, "%s/%s/%s/%s", backup_instance_path,
base36enc(backup->start_time), subdir1, subdir2);
make_native_path(path);
}
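/*
 * Illustrative example (the ID is hypothetical): for a backup whose
 * start_time encodes to "1AB2C3" in base36, a call with subdir1 = "database"
 * and subdir2 = NULL produces <backup_instance_path>/1AB2C3/database.
 */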
/*
 * Find the parent base FULL backup for the current backup using
 * parent_backup_link; return NULL if it is not found.
 */
pgBackup*
find_parent_backup(pgBackup *current_backup)
{
pgBackup *base_full_backup = NULL;
base_full_backup = current_backup;
while (base_full_backup->backup_mode != BACKUP_MODE_FULL)
{
/*
* If we haven't found parent for incremental backup,
* mark it and all depending backups as orphaned
*/
if (base_full_backup->parent_backup_link == NULL
|| (base_full_backup->status != BACKUP_STATUS_OK
&& base_full_backup->status != BACKUP_STATUS_DONE))
{
pgBackup *orphaned_backup = current_backup;
while (orphaned_backup != NULL)
{
orphaned_backup->status = BACKUP_STATUS_ORPHAN;
pgBackupWriteBackupControlFile(orphaned_backup);
if (base_full_backup->parent_backup_link == NULL)
elog(WARNING, "Backup %s is orphaned because its parent backup is not found",
base36enc(orphaned_backup->start_time));
else
elog(WARNING, "Backup %s is orphaned because its parent backup is corrupted",
base36enc(orphaned_backup->start_time));
orphaned_backup = orphaned_backup->parent_backup_link;
}
base_full_backup = NULL;
break;
}
base_full_backup = base_full_backup->parent_backup_link;
}
return base_full_backup;
}
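/*
 * Illustrative walk (IDs are hypothetical): for the chain
 * DELTA 1AB2C4 -> PAGE 1AB2C3 -> FULL 1AB2C2 the function returns the FULL
 * backup; if that FULL backup is missing or is neither OK nor DONE, every
 * backup visited from the starting point is marked ORPHAN and NULL is
 * returned.
 */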

490
src/configure.c Normal file

@ -0,0 +1,490 @@
/*-------------------------------------------------------------------------
*
 * configure.c: manage backup catalog configuration.
*
* Copyright (c) 2017-2018, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#include "pg_probackup.h"
#include "utils/logger.h"
#include "pqexpbuffer.h"
#include "utils/json.h"
static void opt_log_level_console(pgut_option *opt, const char *arg);
static void opt_log_level_file(pgut_option *opt, const char *arg);
static void opt_compress_alg(pgut_option *opt, const char *arg);
static void show_configure_start(void);
static void show_configure_end(void);
static void show_configure(pgBackupConfig *config);
static void show_configure_json(pgBackupConfig *config);
static pgBackupConfig *cur_config = NULL;
static PQExpBufferData show_buf;
static int32 json_level = 0;
/*
* All this code needs refactoring.
*/
/* Set configure options */
int
do_configure(bool show_only)
{
pgBackupConfig *config = readBackupCatalogConfigFile();
if (pgdata)
config->pgdata = pgdata;
if (pgut_dbname)
config->pgdatabase = pgut_dbname;
if (host)
config->pghost = host;
if (port)
config->pgport = port;
if (username)
config->pguser = username;
if (master_host)
config->master_host = master_host;
if (master_port)
config->master_port = master_port;
if (master_db)
config->master_db = master_db;
if (master_user)
config->master_user = master_user;
if (replica_timeout)
config->replica_timeout = replica_timeout;
if (archive_timeout)
config->archive_timeout = archive_timeout;
if (log_level_console)
config->log_level_console = log_level_console;
if (log_level_file)
config->log_level_file = log_level_file;
if (log_filename)
config->log_filename = log_filename;
if (error_log_filename)
config->error_log_filename = error_log_filename;
if (log_directory)
config->log_directory = log_directory;
if (log_rotation_size)
config->log_rotation_size = log_rotation_size;
if (log_rotation_age)
config->log_rotation_age = log_rotation_age;
if (retention_redundancy)
config->retention_redundancy = retention_redundancy;
if (retention_window)
config->retention_window = retention_window;
if (compress_alg)
config->compress_alg = compress_alg;
if (compress_level)
config->compress_level = compress_level;
if (show_only)
show_configure(config);
else
writeBackupCatalogConfigFile(config);
return 0;
}
void
pgBackupConfigInit(pgBackupConfig *config)
{
config->system_identifier = 0;
#if PG_VERSION_NUM >= 110000
config->xlog_seg_size = 0;
#else
config->xlog_seg_size = XLOG_SEG_SIZE;
#endif
config->pgdata = NULL;
config->pgdatabase = NULL;
config->pghost = NULL;
config->pgport = NULL;
config->pguser = NULL;
config->master_host = NULL;
config->master_port = NULL;
config->master_db = NULL;
config->master_user = NULL;
config->replica_timeout = REPLICA_TIMEOUT_DEFAULT;
config->archive_timeout = ARCHIVE_TIMEOUT_DEFAULT;
config->log_level_console = LOG_LEVEL_CONSOLE_DEFAULT;
config->log_level_file = LOG_LEVEL_FILE_DEFAULT;
config->log_filename = LOG_FILENAME_DEFAULT;
config->error_log_filename = NULL;
config->log_directory = LOG_DIRECTORY_DEFAULT;
config->log_rotation_size = LOG_ROTATION_SIZE_DEFAULT;
config->log_rotation_age = LOG_ROTATION_AGE_DEFAULT;
config->retention_redundancy = RETENTION_REDUNDANCY_DEFAULT;
config->retention_window = RETENTION_WINDOW_DEFAULT;
config->compress_alg = COMPRESS_ALG_DEFAULT;
config->compress_level = COMPRESS_LEVEL_DEFAULT;
}
void
writeBackupCatalogConfig(FILE *out, pgBackupConfig *config)
{
uint64 res;
const char *unit;
fprintf(out, "#Backup instance info\n");
fprintf(out, "PGDATA = %s\n", config->pgdata);
fprintf(out, "system-identifier = " UINT64_FORMAT "\n", config->system_identifier);
#if PG_VERSION_NUM >= 110000
fprintf(out, "xlog-seg-size = %u\n", config->xlog_seg_size);
#endif
fprintf(out, "#Connection parameters:\n");
if (config->pgdatabase)
fprintf(out, "PGDATABASE = %s\n", config->pgdatabase);
if (config->pghost)
fprintf(out, "PGHOST = %s\n", config->pghost);
if (config->pgport)
fprintf(out, "PGPORT = %s\n", config->pgport);
if (config->pguser)
fprintf(out, "PGUSER = %s\n", config->pguser);
fprintf(out, "#Replica parameters:\n");
if (config->master_host)
fprintf(out, "master-host = %s\n", config->master_host);
if (config->master_port)
fprintf(out, "master-port = %s\n", config->master_port);
if (config->master_db)
fprintf(out, "master-db = %s\n", config->master_db);
if (config->master_user)
fprintf(out, "master-user = %s\n", config->master_user);
convert_from_base_unit_u(config->replica_timeout, OPTION_UNIT_S,
&res, &unit);
fprintf(out, "replica-timeout = " UINT64_FORMAT "%s\n", res, unit);
fprintf(out, "#Archive parameters:\n");
convert_from_base_unit_u(config->archive_timeout, OPTION_UNIT_S,
&res, &unit);
fprintf(out, "archive-timeout = " UINT64_FORMAT "%s\n", res, unit);
fprintf(out, "#Logging parameters:\n");
fprintf(out, "log-level-console = %s\n", deparse_log_level(config->log_level_console));
fprintf(out, "log-level-file = %s\n", deparse_log_level(config->log_level_file));
fprintf(out, "log-filename = %s\n", config->log_filename);
if (config->error_log_filename)
fprintf(out, "error-log-filename = %s\n", config->error_log_filename);
if (strcmp(config->log_directory, LOG_DIRECTORY_DEFAULT) == 0)
fprintf(out, "log-directory = %s/%s\n", backup_path, config->log_directory);
else
fprintf(out, "log-directory = %s\n", config->log_directory);
/* Convert values from base unit */
convert_from_base_unit_u(config->log_rotation_size, OPTION_UNIT_KB,
&res, &unit);
fprintf(out, "log-rotation-size = " UINT64_FORMAT "%s\n", res, (res)?unit:"KB");
convert_from_base_unit_u(config->log_rotation_age, OPTION_UNIT_S,
&res, &unit);
fprintf(out, "log-rotation-age = " UINT64_FORMAT "%s\n", res, (res)?unit:"min");
fprintf(out, "#Retention parameters:\n");
fprintf(out, "retention-redundancy = %u\n", config->retention_redundancy);
fprintf(out, "retention-window = %u\n", config->retention_window);
fprintf(out, "#Compression parameters:\n");
fprintf(out, "compress-algorithm = %s\n", deparse_compress_alg(config->compress_alg));
fprintf(out, "compress-level = %d\n", config->compress_level);
}
void
writeBackupCatalogConfigFile(pgBackupConfig *config)
{
char path[MAXPGPATH];
FILE *fp;
join_path_components(path, backup_instance_path, BACKUP_CATALOG_CONF_FILE);
fp = fopen(path, "wt");
if (fp == NULL)
elog(ERROR, "cannot create %s: %s",
BACKUP_CATALOG_CONF_FILE, strerror(errno));
writeBackupCatalogConfig(fp, config);
fclose(fp);
}
pgBackupConfig*
readBackupCatalogConfigFile(void)
{
pgBackupConfig *config = pgut_new(pgBackupConfig);
char path[MAXPGPATH];
pgut_option options[] =
{
/* retention options */
{ 'u', 0, "retention-redundancy", &(config->retention_redundancy),SOURCE_FILE_STRICT },
{ 'u', 0, "retention-window", &(config->retention_window), SOURCE_FILE_STRICT },
/* compression options */
{ 'f', 0, "compress-algorithm", opt_compress_alg, SOURCE_CMDLINE },
{ 'u', 0, "compress-level", &(config->compress_level), SOURCE_CMDLINE },
/* logging options */
{ 'f', 0, "log-level-console", opt_log_level_console, SOURCE_CMDLINE },
{ 'f', 0, "log-level-file", opt_log_level_file, SOURCE_CMDLINE },
{ 's', 0, "log-filename", &(config->log_filename), SOURCE_CMDLINE },
{ 's', 0, "error-log-filename", &(config->error_log_filename), SOURCE_CMDLINE },
{ 's', 0, "log-directory", &(config->log_directory), SOURCE_CMDLINE },
{ 'u', 0, "log-rotation-size", &(config->log_rotation_size), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_KB },
{ 'u', 0, "log-rotation-age", &(config->log_rotation_age), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_S },
/* connection options */
{ 's', 0, "pgdata", &(config->pgdata), SOURCE_FILE_STRICT },
{ 's', 0, "pgdatabase", &(config->pgdatabase), SOURCE_FILE_STRICT },
{ 's', 0, "pghost", &(config->pghost), SOURCE_FILE_STRICT },
{ 's', 0, "pgport", &(config->pgport), SOURCE_FILE_STRICT },
{ 's', 0, "pguser", &(config->pguser), SOURCE_FILE_STRICT },
/* replica options */
{ 's', 0, "master-host", &(config->master_host), SOURCE_FILE_STRICT },
{ 's', 0, "master-port", &(config->master_port), SOURCE_FILE_STRICT },
{ 's', 0, "master-db", &(config->master_db), SOURCE_FILE_STRICT },
{ 's', 0, "master-user", &(config->master_user), SOURCE_FILE_STRICT },
{ 'u', 0, "replica-timeout", &(config->replica_timeout), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_S },
/* other options */
{ 'U', 0, "system-identifier", &(config->system_identifier), SOURCE_FILE_STRICT },
#if PG_VERSION_NUM >= 110000
{'u', 0, "xlog-seg-size", &config->xlog_seg_size, SOURCE_FILE_STRICT},
#endif
/* archive options */
{ 'u', 0, "archive-timeout", &(config->archive_timeout), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_S },
{0}
};
cur_config = config;
join_path_components(path, backup_instance_path, BACKUP_CATALOG_CONF_FILE);
pgBackupConfigInit(config);
pgut_readopt(path, options, ERROR, true);
#if PG_VERSION_NUM >= 110000
if (!IsValidWalSegSize(config->xlog_seg_size))
elog(ERROR, "Invalid WAL segment size %u", config->xlog_seg_size);
#endif
return config;
}
/*
* Read xlog-seg-size from BACKUP_CATALOG_CONF_FILE.
*/
uint32
get_config_xlog_seg_size(void)
{
#if PG_VERSION_NUM >= 110000
char path[MAXPGPATH];
uint32 seg_size;
pgut_option options[] =
{
{'u', 0, "xlog-seg-size", &seg_size, SOURCE_FILE_STRICT},
{0}
};
join_path_components(path, backup_instance_path, BACKUP_CATALOG_CONF_FILE);
pgut_readopt(path, options, ERROR, false);
if (!IsValidWalSegSize(seg_size))
elog(ERROR, "Invalid WAL segment size %u", seg_size);
return seg_size;
#else
return (uint32) XLOG_SEG_SIZE;
#endif
}
static void
opt_log_level_console(pgut_option *opt, const char *arg)
{
cur_config->log_level_console = parse_log_level(arg);
}
static void
opt_log_level_file(pgut_option *opt, const char *arg)
{
cur_config->log_level_file = parse_log_level(arg);
}
static void
opt_compress_alg(pgut_option *opt, const char *arg)
{
cur_config->compress_alg = parse_compress_alg(arg);
}
/*
* Initialize configure visualization.
*/
static void
show_configure_start(void)
{
if (show_format == SHOW_PLAIN)
return;
/* For now we need buffer only for JSON format */
json_level = 0;
initPQExpBuffer(&show_buf);
}
/*
* Finalize configure visualization.
*/
static void
show_configure_end(void)
{
if (show_format == SHOW_PLAIN)
return;
else
appendPQExpBufferChar(&show_buf, '\n');
fputs(show_buf.data, stdout);
termPQExpBuffer(&show_buf);
}
/*
* Show configure information of pg_probackup.
*/
static void
show_configure(pgBackupConfig *config)
{
show_configure_start();
if (show_format == SHOW_PLAIN)
writeBackupCatalogConfig(stdout, config);
else
show_configure_json(config);
show_configure_end();
}
/*
* Json output.
*/
static void
show_configure_json(pgBackupConfig *config)
{
PQExpBuffer buf = &show_buf;
uint64 res;
const char *unit;
json_add(buf, JT_BEGIN_OBJECT, &json_level);
json_add_value(buf, "pgdata", config->pgdata, json_level, false);
json_add_key(buf, "system-identifier", json_level, true);
appendPQExpBuffer(buf, UINT64_FORMAT, config->system_identifier);
#if PG_VERSION_NUM >= 110000
json_add_key(buf, "xlog-seg-size", json_level, true);
appendPQExpBuffer(buf, "%u", config->xlog_seg_size);
#endif
/* Connection parameters */
if (config->pgdatabase)
json_add_value(buf, "pgdatabase", config->pgdatabase, json_level, true);
if (config->pghost)
json_add_value(buf, "pghost", config->pghost, json_level, true);
if (config->pgport)
json_add_value(buf, "pgport", config->pgport, json_level, true);
if (config->pguser)
json_add_value(buf, "pguser", config->pguser, json_level, true);
/* Replica parameters */
if (config->master_host)
json_add_value(buf, "master-host", config->master_host, json_level,
true);
if (config->master_port)
json_add_value(buf, "master-port", config->master_port, json_level,
true);
if (config->master_db)
json_add_value(buf, "master-db", config->master_db, json_level, true);
if (config->master_user)
json_add_value(buf, "master-user", config->master_user, json_level,
true);
json_add_key(buf, "replica-timeout", json_level, true);
convert_from_base_unit_u(config->replica_timeout, OPTION_UNIT_S,
&res, &unit);
appendPQExpBuffer(buf, UINT64_FORMAT "%s", res, unit);
/* Archive parameters */
json_add_key(buf, "archive-timeout", json_level, true);
convert_from_base_unit_u(config->archive_timeout, OPTION_UNIT_S,
&res, &unit);
appendPQExpBuffer(buf, UINT64_FORMAT "%s", res, unit);
/* Logging parameters */
json_add_value(buf, "log-level-console",
deparse_log_level(config->log_level_console), json_level,
true);
json_add_value(buf, "log-level-file",
deparse_log_level(config->log_level_file), json_level,
true);
json_add_value(buf, "log-filename", config->log_filename, json_level,
true);
if (config->error_log_filename)
json_add_value(buf, "error-log-filename", config->error_log_filename,
json_level, true);
if (strcmp(config->log_directory, LOG_DIRECTORY_DEFAULT) == 0)
{
char log_directory_fullpath[MAXPGPATH];
		snprintf(log_directory_fullpath, MAXPGPATH, "%s/%s",
				 backup_path, config->log_directory);
json_add_value(buf, "log-directory", log_directory_fullpath,
json_level, true);
}
else
json_add_value(buf, "log-directory", config->log_directory,
json_level, true);
json_add_key(buf, "log-rotation-size", json_level, true);
convert_from_base_unit_u(config->log_rotation_size, OPTION_UNIT_KB,
&res, &unit);
appendPQExpBuffer(buf, UINT64_FORMAT "%s", res, (res)?unit:"KB");
json_add_key(buf, "log-rotation-age", json_level, true);
convert_from_base_unit_u(config->log_rotation_age, OPTION_UNIT_S,
&res, &unit);
appendPQExpBuffer(buf, UINT64_FORMAT "%s", res, (res)?unit:"min");
/* Retention parameters */
json_add_key(buf, "retention-redundancy", json_level, true);
appendPQExpBuffer(buf, "%u", config->retention_redundancy);
json_add_key(buf, "retention-window", json_level, true);
appendPQExpBuffer(buf, "%u", config->retention_window);
/* Compression parameters */
json_add_value(buf, "compress-algorithm",
deparse_compress_alg(config->compress_alg), json_level,
true);
json_add_key(buf, "compress-level", json_level, true);
appendPQExpBuffer(buf, "%d", config->compress_level);
json_add(buf, JT_END_OBJECT, &json_level);
}
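/*
 * Sketch of the JSON produced for "show-config --format=json"; the key order
 * mirrors the calls above and the values are illustrative:
 *
 *     {
 *         "pgdata": "/var/lib/postgresql/data",
 *         "system-identifier": 6421034406741669219,
 *         "log-level-console": "info",
 *         ...
 *         "compress-algorithm": "none",
 *         "compress-level": 1
 *     }
 */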

1407
src/data.c Normal file

File diff suppressed because it is too large

464
src/delete.c Normal file

@ -0,0 +1,464 @@
/*-------------------------------------------------------------------------
*
* delete.c: delete backup files.
*
* Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
* Portions Copyright (c) 2015-2017, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#include "pg_probackup.h"
#include <dirent.h>
#include <time.h>
#include <unistd.h>
static int pgBackupDeleteFiles(pgBackup *backup);
static void delete_walfiles(XLogRecPtr oldest_lsn, TimeLineID oldest_tli,
uint32 xlog_seg_size);
int
do_delete(time_t backup_id)
{
int i;
parray *backup_list,
*delete_list;
pgBackup *target_backup = NULL;
time_t parent_id = 0;
XLogRecPtr oldest_lsn = InvalidXLogRecPtr;
TimeLineID oldest_tli = 0;
/* Get exclusive lock of backup catalog */
catalog_lock();
/* Get complete list of backups */
backup_list = catalog_get_backup_list(INVALID_BACKUP_ID);
if (backup_id != 0)
{
delete_list = parray_new();
		/* Find the backup to be deleted and collect its dependent incremental backups */
for (i = (int) parray_num(backup_list) - 1; i >= 0; i--)
{
pgBackup *backup = (pgBackup *) parray_get(backup_list, (size_t) i);
if (backup->start_time == backup_id)
{
parray_append(delete_list, backup);
				/*
				 * Do not remove subsequent backups if the target backup
				 * finished incorrectly.
				 */
if (backup->status == BACKUP_STATUS_ERROR)
break;
				/* Save backup id to retrieve incremental backups */
parent_id = backup->start_time;
target_backup = backup;
}
else if (target_backup)
{
if (backup->backup_mode != BACKUP_MODE_FULL &&
backup->parent_backup == parent_id)
{
					/* Append the incremental backup to the delete list */
					parray_append(delete_list, backup);
					/* Save backup id to retrieve incremental backups */
parent_id = backup->start_time;
}
else
break;
}
}
if (parray_num(delete_list) == 0)
elog(ERROR, "no backup found, cannot delete");
/* Delete backups from the end of list */
for (i = (int) parray_num(delete_list) - 1; i >= 0; i--)
{
pgBackup *backup = (pgBackup *) parray_get(delete_list, (size_t) i);
if (interrupted)
elog(ERROR, "interrupted during delete backup");
pgBackupDeleteFiles(backup);
}
parray_free(delete_list);
}
/* Clean WAL segments */
if (delete_wal)
{
Assert(target_backup);
/* Find oldest LSN, used by backups */
for (i = (int) parray_num(backup_list) - 1; i >= 0; i--)
{
pgBackup *backup = (pgBackup *) parray_get(backup_list, (size_t) i);
if (backup->status == BACKUP_STATUS_OK)
{
oldest_lsn = backup->start_lsn;
oldest_tli = backup->tli;
break;
}
}
delete_walfiles(oldest_lsn, oldest_tli, xlog_seg_size);
}
/* cleanup */
parray_walk(backup_list, pgBackupFree);
parray_free(backup_list);
return 0;
}
/*
* Remove backups by retention policy. Retention policy is configured by
* retention_redundancy and retention_window variables.
*/
int
do_retention_purge(void)
{
parray *backup_list;
uint32 backup_num;
size_t i;
time_t days_threshold = time(NULL) - (retention_window * 60 * 60 * 24);
XLogRecPtr oldest_lsn = InvalidXLogRecPtr;
TimeLineID oldest_tli = 0;
bool keep_next_backup = true; /* Do not delete first full backup */
bool backup_deleted = false; /* At least one backup was deleted */
if (delete_expired)
{
if (retention_redundancy > 0)
elog(LOG, "REDUNDANCY=%u", retention_redundancy);
if (retention_window > 0)
elog(LOG, "WINDOW=%u", retention_window);
if (retention_redundancy == 0
&& retention_window == 0)
{
elog(WARNING, "Retention policy is not set");
if (!delete_wal)
return 0;
}
}
/* Get exclusive lock of backup catalog */
catalog_lock();
/* Get a complete list of backups. */
backup_list = catalog_get_backup_list(INVALID_BACKUP_ID);
if (parray_num(backup_list) == 0)
{
elog(INFO, "backup list is empty, purging won't be executed");
return 0;
}
/* Find target backups to be deleted */
if (delete_expired &&
(retention_redundancy > 0 || retention_window > 0))
{
backup_num = 0;
for (i = 0; i < parray_num(backup_list); i++)
{
pgBackup *backup = (pgBackup *) parray_get(backup_list, i);
uint32 backup_num_evaluate = backup_num;
/* Consider only validated and correct backups */
if (backup->status != BACKUP_STATUS_OK)
continue;
			/*
			 * Once a valid full backup is found, backups older than it can
			 * be deleted according to the number of retained generations.
			 */
if (backup->backup_mode == BACKUP_MODE_FULL)
backup_num++;
/* Evaluate retention_redundancy if this backup is eligible for removal */
if (keep_next_backup ||
retention_redundancy >= backup_num_evaluate + 1 ||
(retention_window > 0 && backup->recovery_time >= days_threshold))
{
/* Save LSN and Timeline to remove unnecessary WAL segments */
oldest_lsn = backup->start_lsn;
oldest_tli = backup->tli;
/* Save parent backup of this incremental backup */
if (backup->backup_mode != BACKUP_MODE_FULL)
keep_next_backup = true;
/*
* Previous incremental backup was kept or this is first backup
* so do not delete this backup.
*/
else
keep_next_backup = false;
continue;
}
/* Delete backup and update status to DELETED */
pgBackupDeleteFiles(backup);
backup_deleted = true;
}
}
/*
 * If oldest_lsn and oldest_tli weren't set because the previous step was
 * skipped, set them now if we are going to purge WAL.
*/
if (delete_wal && (XLogRecPtrIsInvalid(oldest_lsn)))
{
pgBackup *backup = (pgBackup *) parray_get(backup_list, parray_num(backup_list) - 1);
oldest_lsn = backup->start_lsn;
oldest_tli = backup->tli;
}
/* Be paranoid */
if (XLogRecPtrIsInvalid(oldest_lsn))
elog(ERROR, "Not going to purge WAL because LSN is invalid");
/* Purge WAL files */
if (delete_wal)
{
delete_walfiles(oldest_lsn, oldest_tli, xlog_seg_size);
}
/* Cleanup */
parray_walk(backup_list, pgBackupFree);
parray_free(backup_list);
if (backup_deleted)
elog(INFO, "Purging finished");
else
elog(INFO, "Nothing to delete by retention policy");
return 0;
}
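/*
 * Illustrative usage of the policy evaluated above: keep two full backups
 * plus seven days of recoverability, then purge expired backups and WAL.
 * The -B path and instance name are assumptions made up for the example:
 *
 *     pg_probackup set-config -B /backup --instance=main \
 *         --retention-redundancy=2 --retention-window=7
 *     pg_probackup delete -B /backup --instance=main --expired --wal
 */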
/*
* Delete backup files of the backup and update the status of the backup to
* BACKUP_STATUS_DELETED.
*/
static int
pgBackupDeleteFiles(pgBackup *backup)
{
size_t i;
char path[MAXPGPATH];
char timestamp[100];
parray *files;
/*
* If the backup was deleted already, there is nothing to do.
*/
if (backup->status == BACKUP_STATUS_DELETED)
return 0;
time2iso(timestamp, lengthof(timestamp), backup->recovery_time);
elog(INFO, "delete: %s %s",
base36enc(backup->start_time), timestamp);
/*
 * Update STATUS to BACKUP_STATUS_DELETING in case an error occurs before
 * all backup files are deleted.
*/
backup->status = BACKUP_STATUS_DELETING;
pgBackupWriteBackupControlFile(backup);
/* list files to be deleted */
files = parray_new();
pgBackupGetPath(backup, path, lengthof(path), NULL);
dir_list_file(files, path, false, true, true);
/* delete leaf node first */
parray_qsort(files, pgFileComparePathDesc);
for (i = 0; i < parray_num(files); i++)
{
pgFile *file = (pgFile *) parray_get(files, i);
/* print progress */
elog(VERBOSE, "delete file(%zd/%lu) \"%s\"", i + 1,
(unsigned long) parray_num(files), file->path);
if (remove(file->path))
{
elog(WARNING, "can't remove \"%s\": %s", file->path,
strerror(errno));
parray_walk(files, pgFileFree);
parray_free(files);
return 1;
}
}
parray_walk(files, pgFileFree);
parray_free(files);
backup->status = BACKUP_STATUS_DELETED;
return 0;
}
/*
 * Deletes WAL segments up to oldest_lsn, or all WAL segments (if all backups
 * were deleted and so oldest_lsn is invalid).
 *
 * oldest_lsn - if valid, the function deletes the WAL segments that contain
 * LSNs older than oldest_lsn. If it is invalid, all WAL segments are deleted.
 * oldest_tli - is used to construct the name of the oldest WAL segment in
 * addition to oldest_lsn.
*/
static void
delete_walfiles(XLogRecPtr oldest_lsn, TimeLineID oldest_tli,
uint32 xlog_seg_size)
{
XLogSegNo targetSegNo;
char oldestSegmentNeeded[MAXFNAMELEN];
DIR *arcdir;
struct dirent *arcde;
char wal_file[MAXPGPATH];
char max_wal_file[MAXPGPATH];
char min_wal_file[MAXPGPATH];
int rc;
max_wal_file[0] = '\0';
min_wal_file[0] = '\0';
if (!XLogRecPtrIsInvalid(oldest_lsn))
{
GetXLogSegNo(oldest_lsn, targetSegNo, xlog_seg_size);
GetXLogFileName(oldestSegmentNeeded, oldest_tli, targetSegNo,
xlog_seg_size);
elog(LOG, "removing WAL segments older than %s", oldestSegmentNeeded);
}
else
elog(LOG, "removing all WAL segments");
/*
* Now it is time to do the actual work and to remove all the segments
* not needed anymore.
*/
if ((arcdir = opendir(arclog_path)) != NULL)
{
while (errno = 0, (arcde = readdir(arcdir)) != NULL)
{
/*
* We ignore the timeline part of the WAL segment identifiers in
* deciding whether a segment is still needed. This ensures that
* we won't prematurely remove a segment from a parent timeline.
* We could probably be a little more proactive about removing
* segments of non-parent timelines, but that would be a whole lot
* more complicated.
*
* We use the alphanumeric sorting property of the filenames to
* decide which ones are earlier than the exclusiveCleanupFileName
* file. Note that this means files are not removed in the order
* they were originally written, in case this worries you.
*
* We also should not forget that WAL segment can be compressed.
*/
if (IsXLogFileName(arcde->d_name) ||
IsPartialXLogFileName(arcde->d_name) ||
IsBackupHistoryFileName(arcde->d_name) ||
IsCompressedXLogFileName(arcde->d_name))
{
if (XLogRecPtrIsInvalid(oldest_lsn) ||
strncmp(arcde->d_name + 8, oldestSegmentNeeded + 8, 16) < 0)
{
/*
* Use the original file name again now, including any
* extension that might have been chopped off before testing
* the sequence.
*/
snprintf(wal_file, MAXPGPATH, "%s/%s",
arclog_path, arcde->d_name);
rc = unlink(wal_file);
if (rc != 0)
{
elog(WARNING, "could not remove file \"%s\": %s",
wal_file, strerror(errno));
break;
}
elog(LOG, "removed WAL segment \"%s\"", wal_file);
if (max_wal_file[0] == '\0' ||
strcmp(max_wal_file + 8, arcde->d_name + 8) < 0)
strcpy(max_wal_file, arcde->d_name);
if (min_wal_file[0] == '\0' ||
strcmp(min_wal_file + 8, arcde->d_name + 8) > 0)
strcpy(min_wal_file, arcde->d_name);
}
}
}
if (min_wal_file[0] != '\0')
elog(INFO, "removed min WAL segment \"%s\"", min_wal_file);
if (max_wal_file[0] != '\0')
elog(INFO, "removed max WAL segment \"%s\"", max_wal_file);
if (errno)
elog(WARNING, "could not read archive location \"%s\": %s",
arclog_path, strerror(errno));
if (closedir(arcdir))
elog(WARNING, "could not close archive location \"%s\": %s",
arclog_path, strerror(errno));
}
else
elog(WARNING, "could not open archive location \"%s\": %s",
arclog_path, strerror(errno));
}
/* Delete all backup files and wal files of given instance. */
int
do_delete_instance(void)
{
parray *backup_list;
int i;
char instance_config_path[MAXPGPATH];
/* Delete all backups. */
backup_list = catalog_get_backup_list(INVALID_BACKUP_ID);
for (i = 0; i < parray_num(backup_list); i++)
{
pgBackup *backup = (pgBackup *) parray_get(backup_list, i);
pgBackupDeleteFiles(backup);
}
/* Cleanup */
parray_walk(backup_list, pgBackupFree);
parray_free(backup_list);
/* Delete all wal files. */
delete_walfiles(InvalidXLogRecPtr, 0, xlog_seg_size);
/* Delete backup instance config file */
join_path_components(instance_config_path, backup_instance_path, BACKUP_CATALOG_CONF_FILE);
if (remove(instance_config_path))
{
elog(ERROR, "can't remove \"%s\": %s", instance_config_path,
strerror(errno));
}
/* Delete instance root directories */
if (rmdir(backup_instance_path) != 0)
elog(ERROR, "can't remove \"%s\": %s", backup_instance_path,
strerror(errno));
	if (rmdir(arclog_path) != 0)
		elog(ERROR, "can't remove \"%s\": %s", arclog_path,
			 strerror(errno));
elog(INFO, "Instance '%s' successfully deleted", instance_name);
return 0;
}

1491
src/dir.c Normal file

File diff suppressed because it is too large

116
src/fetch.c Normal file

@ -0,0 +1,116 @@
/*-------------------------------------------------------------------------
*
* fetch.c
* Functions for fetching files from PostgreSQL data directory
*
* Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
*
*-------------------------------------------------------------------------
*/
#include "postgres_fe.h"
#include "catalog/catalog.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include "pg_probackup.h"
/*
* Read a file into memory. The file to be read is <datadir>/<path>.
* The file contents are returned in a malloc'd buffer, and *filesize
* is set to the length of the file.
*
* The returned buffer is always zero-terminated; the size of the returned
* buffer is actually *filesize + 1. That's handy when reading a text file.
 * This function can be used to read binary files as well; just ignore the
 * zero-terminator in that case.
*
*/
char *
slurpFile(const char *datadir, const char *path, size_t *filesize, bool safe)
{
int fd;
char *buffer;
struct stat statbuf;
char fullpath[MAXPGPATH];
int len;
snprintf(fullpath, sizeof(fullpath), "%s/%s", datadir, path);
if ((fd = open(fullpath, O_RDONLY | PG_BINARY, 0)) == -1)
{
if (safe)
return NULL;
else
elog(ERROR, "could not open file \"%s\" for reading: %s",
fullpath, strerror(errno));
}
	if (fstat(fd, &statbuf) < 0)
	{
		if (safe)
		{
			close(fd);			/* do not leak the descriptor on the quiet path */
			return NULL;
		}
		else
			elog(ERROR, "could not stat file \"%s\": %s",
				 fullpath, strerror(errno));
	}
len = statbuf.st_size;
buffer = pg_malloc(len + 1);
	if (read(fd, buffer, len) != len)
	{
		if (safe)
		{
			close(fd);
			pg_free(buffer);	/* do not leak the buffer on the quiet path */
			return NULL;
		}
		else
			elog(ERROR, "could not read file \"%s\": %s",
				 fullpath, strerror(errno));
	}
close(fd);
/* Zero-terminate the buffer. */
buffer[len] = '\0';
if (filesize)
*filesize = len;
return buffer;
}
/*
* Receive a single file as a malloc'd buffer.
*/
char *
fetchFile(PGconn *conn, const char *filename, size_t *filesize)
{
PGresult *res;
char *result;
const char *params[1];
int len;
params[0] = filename;
res = pgut_execute_extended(conn, "SELECT pg_catalog.pg_read_binary_file($1)",
1, params, false, false);
/* sanity check the result set */
if (PQntuples(res) != 1 || PQgetisnull(res, 0, 0))
elog(ERROR, "unexpected result set while fetching remote file \"%s\"",
filename);
/* Read result to local variables */
len = PQgetlength(res, 0, 0);
result = pg_malloc(len + 1);
memcpy(result, PQgetvalue(res, 0, 0), len);
result[len] = '\0';
PQclear(res);
*filesize = len;
return result;
}

605
src/help.c Normal file

@ -0,0 +1,605 @@
/*-------------------------------------------------------------------------
*
* help.c
*
* Copyright (c) 2017-2017, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#include "pg_probackup.h"
static void help_init(void);
static void help_backup(void);
static void help_restore(void);
static void help_validate(void);
static void help_show(void);
static void help_delete(void);
static void help_merge(void);
static void help_set_config(void);
static void help_show_config(void);
static void help_add_instance(void);
static void help_del_instance(void);
static void help_archive_push(void);
static void help_archive_get(void);
void
help_command(char *command)
{
if (strcmp(command, "init") == 0)
help_init();
else if (strcmp(command, "backup") == 0)
help_backup();
else if (strcmp(command, "restore") == 0)
help_restore();
else if (strcmp(command, "validate") == 0)
help_validate();
else if (strcmp(command, "show") == 0)
help_show();
else if (strcmp(command, "delete") == 0)
help_delete();
else if (strcmp(command, "merge") == 0)
help_merge();
else if (strcmp(command, "set-config") == 0)
help_set_config();
else if (strcmp(command, "show-config") == 0)
help_show_config();
else if (strcmp(command, "add-instance") == 0)
help_add_instance();
else if (strcmp(command, "del-instance") == 0)
help_del_instance();
else if (strcmp(command, "archive-push") == 0)
help_archive_push();
else if (strcmp(command, "archive-get") == 0)
help_archive_get();
else if (strcmp(command, "--help") == 0
|| strcmp(command, "help") == 0
|| strcmp(command, "-?") == 0
|| strcmp(command, "--version") == 0
|| strcmp(command, "version") == 0
|| strcmp(command, "-V") == 0)
printf(_("No help page for \"%s\" command. Try pg_probackup help\n"), command);
else
printf(_("Unknown command \"%s\". Try pg_probackup help\n"), command);
exit(0);
}
void
help_pg_probackup(void)
{
printf(_("\n%s - utility to manage backup/recovery of PostgreSQL database.\n\n"), PROGRAM_NAME);
printf(_(" %s help [COMMAND]\n"), PROGRAM_NAME);
printf(_("\n %s version\n"), PROGRAM_NAME);
printf(_("\n %s init -B backup-path\n"), PROGRAM_NAME);
printf(_("\n %s set-config -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [--log-level-console=log-level-console]\n"));
printf(_(" [--log-level-file=log-level-file]\n"));
printf(_(" [--log-filename=log-filename]\n"));
printf(_(" [--error-log-filename=error-log-filename]\n"));
printf(_(" [--log-directory=log-directory]\n"));
printf(_(" [--log-rotation-size=log-rotation-size]\n"));
printf(_(" [--log-rotation-age=log-rotation-age]\n"));
printf(_(" [--retention-redundancy=retention-redundancy]\n"));
printf(_(" [--retention-window=retention-window]\n"));
printf(_(" [--compress-algorithm=compress-algorithm]\n"));
printf(_(" [--compress-level=compress-level]\n"));
printf(_(" [-d dbname] [-h host] [-p port] [-U username]\n"));
printf(_(" [--master-db=db_name] [--master-host=host_name]\n"));
printf(_(" [--master-port=port] [--master-user=user_name]\n"));
printf(_(" [--replica-timeout=timeout]\n"));
printf(_(" [--archive-timeout=timeout]\n"));
printf(_("\n %s show-config -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [--format=format]\n"));
printf(_("\n %s backup -B backup-path -b backup-mode --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [-C] [--stream [-S slot-name]] [--backup-pg-log]\n"));
printf(_(" [-j num-threads] [--archive-timeout=archive-timeout]\n"));
printf(_(" [--progress]\n"));
printf(_(" [--log-level-console=log-level-console]\n"));
printf(_(" [--log-level-file=log-level-file]\n"));
printf(_(" [--log-filename=log-filename]\n"));
printf(_(" [--error-log-filename=error-log-filename]\n"));
printf(_(" [--log-directory=log-directory]\n"));
printf(_(" [--log-rotation-size=log-rotation-size]\n"));
printf(_(" [--log-rotation-age=log-rotation-age]\n"));
printf(_(" [--delete-expired] [--delete-wal]\n"));
printf(_(" [--retention-redundancy=retention-redundancy]\n"));
printf(_(" [--retention-window=retention-window]\n"));
printf(_(" [--compress]\n"));
printf(_(" [--compress-algorithm=compress-algorithm]\n"));
printf(_(" [--compress-level=compress-level]\n"));
printf(_(" [-d dbname] [-h host] [-p port] [-U username]\n"));
printf(_(" [-w --no-password] [-W --password]\n"));
printf(_(" [--master-db=db_name] [--master-host=host_name]\n"));
printf(_(" [--master-port=port] [--master-user=user_name]\n"));
printf(_(" [--replica-timeout=timeout]\n"));
printf(_("\n %s restore -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [-D pgdata-path] [-i backup-id] [--progress]\n"));
printf(_(" [--time=time|--xid=xid|--lsn=lsn [--inclusive=boolean]]\n"));
printf(_(" [--timeline=timeline] [-T OLDDIR=NEWDIR]\n"));
printf(_(" [--immediate] [--recovery-target-name=target-name]\n"));
printf(_(" [--recovery-target-action=pause|promote|shutdown]\n"));
printf(_(" [--restore-as-replica]\n"));
printf(_(" [--no-validate]\n"));
printf(_("\n %s validate -B backup-path [--instance=instance_name]\n"), PROGRAM_NAME);
printf(_(" [-i backup-id] [--progress]\n"));
printf(_(" [--time=time|--xid=xid|--lsn=lsn [--inclusive=boolean]]\n"));
printf(_(" [--recovery-target-name=target-name]\n"));
printf(_(" [--timeline=timeline]\n"));
printf(_("\n %s show -B backup-path\n"), PROGRAM_NAME);
printf(_(" [--instance=instance_name [-i backup-id]]\n"));
printf(_(" [--format=format]\n"));
printf(_("\n %s delete -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [--wal] [-i backup-id | --expired]\n"));
printf(_("\n %s merge -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" -i backup-id\n"));
printf(_("\n %s add-instance -B backup-path -D pgdata-path\n"), PROGRAM_NAME);
printf(_(" --instance=instance_name\n"));
printf(_("\n %s del-instance -B backup-path\n"), PROGRAM_NAME);
printf(_(" --instance=instance_name\n"));
printf(_("\n %s archive-push -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" --wal-file-path=wal-file-path\n"));
printf(_(" --wal-file-name=wal-file-name\n"));
printf(_(" [--compress]\n"));
printf(_(" [--compress-algorithm=compress-algorithm]\n"));
printf(_(" [--compress-level=compress-level]\n"));
printf(_(" [--overwrite]\n"));
printf(_("\n %s archive-get -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" --wal-file-path=wal-file-path\n"));
printf(_(" --wal-file-name=wal-file-name\n"));
if ((PROGRAM_URL || PROGRAM_EMAIL))
{
printf("\n");
if (PROGRAM_URL)
printf("Read the website for details. <%s>\n", PROGRAM_URL);
if (PROGRAM_EMAIL)
printf("Report bugs to <%s>.\n", PROGRAM_EMAIL);
}
exit(0);
}
static void
help_init(void)
{
printf(_("%s init -B backup-path\n\n"), PROGRAM_NAME);
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
}
static void
help_backup(void)
{
printf(_("%s backup -B backup-path -b backup-mode --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [-C] [--stream [-S slot-name]] [--backup-pg-log]\n"));
printf(_(" [-j num-threads] [--archive-timeout=archive-timeout]\n"));
printf(_(" [--progress]\n"));
printf(_(" [--log-level-console=log-level-console]\n"));
printf(_(" [--log-level-file=log-level-file]\n"));
printf(_(" [--log-filename=log-filename]\n"));
printf(_(" [--error-log-filename=error-log-filename]\n"));
printf(_(" [--log-directory=log-directory]\n"));
printf(_(" [--log-rotation-size=log-rotation-size]\n"));
printf(_(" [--log-rotation-age=log-rotation-age]\n"));
printf(_(" [--delete-expired] [--delete-wal]\n"));
printf(_(" [--retention-redundancy=retention-redundancy]\n"));
printf(_(" [--retention-window=retention-window]\n"));
printf(_(" [--compress]\n"));
printf(_(" [--compress-algorithm=compress-algorithm]\n"));
printf(_(" [--compress-level=compress-level]\n"));
printf(_(" [-d dbname] [-h host] [-p port] [-U username]\n"));
printf(_(" [-w --no-password] [-W --password]\n"));
printf(_(" [--master-db=db_name] [--master-host=host_name]\n"));
printf(_(" [--master-port=port] [--master-user=user_name]\n"));
printf(_(" [--replica-timeout=timeout]\n\n"));
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
printf(_(" -b, --backup-mode=backup-mode backup mode=FULL|PAGE|DELTA|PTRACK\n"));
printf(_(" --instance=instance_name name of the instance\n"));
printf(_(" -C, --smooth-checkpoint do smooth checkpoint before backup\n"));
printf(_(" --stream stream the transaction log and include it in the backup\n"));
printf(_(" -S, --slot=SLOTNAME replication slot to use\n"));
printf(_(" --backup-pg-log backup of pg_log directory\n"));
printf(_(" -j, --threads=NUM number of parallel threads\n"));
printf(_(" --archive-timeout=timeout wait timeout for WAL segment archiving (default: 5min)\n"));
printf(_(" --progress show progress\n"));
printf(_("\n Logging options:\n"));
printf(_(" --log-level-console=log-level-console\n"));
printf(_(" level for console logging (default: info)\n"));
printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n"));
printf(_(" --log-level-file=log-level-file\n"));
printf(_(" level for file logging (default: off)\n"));
printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n"));
printf(_(" --log-filename=log-filename\n"));
printf(_(" filename for file logging (default: 'pg_probackup.log')\n"));
printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log\n"));
printf(_(" --error-log-filename=error-log-filename\n"));
printf(_(" filename for error logging (default: none)\n"));
printf(_(" --log-directory=log-directory\n"));
printf(_(" directory for file logging (default: BACKUP_PATH/log)\n"));
printf(_(" --log-rotation-size=log-rotation-size\n"));
printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n"));
printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n"));
printf(_(" --log-rotation-age=log-rotation-age\n"));
printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n"));
printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n"));
printf(_("\n Retention options:\n"));
printf(_(" --delete-expired delete backups expired according to current\n"));
printf(_(" retention policy after successful backup completion\n"));
printf(_(" --delete-wal remove redundant archived wal files\n"));
printf(_(" --retention-redundancy=retention-redundancy\n"));
printf(_(" number of full backups to keep; 0 disables; (default: 0)\n"));
printf(_(" --retention-window=retention-window\n"));
printf(_(" number of days of recoverability; 0 disables; (default: 0)\n"));
printf(_("\n Compression options:\n"));
printf(_(" --compress compress data files\n"));
printf(_(" --compress-algorithm=compress-algorithm\n"));
printf(_(" available options: 'zlib', 'pglz', 'none' (default: zlib)\n"));
printf(_(" --compress-level=compress-level\n"));
printf(_(" level of compression [0-9] (default: 1)\n"));
printf(_("\n Connection options:\n"));
printf(_(" -U, --username=USERNAME user name to connect as (default: current local user)\n"));
printf(_(" -d, --dbname=DBNAME database to connect (default: username)\n"));
printf(_(" -h, --host=HOSTNAME database server host or socket directory(default: 'local socket')\n"));
printf(_(" -p, --port=PORT database server port (default: 5432)\n"));
printf(_(" -w, --no-password never prompt for password\n"));
printf(_(" -W, --password force password prompt\n"));
printf(_("\n Replica options:\n"));
printf(_(" --master-user=user_name user name to connect to master\n"));
printf(_(" --master-db=db_name database to connect to master\n"));
printf(_(" --master-host=host_name database server host of master\n"));
printf(_(" --master-port=port database server port of master\n"));
printf(_(" --replica-timeout=timeout wait timeout for WAL segment streaming through replication (default: 5min)\n"));
}
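/*
 * Illustration only (hypothetical catalog path and instance name): a full
 * backup with streamed WAL, four threads and zlib compression could be
 * taken with something like
 *
 *   pg_probackup backup -B /mnt/backups -b FULL --instance=main --stream -j 4 --compress
 */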
static void
help_restore(void)
{
printf(_("%s restore -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [-D pgdata-path] [-i backup-id] [--progress]\n"));
printf(_(" [--time=time|--xid=xid|--lsn=lsn [--inclusive=boolean]]\n"));
printf(_(" [--timeline=timeline] [-T OLDDIR=NEWDIR]\n"));
printf(_(" [--immediate] [--recovery-target-name=target-name]\n"));
printf(_(" [--recovery-target-action=pause|promote|shutdown]\n"));
printf(_(" [--restore-as-replica] [--no-validate]\n\n"));
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
printf(_(" --instance=instance_name name of the instance\n"));
printf(_(" -D, --pgdata=pgdata-path location of the database storage area\n"));
printf(_(" -i, --backup-id=backup-id backup to restore\n"));
printf(_(" --progress show progress\n"));
printf(_(" --time=time time stamp up to which recovery will proceed\n"));
printf(_(" --xid=xid transaction ID up to which recovery will proceed\n"));
printf(_(" --lsn=lsn LSN of the write-ahead log location up to which recovery will proceed\n"));
printf(_(" --inclusive=boolean whether we stop just after the recovery target\n"));
printf(_(" --timeline=timeline recovering into a particular timeline\n"));
printf(_(" -T, --tablespace-mapping=OLDDIR=NEWDIR\n"));
printf(_(" relocate the tablespace from directory OLDDIR to NEWDIR\n"));
printf(_(" --immediate end recovery as soon as a consistent state is reached\n"));
printf(_(" --recovery-target-name=target-name\n"));
printf(_(" the named restore point to which recovery will proceed\n"));
printf(_(" --recovery-target-action=pause|promote|shutdown\n"));
printf(_(" action the server should take once the recovery target is reached\n"));
printf(_(" (default: pause)\n"));
printf(_(" -R, --restore-as-replica write a minimal recovery.conf in the output directory\n"));
printf(_(" to ease setting up a standby server\n"));
printf(_(" --no-validate disable backup validation during restore\n"));
printf(_("\n Logging options:\n"));
printf(_(" --log-level-console=log-level-console\n"));
printf(_(" level for console logging (default: info)\n"));
printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n"));
printf(_(" --log-level-file=log-level-file\n"));
printf(_(" level for file logging (default: off)\n"));
printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n"));
printf(_(" --log-filename=log-filename\n"));
printf(_(" filename for file logging (default: 'pg_probackup.log')\n"));
printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log\n"));
printf(_(" --error-log-filename=error-log-filename\n"));
printf(_(" filename for error logging (default: none)\n"));
printf(_(" --log-directory=log-directory\n"));
printf(_(" directory for file logging (default: BACKUP_PATH/log)\n"));
printf(_(" --log-rotation-size=log-rotation-size\n"));
printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n"));
printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n"));
printf(_(" --log-rotation-age=log-rotation-age\n"));
printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n"));
printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n"));
}
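/*
 * Illustration only (hypothetical paths, instance name and timestamp): a
 * point-in-time restore up to a given moment could look like
 *
 *   pg_probackup restore -B /mnt/backups --instance=main -D /var/lib/pgsql/data --time='2018-10-31 12:00:00'
 */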
static void
help_validate(void)
{
printf(_("%s validate -B backup-path [--instance=instance_name]\n"), PROGRAM_NAME);
printf(_(" [-i backup-id] [--progress]\n"));
printf(_(" [--time=time|--xid=xid|--lsn=lsn [--inclusive=boolean]]\n"));
printf(_(" [--timeline=timeline]\n\n"));
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
printf(_(" --instance=instance_name name of the instance\n"));
printf(_(" -i, --backup-id=backup-id backup to validate\n"));
printf(_(" --progress show progress\n"));
printf(_(" --time=time time stamp up to which recovery will proceed\n"));
printf(_(" --xid=xid transaction ID up to which recovery will proceed\n"));
printf(_(" --lsn=lsn LSN of the write-ahead log location up to which recovery will proceed\n"));
printf(_(" --inclusive=boolean whether we stop just after the recovery target\n"));
printf(_(" --timeline=timeline recovering into a particular timeline\n"));
printf(_(" --recovery-target-name=target-name\n"));
printf(_(" the named restore point to which recovery will proceed\n"));
printf(_("\n Logging options:\n"));
printf(_(" --log-level-console=log-level-console\n"));
printf(_(" level for console logging (default: info)\n"));
printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n"));
printf(_(" --log-level-file=log-level-file\n"));
printf(_(" level for file logging (default: off)\n"));
printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n"));
printf(_(" --log-filename=log-filename\n"));
printf(_(" filename for file logging (default: 'pg_probackup.log')\n"));
printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log\n"));
printf(_(" --error-log-filename=error-log-filename\n"));
printf(_(" filename for error logging (default: none)\n"));
printf(_(" --log-directory=log-directory\n"));
printf(_(" directory for file logging (default: BACKUP_PATH/log)\n"));
printf(_(" --log-rotation-size=log-rotation-size\n"));
printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n"));
printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n"));
printf(_(" --log-rotation-age=log-rotation-age\n"));
printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n"));
printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n"));
}
static void
help_show(void)
{
printf(_("%s show -B backup-path\n"), PROGRAM_NAME);
printf(_(" [--instance=instance_name [-i backup-id]]\n"));
printf(_(" [--format=format]\n\n"));
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
printf(_(" --instance=instance_name show info about specific intstance\n"));
printf(_(" -i, --backup-id=backup-id show info about specific backups\n"));
printf(_(" --format=format show format=PLAIN|JSON\n"));
}
static void
help_delete(void)
{
printf(_("%s delete -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [-i backup-id | --expired] [--wal]\n\n"));
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
printf(_(" --instance=instance_name name of the instance\n"));
printf(_(" -i, --backup-id=backup-id backup to delete\n"));
printf(_(" --expired delete backups expired according to current\n"));
printf(_(" retention policy\n"));
printf(_(" --wal remove unnecessary wal files in WAL ARCHIVE\n"));
printf(_("\n Logging options:\n"));
printf(_(" --log-level-console=log-level-console\n"));
printf(_(" level for console logging (default: info)\n"));
printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n"));
printf(_(" --log-level-file=log-level-file\n"));
printf(_(" level for file logging (default: off)\n"));
printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n"));
printf(_(" --log-filename=log-filename\n"));
printf(_(" filename for file logging (default: 'pg_probackup.log')\n"));
printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log\n"));
printf(_(" --error-log-filename=error-log-filename\n"));
printf(_(" filename for error logging (default: none)\n"));
printf(_(" --log-directory=log-directory\n"));
printf(_(" directory for file logging (default: BACKUP_PATH/log)\n"));
printf(_(" --log-rotation-size=log-rotation-size\n"));
printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n"));
printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n"));
printf(_(" --log-rotation-age=log-rotation-age\n"));
printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n"));
printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n"));
}
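/*
 * Illustration only (hypothetical catalog path and instance name): dropping
 * backups that fall outside the configured retention policy, together with
 * WAL that is no longer needed, could look like
 *
 *   pg_probackup delete -B /mnt/backups --instance=main --expired --wal
 */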
static void
help_merge(void)
{
printf(_("%s merge -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" -i backup-id [-j num-threads] [--progress]\n"));
printf(_(" [--log-level-console=log-level-console]\n"));
printf(_(" [--log-level-file=log-level-file]\n"));
printf(_(" [--log-filename=log-filename]\n"));
printf(_(" [--error-log-filename=error-log-filename]\n"));
printf(_(" [--log-directory=log-directory]\n"));
printf(_(" [--log-rotation-size=log-rotation-size]\n"));
printf(_(" [--log-rotation-age=log-rotation-age]\n\n"));
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
printf(_(" --instance=instance_name name of the instance\n"));
printf(_(" -i, --backup-id=backup-id backup to merge\n"));
printf(_(" -j, --threads=NUM number of parallel threads\n"));
printf(_(" --progress show progress\n"));
printf(_("\n Logging options:\n"));
printf(_(" --log-level-console=log-level-console\n"));
printf(_(" level for console logging (default: info)\n"));
printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n"));
printf(_(" --log-level-file=log-level-file\n"));
printf(_(" level for file logging (default: off)\n"));
printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n"));
printf(_(" --log-filename=log-filename\n"));
printf(_(" filename for file logging (default: 'pg_probackup.log')\n"));
printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log\n"));
printf(_(" --error-log-filename=error-log-filename\n"));
printf(_(" filename for error logging (default: none)\n"));
printf(_(" --log-directory=log-directory\n"));
printf(_(" directory for file logging (default: BACKUP_PATH/log)\n"));
printf(_(" --log-rotation-size=log-rotation-size\n"));
printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n"));
printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n"));
printf(_(" --log-rotation-age=log-rotation-age\n"));
printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n"));
printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n"));
}
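/*
 * Illustration only (hypothetical catalog path, instance name and backup
 * id): merging an incremental backup into its parent full backup could
 * look like
 *
 *   pg_probackup merge -B /mnt/backups --instance=main -i QZVRI6
 */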
static void
help_set_config(void)
{
printf(_("%s set-config -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [--log-level-console=log-level-console]\n"));
printf(_(" [--log-level-file=log-level-file]\n"));
printf(_(" [--log-filename=log-filename]\n"));
printf(_(" [--error-log-filename=error-log-filename]\n"));
printf(_(" [--log-directory=log-directory]\n"));
printf(_(" [--log-rotation-size=log-rotation-size]\n"));
printf(_(" [--log-rotation-age=log-rotation-age]\n"));
printf(_(" [--retention-redundancy=retention-redundancy]\n"));
printf(_(" [--retention-window=retention-window]\n"));
printf(_(" [--compress-algorithm=compress-algorithm]\n"));
printf(_(" [--compress-level=compress-level]\n"));
printf(_(" [-d dbname] [-h host] [-p port] [-U username]\n"));
printf(_(" [--master-db=db_name] [--master-host=host_name]\n"));
printf(_(" [--master-port=port] [--master-user=user_name]\n"));
printf(_(" [--replica-timeout=timeout]\n\n"));
printf(_(" [--archive-timeout=timeout]\n\n"));
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
printf(_(" --instance=instance_name name of the instance\n"));
printf(_("\n Logging options:\n"));
printf(_(" --log-level-console=log-level-console\n"));
printf(_(" level for console logging (default: info)\n"));
printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n"));
printf(_(" --log-level-file=log-level-file\n"));
printf(_(" level for file logging (default: off)\n"));
printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n"));
printf(_(" --log-filename=log-filename\n"));
printf(_(" filename for file logging (default: 'pg_probackup.log')\n"));
printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log\n"));
printf(_(" --error-log-filename=error-log-filename\n"));
printf(_(" filename for error logging (default: none)\n"));
printf(_(" --log-directory=log-directory\n"));
printf(_(" directory for file logging (default: BACKUP_PATH/log)\n"));
printf(_(" --log-rotation-size=log-rotation-size\n"));
printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n"));
printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n"));
printf(_(" --log-rotation-age=log-rotation-age\n"));
printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n"));
printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n"));
printf(_("\n Retention options:\n"));
printf(_(" --retention-redundancy=retention-redundancy\n"));
printf(_(" number of full backups to keep; 0 disables; (default: 0)\n"));
printf(_(" --retention-window=retention-window\n"));
printf(_(" number of days of recoverability; 0 disables; (default: 0)\n"));
printf(_("\n Compression options:\n"));
printf(_(" --compress-algorithm=compress-algorithm\n"));
printf(_(" available options: 'zlib','pglz','none'\n"));
printf(_(" --compress-level=compress-level\n"));
printf(_(" level of compression [0-9] (default: 1)\n"));
printf(_("\n Connection options:\n"));
printf(_(" -U, --username=USERNAME user name to connect as (default: current local user)\n"));
printf(_(" -d, --dbname=DBNAME database to connect (default: username)\n"));
printf(_(" -h, --host=HOSTNAME database server host or socket directory(default: 'local socket')\n"));
printf(_(" -p, --port=PORT database server port (default: 5432)\n"));
printf(_("\n Replica options:\n"));
printf(_(" --master-user=user_name user name to connect to master\n"));
printf(_(" --master-db=db_name database to connect to master\n"));
printf(_(" --master-host=host_name database server host of master\n"));
printf(_(" --master-port=port database server port of master\n"));
printf(_(" --replica-timeout=timeout wait timeout for WAL segment streaming through replication (default: 5min)\n"));
printf(_("\n Archive options:\n"));
printf(_(" --archive-timeout=timeout wait timeout for WAL segment archiving (default: 5min)\n"));
}
static void
help_show_config(void)
{
printf(_("%s show-config -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [--format=format]\n\n"));
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
printf(_(" --instance=instance_name name of the instance\n"));
printf(_(" --format=format show format=PLAIN|JSON\n"));
}
static void
help_add_instance(void)
{
printf(_("%s add-instance -B backup-path -D pgdata-path\n"), PROGRAM_NAME);
printf(_(" --instance=instance_name\n\n"));
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
printf(_(" -D, --pgdata=pgdata-path location of the database storage area\n"));
printf(_(" --instance=instance_name name of the new instance\n"));
}
static void
help_del_instance(void)
{
printf(_("%s del-instance -B backup-path --instance=instance_name\n\n"), PROGRAM_NAME);
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
printf(_(" --instance=instance_name name of the instance to delete\n"));
}
static void
help_archive_push(void)
{
printf(_("\n %s archive-push -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" --wal-file-path=wal-file-path\n"));
printf(_(" --wal-file-name=wal-file-name\n"));
printf(_(" [--compress]\n"));
printf(_(" [--compress-algorithm=compress-algorithm]\n"));
printf(_(" [--compress-level=compress-level]\n"));
printf(_(" [--overwrite]\n\n"));
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
printf(_(" --instance=instance_name name of the instance to delete\n"));
printf(_(" --wal-file-path=wal-file-path\n"));
printf(_(" relative path name of the WAL file on the server\n"));
printf(_(" --wal-file-name=wal-file-name\n"));
printf(_(" name of the WAL file to retrieve from the server\n"));
printf(_(" --compress compress WAL file during archiving\n"));
printf(_(" --compress-algorithm=compress-algorithm\n"));
printf(_(" available options: 'zlib','none'\n"));
printf(_(" --compress-level=compress-level\n"));
printf(_(" level of compression [0-9] (default: 1)\n"));
printf(_(" --overwrite overwrite archived WAL file\n"));
}
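/*
 * Illustration only (hypothetical catalog path and instance name):
 * archive-push is normally wired into postgresql.conf as the
 * archive_command; %p and %f are expanded by the server:
 *
 *   archive_command = 'pg_probackup archive-push -B /mnt/backups --instance=main --wal-file-path=%p --wal-file-name=%f'
 */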
static void
help_archive_get(void)
{
printf(_("\n %s archive-get -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" --wal-file-path=wal-file-path\n"));
printf(_(" --wal-file-name=wal-file-name\n\n"));
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
printf(_(" --instance=instance_name name of the instance to delete\n"));
printf(_(" --wal-file-path=wal-file-path\n"));
printf(_(" relative destination path name of the WAL file on the server\n"));
printf(_(" --wal-file-name=wal-file-name\n"));
printf(_(" name of the WAL file to retrieve from the archive\n"));
}
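/*
 * Illustration only (hypothetical catalog path and instance name):
 * archive-get is the counterpart for restore_command in recovery.conf:
 *
 *   restore_command = 'pg_probackup archive-get -B /mnt/backups --instance=main --wal-file-path=%p --wal-file-name=%f'
 */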

108
src/init.c Normal file

@ -0,0 +1,108 @@
/*-------------------------------------------------------------------------
*
* init.c: initialize the backup catalog.
*
* Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
* Portions Copyright (c) 2015-2017, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#include "pg_probackup.h"
#include <unistd.h>
#include <dirent.h>
#include <sys/stat.h>
/*
* Initialize backup catalog.
*/
int
do_init(void)
{
char path[MAXPGPATH];
char arclog_path_dir[MAXPGPATH];
int results;
results = pg_check_dir(backup_path);
if (results == 4) /* exists and not empty */
elog(ERROR, "backup catalog already exists and is not empty");
else if (results == -1) /* trouble accessing directory */
{
int errno_tmp = errno;
elog(ERROR, "cannot open backup catalog directory \"%s\": %s",
backup_path, strerror(errno_tmp));
}
/* create backup catalog root directory */
dir_create_dir(backup_path, DIR_PERMISSION);
/* create backup catalog data directory */
join_path_components(path, backup_path, BACKUPS_DIR);
dir_create_dir(path, DIR_PERMISSION);
/* create backup catalog wal directory */
join_path_components(arclog_path_dir, backup_path, "wal");
dir_create_dir(arclog_path_dir, DIR_PERMISSION);
elog(INFO, "Backup catalog '%s' successfully inited", backup_path);
return 0;
}
int
do_add_instance(void)
{
char path[MAXPGPATH];
char arclog_path_dir[MAXPGPATH];
struct stat st;
pgBackupConfig *config = pgut_new(pgBackupConfig);
/* PGDATA is always required */
if (pgdata == NULL)
elog(ERROR, "Required parameter not specified: PGDATA "
"(-D, --pgdata)");
/* Read system_identifier from PGDATA */
system_identifier = get_system_identifier(pgdata);
/* Starting from PostgreSQL 11 read WAL segment size from PGDATA */
xlog_seg_size = get_xlog_seg_size(pgdata);
/* Ensure that all root directories already exist */
if (access(backup_path, F_OK) != 0)
elog(ERROR, "%s directory does not exist.", backup_path);
join_path_components(path, backup_path, BACKUPS_DIR);
if (access(path, F_OK) != 0)
elog(ERROR, "%s directory does not exist.", path);
join_path_components(arclog_path_dir, backup_path, "wal");
if (access(arclog_path_dir, F_OK) != 0)
elog(ERROR, "%s directory does not exist.", arclog_path_dir);
/* Create directory for data files of this specific instance */
if (stat(backup_instance_path, &st) == 0 && S_ISDIR(st.st_mode))
elog(ERROR, "instance '%s' already exists", backup_instance_path);
dir_create_dir(backup_instance_path, DIR_PERMISSION);
/*
* Create directory for wal files of this specific instance.
* The existence check is extra paranoid: if we don't have such a
* directory in the data dir, we shouldn't have it in wal either.
*/
if (stat(arclog_path, &st) == 0 && S_ISDIR(st.st_mode))
elog(ERROR, "arclog_path '%s' already exists", arclog_path);
dir_create_dir(arclog_path, DIR_PERMISSION);
/*
* Write the initial config. system-identifier and pgdata are set in
* init subcommand and will never be updated.
*/
pgBackupConfigInit(config);
config->system_identifier = system_identifier;
config->xlog_seg_size = xlog_seg_size;
config->pgdata = pgdata;
writeBackupCatalogConfigFile(config);
elog(INFO, "Instance '%s' successfully inited", instance_name);
return 0;
}
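/*
 * For reference, after init plus add-instance the catalog created by the
 * two functions above looks roughly like this (instance name "main" is
 * hypothetical):
 *
 *   $BACKUP_PATH/
 *     backups/
 *       main/                <- backup_instance_path, one subdir per backup
 *         pg_probackup.conf
 *     wal/
 *       main/                <- arclog_path, archived WAL for this instance
 */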

526
src/merge.c Normal file

@ -0,0 +1,526 @@
/*-------------------------------------------------------------------------
*
* merge.c: merge FULL and incremental backups
*
* Copyright (c) 2018, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#include "pg_probackup.h"
#include <sys/stat.h>
#include <unistd.h>
#include "utils/thread.h"
typedef struct
{
parray *to_files;
parray *files;
pgBackup *to_backup;
pgBackup *from_backup;
const char *to_root;
const char *from_root;
/*
* Return value from the thread.
* 0 means no error, 1 means an error occurred.
*/
int ret;
} merge_files_arg;
static void merge_backups(pgBackup *backup, pgBackup *next_backup);
static void *merge_files(void *arg);
/*
* Implementation of MERGE command.
*
* - Find target and its parent full backup
* - Merge data files of the target, parent and intermediate backups
* - Remove files that no longer exist in the target backup
*/
void
do_merge(time_t backup_id)
{
parray *backups;
pgBackup *dest_backup = NULL;
pgBackup *full_backup = NULL;
time_t prev_parent = INVALID_BACKUP_ID;
int i;
int dest_backup_idx = 0;
int full_backup_idx = 0;
if (backup_id == INVALID_BACKUP_ID)
elog(ERROR, "required parameter is not specified: --backup-id");
if (instance_name == NULL)
elog(ERROR, "required parameter is not specified: --instance");
elog(LOG, "Merge started");
catalog_lock();
/* Get list of all backups sorted in order of descending start time */
backups = catalog_get_backup_list(INVALID_BACKUP_ID);
/* Find destination and parent backups */
for (i = 0; i < parray_num(backups); i++)
{
pgBackup *backup = (pgBackup *) parray_get(backups, i);
if (backup->start_time > backup_id)
continue;
else if (backup->start_time == backup_id && !dest_backup)
{
if (backup->status != BACKUP_STATUS_OK)
elog(ERROR, "Backup %s has status: %s",
base36enc(backup->start_time), status2str(backup->status));
if (backup->backup_mode == BACKUP_MODE_FULL)
elog(ERROR, "Backup %s if full backup",
base36enc(backup->start_time));
dest_backup = backup;
dest_backup_idx = i;
}
else
{
Assert(dest_backup);
if (backup->start_time != prev_parent)
continue;
if (backup->status != BACKUP_STATUS_OK)
elog(ERROR, "Skipping backup %s, because it has non-valid status: %s",
base36enc(backup->start_time), status2str(backup->status));
/* If we already found dest_backup, look for full backup */
if (dest_backup && backup->backup_mode == BACKUP_MODE_FULL)
{
if (backup->status != BACKUP_STATUS_OK)
elog(ERROR, "Parent full backup %s for the given backup %s has status: %s",
base36enc_dup(backup->start_time),
base36enc_dup(dest_backup->start_time),
status2str(backup->status));
full_backup = backup;
full_backup_idx = i;
/* Found target and full backups, so break the loop */
break;
}
}
prev_parent = backup->parent_backup;
}
if (dest_backup == NULL)
elog(ERROR, "Target backup %s was not found", base36enc(backup_id));
if (full_backup == NULL)
elog(ERROR, "Parent full backup for the given backup %s was not found",
base36enc(backup_id));
Assert(full_backup_idx != dest_backup_idx);
/*
* Found target and full backups, merge them and intermediate backups
*/
for (i = full_backup_idx; i > dest_backup_idx; i--)
{
pgBackup *to_backup = (pgBackup *) parray_get(backups, i);
pgBackup *from_backup = (pgBackup *) parray_get(backups, i - 1);
merge_backups(to_backup, from_backup);
}
/* cleanup */
parray_walk(backups, pgBackupFree);
parray_free(backups);
elog(LOG, "Merge completed");
}
/*
* Merge two backups data files using threads.
* - move instance files from from_backup to to_backup
* - remove unnecessary directories and files from to_backup
* - update the metadata of from_backup; it becomes a FULL backup
*/
static void
merge_backups(pgBackup *to_backup, pgBackup *from_backup)
{
char *to_backup_id = base36enc_dup(to_backup->start_time),
*from_backup_id = base36enc_dup(from_backup->start_time);
char to_backup_path[MAXPGPATH],
to_database_path[MAXPGPATH],
from_backup_path[MAXPGPATH],
from_database_path[MAXPGPATH],
control_file[MAXPGPATH];
parray *files,
*to_files;
pthread_t *threads;
merge_files_arg *threads_args;
int i;
bool merge_isok = true;
elog(LOG, "Merging backup %s with backup %s", from_backup_id, to_backup_id);
to_backup->status = BACKUP_STATUS_MERGING;
pgBackupWriteBackupControlFile(to_backup);
from_backup->status = BACKUP_STATUS_MERGING;
pgBackupWriteBackupControlFile(from_backup);
/*
* Make backup paths.
*/
pgBackupGetPath(to_backup, to_backup_path, lengthof(to_backup_path), NULL);
pgBackupGetPath(to_backup, to_database_path, lengthof(to_database_path),
DATABASE_DIR);
pgBackupGetPath(from_backup, from_backup_path, lengthof(from_backup_path), NULL);
pgBackupGetPath(from_backup, from_database_path, lengthof(from_database_path),
DATABASE_DIR);
create_data_directories(to_database_path, from_backup_path, false);
/*
* Get list of files which will be modified or removed.
*/
pgBackupGetPath(to_backup, control_file, lengthof(control_file),
DATABASE_FILE_LIST);
to_files = dir_read_file_list(from_database_path, /* Use from_database_path
* so root path will be
* equal with 'files' */
control_file);
/* To delete from the leaves first, sort in reverse order */
parray_qsort(to_files, pgFileComparePathDesc);
/*
* Get list of files which need to be moved.
*/
pgBackupGetPath(from_backup, control_file, lengthof(control_file),
DATABASE_FILE_LIST);
files = dir_read_file_list(from_database_path, control_file);
/* sort by size for load balancing */
parray_qsort(files, pgFileCompareSize);
threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads);
threads_args = (merge_files_arg *) palloc(sizeof(merge_files_arg) * num_threads);
/* Setup threads */
for (i = 0; i < parray_num(files); i++)
{
pgFile *file = (pgFile *) parray_get(files, i);
pg_atomic_init_flag(&file->lock);
}
for (i = 0; i < num_threads; i++)
{
merge_files_arg *arg = &(threads_args[i]);
arg->to_files = to_files;
arg->files = files;
arg->to_backup = to_backup;
arg->from_backup = from_backup;
arg->to_root = to_database_path;
arg->from_root = from_database_path;
/* Assume an error until the thread reports success */
arg->ret = 1;
elog(VERBOSE, "Start thread: %d", i);
pthread_create(&threads[i], NULL, merge_files, arg);
}
/* Wait threads */
for (i = 0; i < num_threads; i++)
{
pthread_join(threads[i], NULL);
if (threads_args[i].ret == 1)
merge_isok = false;
}
if (!merge_isok)
elog(ERROR, "Data files merging failed");
/*
* Files were copied into to_backup and deleted from from_backup. Remove
* remaining directories from from_backup.
*/
parray_qsort(files, pgFileComparePathDesc);
for (i = 0; i < parray_num(files); i++)
{
pgFile *file = (pgFile *) parray_get(files, i);
if (!S_ISDIR(file->mode))
continue;
if (rmdir(file->path))
elog(ERROR, "Could not remove directory \"%s\": %s",
file->path, strerror(errno));
}
if (rmdir(from_database_path))
elog(ERROR, "Could not remove directory \"%s\": %s",
from_database_path, strerror(errno));
if (unlink(control_file))
elog(ERROR, "Could not remove file \"%s\": %s",
control_file, strerror(errno));
pgBackupGetPath(from_backup, control_file, lengthof(control_file),
BACKUP_CONTROL_FILE);
if (unlink(control_file))
elog(ERROR, "Could not remove file \"%s\": %s",
control_file, strerror(errno));
if (rmdir(from_backup_path))
elog(ERROR, "Could not remove directory \"%s\": %s",
from_backup_path, strerror(errno));
/*
* Delete files which are not in from_backup file list.
*/
for (i = 0; i < parray_num(to_files); i++)
{
pgFile *file = (pgFile *) parray_get(to_files, i);
if (parray_bsearch(files, file, pgFileComparePathDesc) == NULL)
{
pgFileDelete(file);
elog(LOG, "Deleted \"%s\"", file->path);
}
}
/*
* Rename FULL backup directory.
*/
if (rename(to_backup_path, from_backup_path) == -1)
elog(ERROR, "Could not rename directory \"%s\" to \"%s\": %s",
to_backup_path, from_backup_path, strerror(errno));
/*
* Update to_backup metadata.
*/
pgBackupCopy(to_backup, from_backup);
/* Correct metadata */
to_backup->backup_mode = BACKUP_MODE_FULL;
to_backup->status = BACKUP_STATUS_OK;
to_backup->parent_backup = INVALID_BACKUP_ID;
/* Compute the total size of regular files in the backup */
to_backup->data_bytes = 0;
for (i = 0; i < parray_num(files); i++)
{
pgFile *file = (pgFile *) parray_get(files, i);
if (S_ISDIR(file->mode))
to_backup->data_bytes += 4096;
/* Count the amount of the data actually copied */
else if (S_ISREG(file->mode))
to_backup->data_bytes += file->write_size;
}
/* compute size of wal files of this backup stored in the archive */
if (!to_backup->stream)
to_backup->wal_bytes = xlog_seg_size *
(to_backup->stop_lsn / xlog_seg_size -
to_backup->start_lsn / xlog_seg_size + 1);
else
to_backup->wal_bytes = BYTES_INVALID;
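/*
 * A worked example of the formula above, assuming 16 MB (0x1000000) WAL
 * segments: with start_lsn = 0x2500000 (segment 2) and stop_lsn = 0x4100000
 * (segment 4), wal_bytes = 16 MB * (4 - 2 + 1) = 48 MB, i.e. the three
 * archived segments the backup spans.
 */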
pgBackupWriteFileList(to_backup, files, from_database_path);
pgBackupWriteBackupControlFile(to_backup);
/* Cleanup */
pfree(threads_args);
pfree(threads);
parray_walk(to_files, pgFileFree);
parray_free(to_files);
parray_walk(files, pgFileFree);
parray_free(files);
pfree(to_backup_id);
pfree(from_backup_id);
}
/*
* Thread worker of merge_backups().
*/
static void *
merge_files(void *arg)
{
merge_files_arg *argument = (merge_files_arg *) arg;
pgBackup *to_backup = argument->to_backup;
pgBackup *from_backup = argument->from_backup;
char tmp_file_path[MAXPGPATH];
int i,
num_files = parray_num(argument->files);
int to_root_len = strlen(argument->to_root);
if (to_backup->compress_alg == PGLZ_COMPRESS ||
to_backup->compress_alg == ZLIB_COMPRESS)
join_path_components(tmp_file_path, argument->to_root, "tmp");
for (i = 0; i < num_files; i++)
{
pgFile *file = (pgFile *) parray_get(argument->files, i);
if (!pg_atomic_test_set_flag(&file->lock))
continue;
/* check for interrupt */
if (interrupted)
elog(ERROR, "Interrupted during merging backups");
if (progress)
elog(LOG, "Progress: (%d/%d). Process file \"%s\"",
i + 1, num_files, file->path);
/*
* Skip files which haven't changed since previous backup. But in case
* of DELTA backup we should consider n_blocks to truncate the target
* backup.
*/
if (file->write_size == BYTES_INVALID &&
file->n_blocks == -1)
{
elog(VERBOSE, "Skip merging file \"%s\", the file didn't change",
file->path);
/*
* If the file wasn't changed in the PAGE backup, retrieve its
* write_size from previous FULL backup.
*/
if (S_ISREG(file->mode))
{
pgFile **res_file;
res_file = parray_bsearch(argument->to_files, file,
pgFileComparePathDesc);
if (res_file && *res_file)
{
file->compress_alg = (*res_file)->compress_alg;
file->write_size = (*res_file)->write_size;
file->crc = (*res_file)->crc;
}
}
continue;
}
/* Directories were created before */
if (S_ISDIR(file->mode))
continue;
/*
* Move the file. We need to decompress it and compress again if
* necessary.
*/
elog(VERBOSE, "Moving file \"%s\", is_datafile %d, is_cfs %d",
file->path, file->is_datafile, file->is_cfs);
if (file->is_datafile && !file->is_cfs)
{
char to_path_tmp[MAXPGPATH]; /* Path of target file */
join_path_components(to_path_tmp, argument->to_root,
file->path + to_root_len + 1);
/*
* We need a more complicated algorithm if the target file exists and is
* compressed.
*/
if (to_backup->compress_alg == PGLZ_COMPRESS ||
to_backup->compress_alg == ZLIB_COMPRESS)
{
char *prev_path;
/* Start the magic */
/*
* Merge files:
* - decompress first file
* - decompress second file and merge with first decompressed file
* - compress result file
*/
elog(VERBOSE, "File is compressed, decompress to the temporary file \"%s\"",
tmp_file_path);
prev_path = file->path;
/*
* We need to decompress target file only if it exists.
*/
if (fileExists(to_path_tmp))
{
/*
* file->path points to the file in the from_root directory. But we
* need the file in the to_root directory.
*/
file->path = to_path_tmp;
/* Decompress first/target file */
restore_data_file(tmp_file_path, file, false, false);
file->path = prev_path;
}
/* Merge second/source file with first/target file */
restore_data_file(tmp_file_path, file,
from_backup->backup_mode == BACKUP_MODE_DIFF_DELTA,
false);
elog(VERBOSE, "Compress file and save it to the directory \"%s\"",
argument->to_root);
/* Again we need change path */
file->path = tmp_file_path;
/* backup_data_file() requires file size to calculate nblocks */
file->size = pgFileSize(file->path);
/* Now we can compress the file */
backup_data_file(NULL, /* We shouldn't need 'arguments' here */
to_path_tmp, file,
to_backup->start_lsn,
to_backup->backup_mode,
to_backup->compress_alg,
to_backup->compress_level);
file->path = prev_path;
/* We can remove temporary file now */
if (unlink(tmp_file_path))
elog(ERROR, "Could not remove temporary file \"%s\": %s",
tmp_file_path, strerror(errno));
}
/*
* Otherwise merging algorithm is simpler.
*/
else
{
/* We can merge in-place here */
restore_data_file(to_path_tmp, file,
from_backup->backup_mode == BACKUP_MODE_DIFF_DELTA,
true);
/*
* We need to calculate write_size, restore_data_file() doesn't
* do that.
*/
file->write_size = pgFileSize(to_path_tmp);
file->crc = pgFileGetCRC(to_path_tmp);
}
pgFileDelete(file);
}
else
move_file(argument->from_root, argument->to_root, file);
if (file->write_size != BYTES_INVALID)
elog(LOG, "Moved file \"%s\": " INT64_FORMAT " bytes",
file->path, file->write_size);
}
/* Data files merging is successful */
argument->ret = 0;
return NULL;
}
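The per-file atomic flag above is the entire scheduling mechanism of merge_files(): every worker walks the same size-sorted file list and the first successful test-and-set claims the file. A minimal self-contained sketch of the same claim-by-flag pattern, using portable C11 atomics instead of PostgreSQL's port/atomics (task_t and the counts are hypothetical):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <pthread.h>

#define NTASKS   8
#define NWORKERS 3

typedef struct
{
    atomic_flag lock;   /* set by the first worker that claims the task */
    int         id;
} task_t;

static task_t tasks[NTASKS];

static void *
worker(void *arg)
{
    int     wid = (int) (intptr_t) arg;
    int     i;

    for (i = 0; i < NTASKS; i++)
    {
        /*
         * atomic_flag_test_and_set() returns the previous value, so "true"
         * means another worker already owns this task.  (Note the inverted
         * sense versus pg_atomic_test_set_flag(), which returns true on a
         * successful claim.)
         */
        if (atomic_flag_test_and_set(&tasks[i].lock))
            continue;
        printf("worker %d processes task %d\n", wid, tasks[i].id);
    }
    return NULL;
}

int
main(void)
{
    pthread_t   threads[NWORKERS];
    int         i;

    for (i = 0; i < NTASKS; i++)
    {
        tasks[i].id = i;
        atomic_flag_clear(&tasks[i].lock);
    }
    for (i = 0; i < NWORKERS; i++)
        pthread_create(&threads[i], NULL, worker, (void *) (intptr_t) i);
    for (i = 0; i < NWORKERS; i++)
        pthread_join(threads[i], NULL);
    return 0;
}

Built with e.g. cc -pthread, each task line is printed exactly once no matter how the workers interleave.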

1039
src/parsexlog.c Normal file

File diff suppressed because it is too large

634
src/pg_probackup.c Normal file

@ -0,0 +1,634 @@
/*-------------------------------------------------------------------------
*
* pg_probackup.c: Backup/Recovery manager for PostgreSQL.
*
* Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
* Portions Copyright (c) 2015-2017, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#include "pg_probackup.h"
#include "streamutil.h"
#include "utils/thread.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/stat.h>
#include <unistd.h>
#include "pg_getopt.h"
const char *PROGRAM_VERSION = "2.0.18";
const char *PROGRAM_URL = "https://github.com/postgrespro/pg_probackup";
const char *PROGRAM_EMAIL = "https://github.com/postgrespro/pg_probackup/issues";
/* directory options */
char *backup_path = NULL;
char *pgdata = NULL;
/*
* path to the data files in the backup catalog
* $BACKUP_PATH/backups/instance_name
*/
char backup_instance_path[MAXPGPATH];
/*
* path to the WAL files in the backup catalog
* $BACKUP_PATH/wal/instance_name
*/
char arclog_path[MAXPGPATH] = "";
/* common options */
static char *backup_id_string = NULL;
int num_threads = 1;
bool stream_wal = false;
bool progress = false;
#if PG_VERSION_NUM >= 100000
char *replication_slot = NULL;
#endif
/* backup options */
bool backup_logs = false;
bool smooth_checkpoint;
bool is_remote_backup = false;
/* Wait timeout for WAL segment archiving */
uint32 archive_timeout = ARCHIVE_TIMEOUT_DEFAULT;
const char *master_db = NULL;
const char *master_host = NULL;
const char *master_port= NULL;
const char *master_user = NULL;
uint32 replica_timeout = REPLICA_TIMEOUT_DEFAULT;
/* restore options */
static char *target_time;
static char *target_xid;
static char *target_lsn;
static char *target_inclusive;
static TimeLineID target_tli;
static bool target_immediate;
static char *target_name = NULL;
static char *target_action = NULL;
static pgRecoveryTarget *recovery_target_options = NULL;
bool restore_as_replica = false;
bool restore_no_validate = false;
/* delete options */
bool delete_wal = false;
bool delete_expired = false;
bool apply_to_all = false;
bool force_delete = false;
/* retention options */
uint32 retention_redundancy = 0;
uint32 retention_window = 0;
/* compression options */
CompressAlg compress_alg = COMPRESS_ALG_DEFAULT;
int compress_level = COMPRESS_LEVEL_DEFAULT;
bool compress_shortcut = false;
/* other options */
char *instance_name;
uint64 system_identifier = 0;
/*
* Starting from PostgreSQL 11 WAL segment size may vary. Prior to
* PostgreSQL 10 xlog_seg_size is equal to XLOG_SEG_SIZE.
*/
#if PG_VERSION_NUM >= 110000
uint32 xlog_seg_size = 0;
#else
uint32 xlog_seg_size = XLOG_SEG_SIZE;
#endif
/* archive push options */
static char *wal_file_path;
static char *wal_file_name;
static bool file_overwrite = false;
/* show options */
ShowFormat show_format = SHOW_PLAIN;
/* current settings */
pgBackup current;
ProbackupSubcmd backup_subcmd = NO_CMD;
static bool help_opt = false;
static void opt_backup_mode(pgut_option *opt, const char *arg);
static void opt_log_level_console(pgut_option *opt, const char *arg);
static void opt_log_level_file(pgut_option *opt, const char *arg);
static void opt_compress_alg(pgut_option *opt, const char *arg);
static void opt_show_format(pgut_option *opt, const char *arg);
static void compress_init(void);
static pgut_option options[] =
{
/* directory options */
{ 'b', 1, "help", &help_opt, SOURCE_CMDLINE },
{ 's', 'D', "pgdata", &pgdata, SOURCE_CMDLINE },
{ 's', 'B', "backup-path", &backup_path, SOURCE_CMDLINE },
/* common options */
{ 'u', 'j', "threads", &num_threads, SOURCE_CMDLINE },
{ 'b', 2, "stream", &stream_wal, SOURCE_CMDLINE },
{ 'b', 3, "progress", &progress, SOURCE_CMDLINE },
{ 's', 'i', "backup-id", &backup_id_string, SOURCE_CMDLINE },
/* backup options */
{ 'b', 10, "backup-pg-log", &backup_logs, SOURCE_CMDLINE },
{ 'f', 'b', "backup-mode", opt_backup_mode, SOURCE_CMDLINE },
{ 'b', 'C', "smooth-checkpoint", &smooth_checkpoint, SOURCE_CMDLINE },
{ 's', 'S', "slot", &replication_slot, SOURCE_CMDLINE },
{ 'u', 11, "archive-timeout", &archive_timeout, SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_S },
{ 'b', 12, "delete-wal", &delete_wal, SOURCE_CMDLINE },
{ 'b', 13, "delete-expired", &delete_expired, SOURCE_CMDLINE },
{ 's', 14, "master-db", &master_db, SOURCE_CMDLINE, },
{ 's', 15, "master-host", &master_host, SOURCE_CMDLINE, },
{ 's', 16, "master-port", &master_port, SOURCE_CMDLINE, },
{ 's', 17, "master-user", &master_user, SOURCE_CMDLINE, },
{ 'u', 18, "replica-timeout", &replica_timeout, SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_S },
/* TODO: feature not completed. Make it unavailable at the user level
{ 'b', 18, "remote", &is_remote_backup, SOURCE_CMDLINE, }, */
/* restore options */
{ 's', 20, "time", &target_time, SOURCE_CMDLINE },
{ 's', 21, "xid", &target_xid, SOURCE_CMDLINE },
{ 's', 22, "inclusive", &target_inclusive, SOURCE_CMDLINE },
{ 'u', 23, "timeline", &target_tli, SOURCE_CMDLINE },
{ 'f', 'T', "tablespace-mapping", opt_tablespace_map, SOURCE_CMDLINE },
{ 'b', 24, "immediate", &target_immediate, SOURCE_CMDLINE },
{ 's', 25, "recovery-target-name", &target_name, SOURCE_CMDLINE },
{ 's', 26, "recovery-target-action", &target_action, SOURCE_CMDLINE },
{ 'b', 'R', "restore-as-replica", &restore_as_replica, SOURCE_CMDLINE },
{ 'b', 27, "no-validate", &restore_no_validate, SOURCE_CMDLINE },
{ 's', 28, "lsn", &target_lsn, SOURCE_CMDLINE },
/* delete options */
{ 'b', 130, "wal", &delete_wal, SOURCE_CMDLINE },
{ 'b', 131, "expired", &delete_expired, SOURCE_CMDLINE },
{ 'b', 132, "all", &apply_to_all, SOURCE_CMDLINE },
/* TODO not implemented yet */
{ 'b', 133, "force", &force_delete, SOURCE_CMDLINE },
/* retention options */
{ 'u', 134, "retention-redundancy", &retention_redundancy, SOURCE_CMDLINE },
{ 'u', 135, "retention-window", &retention_window, SOURCE_CMDLINE },
/* compression options */
{ 'f', 136, "compress-algorithm", opt_compress_alg, SOURCE_CMDLINE },
{ 'u', 137, "compress-level", &compress_level, SOURCE_CMDLINE },
{ 'b', 138, "compress", &compress_shortcut, SOURCE_CMDLINE },
/* logging options */
{ 'f', 140, "log-level-console", opt_log_level_console, SOURCE_CMDLINE },
{ 'f', 141, "log-level-file", opt_log_level_file, SOURCE_CMDLINE },
{ 's', 142, "log-filename", &log_filename, SOURCE_CMDLINE },
{ 's', 143, "error-log-filename", &error_log_filename, SOURCE_CMDLINE },
{ 's', 144, "log-directory", &log_directory, SOURCE_CMDLINE },
{ 'u', 145, "log-rotation-size", &log_rotation_size, SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_KB },
{ 'u', 146, "log-rotation-age", &log_rotation_age, SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_MIN },
/* connection options */
{ 's', 'd', "pgdatabase", &pgut_dbname, SOURCE_CMDLINE },
{ 's', 'h', "pghost", &host, SOURCE_CMDLINE },
{ 's', 'p', "pgport", &port, SOURCE_CMDLINE },
{ 's', 'U', "pguser", &username, SOURCE_CMDLINE },
{ 'B', 'w', "no-password", &prompt_password, SOURCE_CMDLINE },
{ 'b', 'W', "password", &force_password, SOURCE_CMDLINE },
/* other options */
{ 'U', 150, "system-identifier", &system_identifier, SOURCE_FILE_STRICT },
{ 's', 151, "instance", &instance_name, SOURCE_CMDLINE },
#if PG_VERSION_NUM >= 110000
{ 'u', 152, "xlog-seg-size", &xlog_seg_size, SOURCE_FILE_STRICT},
#endif
/* archive-push options */
{ 's', 160, "wal-file-path", &wal_file_path, SOURCE_CMDLINE },
{ 's', 161, "wal-file-name", &wal_file_name, SOURCE_CMDLINE },
{ 'b', 162, "overwrite", &file_overwrite, SOURCE_CMDLINE },
/* show options */
{ 'f', 170, "format", opt_show_format, SOURCE_CMDLINE },
{ 0 }
};
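/*
 * A note on the table above, as far as pgut.c's conventions go: the first
 * field of each pgut_option selects the value parser ('b'/'B' set a bool to
 * true/false, 's' takes a string, 'u'/'U' parse unsigned 32/64-bit
 * integers, 'f' invokes the given callback), and the second is the short
 * option character, or a unique numeric id for long-only options.
 */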
/*
* Entry point of pg_probackup command.
*/
int
main(int argc, char *argv[])
{
char *command = NULL,
*command_name;
/* Check if backup_path is directory. */
struct stat stat_buf;
int rc;
/* initialize configuration */
pgBackupInit(&current);
PROGRAM_NAME = get_progname(argv[0]);
set_pglocale_pgservice(argv[0], "pgscripts");
#if PG_VERSION_NUM >= 110000
/*
* Reset WAL segment size; we will retrieve it using RetrieveWalSegSize()
* later.
*/
WalSegSz = 0;
#endif
/*
* Save the main thread's tid. It is used to call exit() in case of errors.
*/
main_tid = pthread_self();
/* Parse subcommands and non-subcommand options */
if (argc > 1)
{
if (strcmp(argv[1], "archive-push") == 0)
backup_subcmd = ARCHIVE_PUSH_CMD;
else if (strcmp(argv[1], "archive-get") == 0)
backup_subcmd = ARCHIVE_GET_CMD;
else if (strcmp(argv[1], "add-instance") == 0)
backup_subcmd = ADD_INSTANCE_CMD;
else if (strcmp(argv[1], "del-instance") == 0)
backup_subcmd = DELETE_INSTANCE_CMD;
else if (strcmp(argv[1], "init") == 0)
backup_subcmd = INIT_CMD;
else if (strcmp(argv[1], "backup") == 0)
backup_subcmd = BACKUP_CMD;
else if (strcmp(argv[1], "restore") == 0)
backup_subcmd = RESTORE_CMD;
else if (strcmp(argv[1], "validate") == 0)
backup_subcmd = VALIDATE_CMD;
else if (strcmp(argv[1], "delete") == 0)
backup_subcmd = DELETE_CMD;
else if (strcmp(argv[1], "merge") == 0)
backup_subcmd = MERGE_CMD;
else if (strcmp(argv[1], "show") == 0)
backup_subcmd = SHOW_CMD;
else if (strcmp(argv[1], "set-config") == 0)
backup_subcmd = SET_CONFIG_CMD;
else if (strcmp(argv[1], "show-config") == 0)
backup_subcmd = SHOW_CONFIG_CMD;
else if (strcmp(argv[1], "--help") == 0 ||
strcmp(argv[1], "-?") == 0 ||
strcmp(argv[1], "help") == 0)
{
if (argc > 2)
help_command(argv[2]);
else
help_pg_probackup();
}
else if (strcmp(argv[1], "--version") == 0
|| strcmp(argv[1], "version") == 0
|| strcmp(argv[1], "-V") == 0)
{
#ifdef PGPRO_VERSION
fprintf(stderr, "%s %s (Postgres Pro %s %s)\n",
PROGRAM_NAME, PROGRAM_VERSION,
PGPRO_VERSION, PGPRO_EDITION);
#else
fprintf(stderr, "%s %s (PostgreSQL %s)\n",
PROGRAM_NAME, PROGRAM_VERSION, PG_VERSION);
#endif
exit(0);
}
else
elog(ERROR, "Unknown subcommand \"%s\"", argv[1]);
}
if (backup_subcmd == NO_CMD)
elog(ERROR, "No subcommand specified");
/*
* Build the command string before getopt_long() is called, since it
* permutes the contents of argv.
*/
command_name = pstrdup(argv[1]);
if (backup_subcmd == BACKUP_CMD ||
backup_subcmd == RESTORE_CMD ||
backup_subcmd == VALIDATE_CMD ||
backup_subcmd == DELETE_CMD ||
backup_subcmd == MERGE_CMD)
{
int i,
len = 0,
allocated = 0;
allocated = sizeof(char) * MAXPGPATH;
command = (char *) palloc(allocated);
for (i = 0; i < argc; i++)
{
int arglen = strlen(argv[i]);
/* Reserve room for the argument, its trailing space and the final '\0' */
while (arglen + len + 2 > allocated)
{
allocated *= 2;
command = repalloc(command, allocated);
}
strncpy(command + len, argv[i], arglen);
len += arglen;
command[len++] = ' ';
}
command[len] = '\0';
}
optind += 1;
/* Parse command line arguments */
pgut_getopt(argc, argv, options);
if (help_opt)
help_command(command_name);
/* backup_path is required for all pg_probackup commands except help */
if (backup_path == NULL)
{
/*
* If command line argument is not set, try to read BACKUP_PATH
* from environment variable
*/
backup_path = getenv("BACKUP_PATH");
if (backup_path == NULL)
elog(ERROR, "required parameter not specified: BACKUP_PATH (-B, --backup-path)");
}
canonicalize_path(backup_path);
/* Ensure that backup_path is an absolute path */
if (!is_absolute_path(backup_path))
elog(ERROR, "-B, --backup-path must be an absolute path");
/* Ensure that backup_path is a path to a directory */
rc = stat(backup_path, &stat_buf);
if (rc != -1 && !S_ISDIR(stat_buf.st_mode))
elog(ERROR, "-B, --backup-path must be a path to directory");
/* The command string was built only for a few commands */
if (command)
{
elog_file(INFO, "command: %s", command);
pfree(command);
command = NULL;
}
/* Option --instance is required for all commands except init and show */
if (backup_subcmd != INIT_CMD && backup_subcmd != SHOW_CMD &&
backup_subcmd != VALIDATE_CMD)
{
if (instance_name == NULL)
elog(ERROR, "required parameter not specified: --instance");
}
/*
* If --instance option was passed, construct paths for backup data and
* xlog files of this backup instance.
*/
if (instance_name)
{
sprintf(backup_instance_path, "%s/%s/%s",
backup_path, BACKUPS_DIR, instance_name);
sprintf(arclog_path, "%s/%s/%s", backup_path, "wal", instance_name);
/*
* Ensure that the requested backup instance exists, for all commands
* except init (which doesn't take this parameter) and add-instance
* (which creates a new instance).
*/
if (backup_subcmd != INIT_CMD && backup_subcmd != ADD_INSTANCE_CMD)
{
if (access(backup_instance_path, F_OK) != 0)
elog(ERROR, "Instance '%s' does not exist in this backup catalog",
instance_name);
}
}
/*
* Read options from env variables or from config file,
* unless we're going to set them via set-config.
*/
if (instance_name && backup_subcmd != SET_CONFIG_CMD)
{
char path[MAXPGPATH];
/* Read environment variables */
pgut_getopt_env(options);
/* Read options from configuration file */
join_path_components(path, backup_instance_path, BACKUP_CATALOG_CONF_FILE);
pgut_readopt(path, options, ERROR, true);
}
/* Initialize logger */
init_logger(backup_path);
/*
* We have read pgdata path from command line or from configuration file.
* Ensure that pgdata is an absolute path.
*/
if (pgdata != NULL && !is_absolute_path(pgdata))
elog(ERROR, "-D, --pgdata must be an absolute path");
#if PG_VERSION_NUM >= 110000
/* Check xlog-seg-size option */
if (instance_name &&
backup_subcmd != INIT_CMD && backup_subcmd != SHOW_CMD &&
backup_subcmd != ADD_INSTANCE_CMD && !IsValidWalSegSize(xlog_seg_size))
elog(ERROR, "Invalid WAL segment size %u", xlog_seg_size);
#endif
/* Sanity check of --backup-id option */
if (backup_id_string != NULL)
{
if (backup_subcmd != RESTORE_CMD &&
backup_subcmd != VALIDATE_CMD &&
backup_subcmd != DELETE_CMD &&
backup_subcmd != MERGE_CMD &&
backup_subcmd != SHOW_CMD)
elog(ERROR, "Cannot use -i (--backup-id) option together with the \"%s\" command",
command_name);
current.backup_id = base36dec(backup_id_string);
if (current.backup_id == 0)
elog(ERROR, "Invalid backup-id \"%s\"", backup_id_string);
}
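/*
 * For illustration: backup ids are base36-encoded start timestamps. A start
 * time of, say, 1541000000 (roughly the date of this commit) encodes to the
 * backup id "PHGZ7K", and base36dec() reverses the mapping.
 */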
/* Setup stream options. They are used in streamutil.c. */
if (host != NULL)
dbhost = pstrdup(host);
if (port != NULL)
dbport = pstrdup(port);
if (username != NULL)
dbuser = pstrdup(username);
/* setup exclusion list for file search */
if (!backup_logs)
{
int i;
for (i = 0; pgdata_exclude_dir[i]; i++); /* find first empty slot */
/* Set 'pg_log' in first empty slot */
pgdata_exclude_dir[i] = "pg_log";
}
if (backup_subcmd == VALIDATE_CMD || backup_subcmd == RESTORE_CMD)
{
/* parse all recovery target options into recovery_target_options structure */
recovery_target_options = parseRecoveryTargetOptions(target_time, target_xid,
target_inclusive, target_tli, target_lsn, target_immediate,
target_name, target_action, restore_no_validate);
}
if (num_threads < 1)
num_threads = 1;
compress_init();
/* do actual operation */
switch (backup_subcmd)
{
case ARCHIVE_PUSH_CMD:
return do_archive_push(wal_file_path, wal_file_name, file_overwrite);
case ARCHIVE_GET_CMD:
return do_archive_get(wal_file_path, wal_file_name);
case ADD_INSTANCE_CMD:
return do_add_instance();
case DELETE_INSTANCE_CMD:
return do_delete_instance();
case INIT_CMD:
return do_init();
case BACKUP_CMD:
{
const char *backup_mode;
time_t start_time;
start_time = time(NULL);
backup_mode = deparse_backup_mode(current.backup_mode);
current.stream = stream_wal;
elog(INFO, "Backup start, pg_probackup version: %s, backup ID: %s, backup mode: %s, instance: %s, stream: %s, remote: %s",
PROGRAM_VERSION, base36enc(start_time), backup_mode, instance_name,
stream_wal ? "true" : "false", is_remote_backup ? "true" : "false");
return do_backup(start_time);
}
case RESTORE_CMD:
return do_restore_or_validate(current.backup_id,
recovery_target_options,
true);
case VALIDATE_CMD:
if (current.backup_id == 0 && target_time == 0 && target_xid == 0)
return do_validate_all();
else
return do_restore_or_validate(current.backup_id,
recovery_target_options,
false);
case SHOW_CMD:
return do_show(current.backup_id);
case DELETE_CMD:
if (delete_expired && backup_id_string)
elog(ERROR, "You cannot specify --delete-expired and --backup-id options together");
if (!delete_expired && !delete_wal && !backup_id_string)
elog(ERROR, "You must specify at least one of the delete options: --expired |--wal |--backup_id");
if (delete_wal && !delete_expired && !backup_id_string)
return do_retention_purge();
if (delete_expired)
return do_retention_purge();
else
return do_delete(current.backup_id);
case MERGE_CMD:
do_merge(current.backup_id);
break;
case SHOW_CONFIG_CMD:
return do_configure(true);
case SET_CONFIG_CMD:
return do_configure(false);
case NO_CMD:
/* Should not happen */
elog(ERROR, "Unknown subcommand");
}
return 0;
}
static void
opt_backup_mode(pgut_option *opt, const char *arg)
{
current.backup_mode = parse_backup_mode(arg);
}
static void
opt_log_level_console(pgut_option *opt, const char *arg)
{
log_level_console = parse_log_level(arg);
}
static void
opt_log_level_file(pgut_option *opt, const char *arg)
{
log_level_file = parse_log_level(arg);
}
static void
opt_show_format(pgut_option *opt, const char *arg)
{
const char *v = arg;
size_t len;
/* Skip all spaces detected */
while (IsSpace(*v))
v++;
len = strlen(v);
if (len > 0)
{
if (pg_strncasecmp("plain", v, len) == 0)
show_format = SHOW_PLAIN;
else if (pg_strncasecmp("json", v, len) == 0)
show_format = SHOW_JSON;
else
elog(ERROR, "Invalid show format \"%s\"", arg);
}
else
elog(ERROR, "Invalid show format \"%s\"", arg);
}
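/*
 * Note that only strlen(arg) bytes are compared above, so any prefix of
 * "plain" or "json" is accepted as well, e.g. --format=j selects JSON
 * output.
 */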
static void
opt_compress_alg(pgut_option *opt, const char *arg)
{
compress_alg = parse_compress_alg(arg);
}
/*
* Initialize compress and sanity checks for compress.
*/
static void
compress_init(void)
{
/* Default algorithm is zlib */
if (compress_shortcut)
compress_alg = ZLIB_COMPRESS;
if (backup_subcmd != SET_CONFIG_CMD)
{
if (compress_level != COMPRESS_LEVEL_DEFAULT
&& compress_alg == NOT_DEFINED_COMPRESS)
elog(ERROR, "Cannot specify compress-level option without compress-alg option");
}
if (compress_level < 0 || compress_level > 9)
elog(ERROR, "--compress-level value must be in the range from 0 to 9");
if (compress_level == 0)
compress_alg = NOT_DEFINED_COMPRESS;
if (backup_subcmd == BACKUP_CMD || backup_subcmd == ARCHIVE_PUSH_CMD)
{
#ifndef HAVE_LIBZ
if (compress_alg == ZLIB_COMPRESS)
elog(ERROR, "This build does not support zlib compression");
else
#endif
if (compress_alg == PGLZ_COMPRESS && num_threads > 1)
elog(ERROR, "Multithread backup does not support pglz compression");
}
}
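/*
 * Summarizing the checks above with a few hypothetical command lines:
 * "--compress" alone selects zlib at the default level; "--compress-level=5"
 * without an algorithm is rejected; "--compress-level=0" effectively turns
 * compression off; and a multithreaded backup with pglz ("-b FULL
 * --compress-algorithm=pglz -j 4") is rejected.
 */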

620
src/pg_probackup.h Normal file

@ -0,0 +1,620 @@
/*-------------------------------------------------------------------------
*
* pg_probackup.h: Backup/Recovery manager for PostgreSQL.
*
* Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
* Portions Copyright (c) 2015-2017, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#ifndef PG_PROBACKUP_H
#define PG_PROBACKUP_H
#include "postgres_fe.h"
#include <limits.h>
#include <libpq-fe.h>
#include "access/timeline.h"
#include "access/xlogdefs.h"
#include "access/xlog_internal.h"
#include "catalog/pg_control.h"
#include "storage/block.h"
#include "storage/bufpage.h"
#include "storage/checksum.h"
#include "utils/pg_crc.h"
#include "common/relpath.h"
#include "port.h"
#ifdef FRONTEND
#undef FRONTEND
#include "port/atomics.h"
#define FRONTEND
#endif
#include "utils/parray.h"
#include "utils/pgut.h"
#include "datapagemap.h"
# define PG_STOP_BACKUP_TIMEOUT 300
/*
* Macro needed to parse ptrack.
* NOTE: keep these values synchronized with the definitions in ptrack.h
*/
#define PTRACK_BITS_PER_HEAPBLOCK 1
#define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / PTRACK_BITS_PER_HEAPBLOCK)
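/*
* Worked example: with PTRACK_BITS_PER_HEAPBLOCK = 1 and BITS_PER_BYTE = 8,
* HEAPBLOCKS_PER_BYTE is 8, i.e. one byte of the ptrack map covers eight
* heap blocks, one "changed" bit per block.
*/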
/* Directory/File names */
#define DATABASE_DIR "database"
#define BACKUPS_DIR "backups"
#if PG_VERSION_NUM >= 100000
#define PG_XLOG_DIR "pg_wal"
#else
#define PG_XLOG_DIR "pg_xlog"
#endif
#define PG_TBLSPC_DIR "pg_tblspc"
#define PG_GLOBAL_DIR "global"
#define BACKUP_CONTROL_FILE "backup.control"
#define BACKUP_CATALOG_CONF_FILE "pg_probackup.conf"
#define BACKUP_CATALOG_PID "pg_probackup.pid"
#define DATABASE_FILE_LIST "backup_content.control"
#define PG_BACKUP_LABEL_FILE "backup_label"
#define PG_BLACK_LIST "black_list"
#define PG_TABLESPACE_MAP_FILE "tablespace_map"
#define LOG_FILENAME_DEFAULT "pg_probackup.log"
#define LOG_DIRECTORY_DEFAULT "log"
/* Directory/File permissions */
#define DIR_PERMISSION (0700)
#define FILE_PERMISSION (0600)
/* 64-bit xid support for PGPRO_EE */
#ifndef PGPRO_EE
#define XID_FMT "%u"
#endif
typedef enum CompressAlg
{
NOT_DEFINED_COMPRESS = 0,
NONE_COMPRESS,
PGLZ_COMPRESS,
ZLIB_COMPRESS,
} CompressAlg;
/* Information about single file (or dir) in backup */
typedef struct pgFile
{
char *name; /* file or directory name */
mode_t mode; /* protection (file type and permission) */
size_t size; /* size of the file */
size_t read_size; /* size of the portion read (if only some pages are
backed up, it's different from size) */
int64 write_size; /* size of the backed-up file. BYTES_INVALID means
that the file existed but was not backed up
because not modified since last backup. */
/* we need int64 here to store '-1' value */
pg_crc32 crc; /* CRC value of the file, regular file only */
char *linked; /* path of the linked file */
bool is_datafile; /* true if the file is PostgreSQL data file */
char *path; /* absolute path of the file */
Oid tblspcOid; /* tblspcOid extracted from path, if applicable */
Oid dbOid; /* dbOid extracted from path, if applicable */
Oid relOid; /* relOid extracted from path, if applicable */
char *forkName; /* forkName extracted from path, if applicable */
int segno; /* Segment number for ptrack */
int n_blocks; /* size of the file in blocks, read during DELTA backup */
bool is_cfs; /* Flag to distinguish files compressed by CFS */
bool is_database;
bool exists_in_prev; /* Mark files, both data and regular, that exist in the previous backup */
CompressAlg compress_alg; /* compression algorithm applied to the file */
volatile pg_atomic_flag lock; /* lock for synchronization of parallel threads */
datapagemap_t pagemap; /* bitmap of pages updated since previous backup */
bool pagemap_isabsent; /* Used to mark files with unknown state of pagemap,
* i.e. datafiles without _ptrack */
} pgFile;
/* Special values of datapagemap_t bitmapsize */
#define PageBitmapIsEmpty 0 /* Used to mark unchanged datafiles */
/* Current state of backup */
typedef enum BackupStatus
{
BACKUP_STATUS_INVALID, /* the pgBackup is invalid */
BACKUP_STATUS_OK, /* completed backup */
BACKUP_STATUS_ERROR, /* aborted because of unexpected error */
BACKUP_STATUS_RUNNING, /* running backup */
BACKUP_STATUS_MERGING, /* merging backups */
BACKUP_STATUS_DELETING, /* data files are being deleted */
BACKUP_STATUS_DELETED, /* data files have been deleted */
BACKUP_STATUS_DONE, /* completed but not validated yet */
BACKUP_STATUS_ORPHAN, /* backup validity is unknown but at least one parent backup is corrupted */
BACKUP_STATUS_CORRUPT /* files are corrupted, not available */
} BackupStatus;
typedef enum BackupMode
{
BACKUP_MODE_INVALID = 0,
BACKUP_MODE_DIFF_PAGE, /* incremental page backup */
BACKUP_MODE_DIFF_PTRACK, /* incremental page backup with ptrack system */
BACKUP_MODE_DIFF_DELTA, /* incremental page backup with lsn comparison */
BACKUP_MODE_FULL /* full backup */
} BackupMode;
typedef enum ProbackupSubcmd
{
NO_CMD = 0,
INIT_CMD,
ADD_INSTANCE_CMD,
DELETE_INSTANCE_CMD,
ARCHIVE_PUSH_CMD,
ARCHIVE_GET_CMD,
BACKUP_CMD,
RESTORE_CMD,
VALIDATE_CMD,
DELETE_CMD,
MERGE_CMD,
SHOW_CMD,
SET_CONFIG_CMD,
SHOW_CONFIG_CMD
} ProbackupSubcmd;
typedef enum ShowFormat
{
SHOW_PLAIN,
SHOW_JSON
} ShowFormat;
/* special values of pgBackup fields */
#define INVALID_BACKUP_ID 0 /* backup ID is not provided by user */
#define BYTES_INVALID (-1)
#define BLOCKNUM_INVALID (-1)
typedef struct pgBackupConfig
{
uint64 system_identifier;
uint32 xlog_seg_size;
char *pgdata;
const char *pgdatabase;
const char *pghost;
const char *pgport;
const char *pguser;
const char *master_host;
const char *master_port;
const char *master_db;
const char *master_user;
int replica_timeout;
int archive_timeout;
int log_level_console;
int log_level_file;
char *log_filename;
char *error_log_filename;
char *log_directory;
int log_rotation_size;
int log_rotation_age;
uint32 retention_redundancy;
uint32 retention_window;
CompressAlg compress_alg;
int compress_level;
} pgBackupConfig;
/* Information about single backup stored in backup.conf */
typedef struct pgBackup pgBackup;
struct pgBackup
{
BackupMode backup_mode; /* Mode - one of BACKUP_MODE_xxx above */
time_t backup_id; /* Identifier of the backup.
* Currently it's the same as start_time */
BackupStatus status; /* Status - one of BACKUP_STATUS_xxx above*/
TimeLineID tli; /* timeline of backup's start and stop LSNs */
XLogRecPtr start_lsn; /* backup's starting transaction log location */
XLogRecPtr stop_lsn; /* backup's finishing transaction log location */
time_t start_time; /* since this moment backup has status
* BACKUP_STATUS_RUNNING */
time_t end_time; /* the moment when backup was finished, or the moment
* when we realized that backup is broken */
time_t recovery_time; /* Earliest moment for which you can restore
* the state of the database cluster using
* this backup */
TransactionId recovery_xid; /* Earliest xid for which you can restore
* the state of the database cluster using
* this backup */
/*
* Amount of raw data. For a full backup this is the total amount of
* data, while for a differential backup it is just the amount of
* changed data taken.
* BYTES_INVALID means nothing was backed up.
*/
int64 data_bytes;
/* Size of WAL files in archive needed to restore this backup */
int64 wal_bytes;
CompressAlg compress_alg;
int compress_level;
/* Fields needed for compatibility check */
uint32 block_size;
uint32 wal_block_size;
uint32 checksum_version;
char program_version[100];
char server_version[100];
bool stream; /* Was this backup taken in stream mode?
* i.e. does it include all needed WAL files? */
bool from_replica; /* Was this backup taken from a replica? */
time_t parent_backup; /* Identifier of the parent backup,
* i.e. the base backup of this
* incremental backup. */
pgBackup *parent_backup_link;
char *primary_conninfo; /* Connection parameters of the backup
* in the format suitable for recovery.conf */
};
/* Recovery target for restore and validate subcommands */
typedef struct pgRecoveryTarget
{
bool time_specified;
time_t recovery_target_time;
/* add one more field in order to avoid deparsing recovery_target_time back */
const char *target_time_string;
bool xid_specified;
TransactionId recovery_target_xid;
/* add one more field in order to avoid deparsing recovery_target_xid back */
const char *target_xid_string;
bool lsn_specified;
XLogRecPtr recovery_target_lsn;
/* add one more field in order to avoid deparsing recovery_target_lsn back */
const char *target_lsn_string;
TimeLineID recovery_target_tli;
bool recovery_target_inclusive;
bool inclusive_specified;
bool recovery_target_immediate;
const char *recovery_target_name;
const char *recovery_target_action;
bool restore_no_validate;
} pgRecoveryTarget;
/* Union to ease operations on relation pages */
typedef union DataPage
{
PageHeaderData page_data;
char data[BLCKSZ];
} DataPage;
typedef struct
{
const char *from_root;
const char *to_root;
parray *files_list;
parray *prev_filelist;
XLogRecPtr prev_start_lsn;
PGconn *backup_conn;
PGcancel *cancel_conn;
/*
* Return value from the thread.
* 0 means there is no error, 1 - there is an error.
*/
int ret;
} backup_files_arg;
/*
* Return a pointer into str just past the given prefix.
* ex. str="/xxx/yyy/zzz", prefix="/xxx/yyy", return="zzz".
*/
#define GetRelativePath(str, prefix) \
((strlen(str) <= strlen(prefix)) ? "" : str + strlen(prefix) + 1)
/*
* Return timeline, xlog ID and record offset from an LSN of the type
* 0/B000188, usual result from pg_stop_backup() and friends.
*/
#define XLogDataFromLSN(data, xlogid, xrecoff) \
sscanf(data, "%X/%X", xlogid, xrecoff)
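/*
* For example, XLogDataFromLSN("0/B000188", &xlogid, &xrecoff) yields
* xlogid = 0x0 and xrecoff = 0xB000188.
*/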
#define IsCompressedXLogFileName(fname) \
(strlen(fname) == XLOG_FNAME_LEN + strlen(".gz") && \
strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN && \
strcmp((fname) + XLOG_FNAME_LEN, ".gz") == 0)
#if PG_VERSION_NUM >= 110000
#define GetXLogSegNo(xlrp, logSegNo, wal_segsz_bytes) \
XLByteToSeg(xlrp, logSegNo, wal_segsz_bytes)
#define GetXLogRecPtr(segno, offset, wal_segsz_bytes, dest) \
XLogSegNoOffsetToRecPtr(segno, offset, wal_segsz_bytes, dest)
#define GetXLogFileName(fname, tli, logSegNo, wal_segsz_bytes) \
XLogFileName(fname, tli, logSegNo, wal_segsz_bytes)
#define IsInXLogSeg(xlrp, logSegNo, wal_segsz_bytes) \
XLByteInSeg(xlrp, logSegNo, wal_segsz_bytes)
#else
#define GetXLogSegNo(xlrp, logSegNo, wal_segsz_bytes) \
XLByteToSeg(xlrp, logSegNo)
#define GetXLogRecPtr(segno, offset, wal_segsz_bytes, dest) \
XLogSegNoOffsetToRecPtr(segno, offset, dest)
#define GetXLogFileName(fname, tli, logSegNo, wal_segsz_bytes) \
XLogFileName(fname, tli, logSegNo)
#define IsInXLogSeg(xlrp, logSegNo, wal_segsz_bytes) \
XLByteInSeg(xlrp, logSegNo)
#endif
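/*
* Worked example, assuming 16 MB WAL segments: for xlrp = 0/B000188,
* GetXLogSegNo() yields segment number 0x0B000188 / 0x1000000 = 11, and
* GetXLogFileName() then renders "00000001000000000000000B" for tli 1.
*/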
/* directory options */
extern char *backup_path;
extern char backup_instance_path[MAXPGPATH];
extern char *pgdata;
extern char arclog_path[MAXPGPATH];
/* common options */
extern int num_threads;
extern bool stream_wal;
extern bool progress;
#if PG_VERSION_NUM >= 100000
/* In pre-10 'replication_slot' is defined in receivelog.h */
extern char *replication_slot;
#endif
/* backup options */
extern bool smooth_checkpoint;
#define ARCHIVE_TIMEOUT_DEFAULT 300
extern uint32 archive_timeout;
extern bool is_remote_backup;
extern const char *master_db;
extern const char *master_host;
extern const char *master_port;
extern const char *master_user;
#define REPLICA_TIMEOUT_DEFAULT 300
extern uint32 replica_timeout;
extern bool is_ptrack_support;
extern bool is_checksum_enabled;
extern bool exclusive_backup;
/* restore options */
extern bool restore_as_replica;
/* delete options */
extern bool delete_wal;
extern bool delete_expired;
extern bool apply_to_all;
extern bool force_delete;
/* retention options. 0 disables the option */
#define RETENTION_REDUNDANCY_DEFAULT 0
#define RETENTION_WINDOW_DEFAULT 0
extern uint32 retention_redundancy;
extern uint32 retention_window;
/* compression options */
extern CompressAlg compress_alg;
extern int compress_level;
extern bool compress_shortcut;
#define COMPRESS_ALG_DEFAULT NOT_DEFINED_COMPRESS
#define COMPRESS_LEVEL_DEFAULT 1
extern CompressAlg parse_compress_alg(const char *arg);
extern const char* deparse_compress_alg(int alg);
/* other options */
extern char *instance_name;
extern uint64 system_identifier;
extern uint32 xlog_seg_size;
/* show options */
extern ShowFormat show_format;
/* current settings */
extern pgBackup current;
extern ProbackupSubcmd backup_subcmd;
/* in dir.c */
/* exclude directory list for $PGDATA file listing */
extern const char *pgdata_exclude_dir[];
/* in backup.c */
extern int do_backup(time_t start_time);
extern BackupMode parse_backup_mode(const char *value);
extern const char *deparse_backup_mode(BackupMode mode);
extern void process_block_change(ForkNumber forknum, RelFileNode rnode,
BlockNumber blkno);
extern char *pg_ptrack_get_block(backup_files_arg *arguments,
Oid dbOid, Oid tblsOid, Oid relOid,
BlockNumber blknum,
size_t *result_size);
/* in restore.c */
extern int do_restore_or_validate(time_t target_backup_id,
pgRecoveryTarget *rt,
bool is_restore);
extern bool satisfy_timeline(const parray *timelines, const pgBackup *backup);
extern bool satisfy_recovery_target(const pgBackup *backup,
const pgRecoveryTarget *rt);
extern parray * readTimeLineHistory_probackup(TimeLineID targetTLI);
extern pgRecoveryTarget *parseRecoveryTargetOptions(
const char *target_time, const char *target_xid,
const char *target_inclusive, TimeLineID target_tli, const char* target_lsn,
bool target_immediate, const char *target_name,
const char *target_action, bool restore_no_validate);
/* in merge.c */
extern void do_merge(time_t backup_id);
/* in init.c */
extern int do_init(void);
extern int do_add_instance(void);
/* in archive.c */
extern int do_archive_push(char *wal_file_path, char *wal_file_name,
bool overwrite);
extern int do_archive_get(char *wal_file_path, char *wal_file_name);
/* in configure.c */
extern int do_configure(bool show_only);
extern void pgBackupConfigInit(pgBackupConfig *config);
extern void writeBackupCatalogConfig(FILE *out, pgBackupConfig *config);
extern void writeBackupCatalogConfigFile(pgBackupConfig *config);
extern pgBackupConfig* readBackupCatalogConfigFile(void);
extern uint32 get_config_xlog_seg_size(void);
/* in show.c */
extern int do_show(time_t requested_backup_id);
/* in delete.c */
extern int do_delete(time_t backup_id);
extern int do_retention_purge(void);
extern int do_delete_instance(void);
/* in fetch.c */
extern char *slurpFile(const char *datadir,
const char *path,
size_t *filesize,
bool safe);
extern char *fetchFile(PGconn *conn, const char *filename, size_t *filesize);
/* in help.c */
extern void help_pg_probackup(void);
extern void help_command(char *command);
/* in validate.c */
extern void pgBackupValidate(pgBackup* backup);
extern int do_validate_all(void);
/* in catalog.c */
extern pgBackup *read_backup(time_t timestamp);
extern const char *pgBackupGetBackupMode(pgBackup *backup);
extern parray *catalog_get_backup_list(time_t requested_backup_id);
extern pgBackup *catalog_get_last_data_backup(parray *backup_list,
TimeLineID tli);
extern void catalog_lock(void);
extern void pgBackupWriteControl(FILE *out, pgBackup *backup);
extern void pgBackupWriteBackupControlFile(pgBackup *backup);
extern void pgBackupWriteFileList(pgBackup *backup, parray *files,
const char *root);
extern void pgBackupGetPath(const pgBackup *backup, char *path, size_t len, const char *subdir);
extern void pgBackupGetPath2(const pgBackup *backup, char *path, size_t len,
const char *subdir1, const char *subdir2);
extern int pgBackupCreateDir(pgBackup *backup);
extern void pgBackupInit(pgBackup *backup);
extern void pgBackupCopy(pgBackup *dst, pgBackup *src);
extern void pgBackupFree(void *backup);
extern int pgBackupCompareId(const void *f1, const void *f2);
extern int pgBackupCompareIdDesc(const void *f1, const void *f2);
extern pgBackup* find_parent_backup(pgBackup *current_backup);
/* in dir.c */
extern void dir_list_file(parray *files, const char *root, bool exclude,
bool omit_symlink, bool add_root);
extern void create_data_directories(const char *data_dir,
const char *backup_dir,
bool extract_tablespaces);
extern void read_tablespace_map(parray *files, const char *backup_dir);
extern void opt_tablespace_map(pgut_option *opt, const char *arg);
extern void check_tablespace_mapping(pgBackup *backup);
extern void print_file_list(FILE *out, const parray *files, const char *root);
extern parray *dir_read_file_list(const char *root, const char *file_txt);
extern int dir_create_dir(const char *path, mode_t mode);
extern bool dir_is_empty(const char *path);
extern bool fileExists(const char *path);
extern size_t pgFileSize(const char *path);
extern pgFile *pgFileNew(const char *path, bool omit_symlink);
extern pgFile *pgFileInit(const char *path);
extern void pgFileDelete(pgFile *file);
extern void pgFileFree(void *file);
extern pg_crc32 pgFileGetCRC(const char *file_path);
extern int pgFileComparePath(const void *f1, const void *f2);
extern int pgFileComparePathDesc(const void *f1, const void *f2);
extern int pgFileCompareLinked(const void *f1, const void *f2);
extern int pgFileCompareSize(const void *f1, const void *f2);
/* in data.c */
extern bool backup_data_file(backup_files_arg* arguments,
const char *to_path, pgFile *file,
XLogRecPtr prev_backup_start_lsn,
BackupMode backup_mode,
CompressAlg calg, int clevel);
extern void restore_data_file(const char *to_path,
pgFile *file, bool allow_truncate,
bool write_header);
extern bool copy_file(const char *from_root, const char *to_root, pgFile *file);
extern void move_file(const char *from_root, const char *to_root, pgFile *file);
extern void push_wal_file(const char *from_path, const char *to_path,
bool is_compress, bool overwrite);
extern void get_wal_file(const char *from_path, const char *to_path);
extern bool calc_file_checksum(pgFile *file);
/* parsexlog.c */
extern void extractPageMap(const char *datadir,
TimeLineID tli, uint32 seg_size,
XLogRecPtr startpoint, XLogRecPtr endpoint,
bool prev_seg, parray *backup_files_list);
extern void validate_wal(pgBackup *backup,
const char *archivedir,
time_t target_time,
TransactionId target_xid,
XLogRecPtr target_lsn,
TimeLineID tli, uint32 seg_size);
extern bool read_recovery_info(const char *archivedir, TimeLineID tli,
uint32 seg_size,
XLogRecPtr start_lsn, XLogRecPtr stop_lsn,
time_t *recovery_time,
TransactionId *recovery_xid);
extern bool wal_contains_lsn(const char *archivedir, XLogRecPtr target_lsn,
TimeLineID target_tli, uint32 seg_size);
/* in util.c */
extern TimeLineID get_current_timeline(bool safe);
extern void sanityChecks(void);
extern void time2iso(char *buf, size_t len, time_t time);
extern const char *status2str(BackupStatus status);
extern void remove_trailing_space(char *buf, int comment_mark);
extern void remove_not_digit(char *buf, size_t len, const char *str);
extern uint32 get_data_checksum_version(bool safe);
extern const char *base36enc(long unsigned int value);
extern char *base36enc_dup(long unsigned int value);
extern long unsigned int base36dec(const char *text);
extern uint64 get_system_identifier(char *pgdata);
extern uint64 get_remote_system_identifier(PGconn *conn);
extern uint32 get_xlog_seg_size(char *pgdata_path);
extern pg_time_t timestamptz_to_time_t(TimestampTz t);
extern int parse_server_version(char *server_version_str);
/* in status.c */
extern bool is_pg_running(void);
#ifdef WIN32
#ifdef _DEBUG
#define lseek _lseek
#define open _open
#define fstat _fstat
#define read _read
#define close _close
#define write _write
#define mkdir(dir,mode) _mkdir(dir)
#endif
#endif
#endif /* PG_PROBACKUP_H */

920
src/restore.c Normal file
View File

@ -0,0 +1,920 @@
/*-------------------------------------------------------------------------
*
* restore.c: restore DB cluster and archived WAL.
*
* Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
* Portions Copyright (c) 2015-2017, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#include "pg_probackup.h"
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <pthread.h>
#include "catalog/pg_control.h"
#include "utils/logger.h"
#include "utils/thread.h"
typedef struct
{
parray *files;
pgBackup *backup;
/*
* Return value from the thread.
* 0 means there is no error, 1 - there is an error.
*/
int ret;
} restore_files_arg;
static void restore_backup(pgBackup *backup);
static void create_recovery_conf(time_t backup_id,
pgRecoveryTarget *rt,
pgBackup *backup);
static void *restore_files(void *arg);
static void remove_deleted_files(pgBackup *backup);
/*
* Entry point of pg_probackup RESTORE and VALIDATE subcommands.
*/
int
do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
bool is_restore)
{
int i = 0;
parray *backups;
pgBackup *current_backup = NULL;
pgBackup *dest_backup = NULL;
pgBackup *base_full_backup = NULL;
pgBackup *corrupted_backup = NULL;
int dest_backup_index = 0;
int base_full_backup_index = 0;
int corrupted_backup_index = 0;
char *action = is_restore ? "Restore":"Validate";
if (is_restore)
{
if (pgdata == NULL)
elog(ERROR,
"required parameter not specified: PGDATA (-D, --pgdata)");
/* Check if the restore destination is empty */
if (!dir_is_empty(pgdata))
elog(ERROR, "restore destination is not empty: \"%s\"", pgdata);
}
if (instance_name == NULL)
elog(ERROR, "required parameter not specified: --instance");
elog(LOG, "%s begin.", action);
/* Get exclusive lock of backup catalog */
catalog_lock();
/* Get list of all backups sorted in order of descending start time */
backups = catalog_get_backup_list(INVALID_BACKUP_ID);
/* Find backup range we should restore or validate. */
while ((i < parray_num(backups)) && !dest_backup)
{
current_backup = (pgBackup *) parray_get(backups, i);
i++;
/* Skip all backups which started after target backup */
if (target_backup_id && current_backup->start_time > target_backup_id)
continue;
/*
* [PGPRO-1164] If BACKUP_ID is not provided for restore command,
* we must find the first valid(!) backup.
*/
if (is_restore &&
target_backup_id == INVALID_BACKUP_ID &&
current_backup->status != BACKUP_STATUS_OK)
{
elog(WARNING, "Skipping backup %s, because it has non-valid status: %s",
base36enc(current_backup->start_time), status2str(current_backup->status));
continue;
}
/*
* We found target backup. Check its status and
* ensure that it satisfies recovery target.
*/
if ((target_backup_id == current_backup->start_time
|| target_backup_id == INVALID_BACKUP_ID))
{
/* The backup is not OK. In case of CORRUPT, ORPHAN or DONE
* revalidation can be done; in other cases throw an error.
*/
if (current_backup->status != BACKUP_STATUS_OK)
{
if (current_backup->status == BACKUP_STATUS_DONE ||
current_backup->status == BACKUP_STATUS_ORPHAN ||
current_backup->status == BACKUP_STATUS_CORRUPT)
elog(WARNING, "Backup %s has status: %s",
base36enc(current_backup->start_time), status2str(current_backup->status));
else
elog(ERROR, "Backup %s has status: %s",
base36enc(current_backup->start_time), status2str(current_backup->status));
}
if (rt->recovery_target_tli)
{
parray *timelines;
elog(LOG, "target timeline ID = %u", rt->recovery_target_tli);
/* Read timeline history files from archives */
timelines = readTimeLineHistory_probackup(rt->recovery_target_tli);
if (!satisfy_timeline(timelines, current_backup))
{
if (target_backup_id != INVALID_BACKUP_ID)
elog(ERROR, "target backup %s does not satisfy target timeline",
base36enc(target_backup_id));
else
/* Try to find another backup that satisfies target timeline */
continue;
}
}
if (!satisfy_recovery_target(current_backup, rt))
{
if (target_backup_id != INVALID_BACKUP_ID)
elog(ERROR, "target backup %s does not satisfy restore options",
base36enc(target_backup_id));
else
/* Try to find another backup that satisfies target options */
continue;
}
/*
* Backup is fine and satisfies all recovery options.
* Save it as dest_backup
*/
dest_backup = current_backup;
dest_backup_index = i-1;
}
}
if (dest_backup == NULL)
elog(ERROR, "Backup satisfying target options is not found.");
/* If we already found dest_backup, look for full backup. */
if (dest_backup)
{
base_full_backup = current_backup;
if (current_backup->backup_mode != BACKUP_MODE_FULL)
{
base_full_backup = find_parent_backup(current_backup);
if (base_full_backup == NULL)
elog(ERROR, "Valid full backup for backup %s is not found.",
base36enc(current_backup->start_time));
}
/*
* We have found full backup by link,
* now we need to walk the list to find its index.
*
* TODO I think we should rewrite it someday to use double linked list
* and avoid relying on sort order anymore.
*/
for (i = dest_backup_index; i < parray_num(backups); i++)
{
pgBackup * temp_backup = (pgBackup *) parray_get(backups, i);
if (temp_backup->start_time == base_full_backup->start_time)
{
base_full_backup_index = i;
break;
}
}
}
if (base_full_backup == NULL)
elog(ERROR, "Full backup satisfying target options is not found.");
/*
* Ensure that directories provided in tablespace mapping are valid
* i.e. empty or not exist.
*/
if (is_restore)
check_tablespace_mapping(dest_backup);
if (!is_restore || !rt->restore_no_validate)
{
if (dest_backup->backup_mode != BACKUP_MODE_FULL)
elog(INFO, "Validating parents for backup %s", base36enc(dest_backup->start_time));
/*
* Validate backups from base_full_backup to dest_backup.
*/
for (i = base_full_backup_index; i >= dest_backup_index; i--)
{
pgBackup *backup = (pgBackup *) parray_get(backups, i);
pgBackupValidate(backup);
/* Maybe we should be more paranoid and check for !BACKUP_STATUS_OK? */
if (backup->status == BACKUP_STATUS_CORRUPT)
{
corrupted_backup = backup;
corrupted_backup_index = i;
break;
}
/* We do not validate WAL files of intermediate backups;
* this is done to speed up restore.
*/
}
/* There is no point in WAL validation if there is a corrupted
* backup between base_full_backup and dest_backup.
*/
if (!corrupted_backup)
/*
* Validate corresponding WAL files.
* We pass base_full_backup timeline as last argument to this function,
* because it's needed to form the name of xlog file.
*/
validate_wal(dest_backup, arclog_path, rt->recovery_target_time,
rt->recovery_target_xid, rt->recovery_target_lsn,
base_full_backup->tli, xlog_seg_size);
/* Mark every incremental backup between the corrupted backup and the nearest FULL backup as orphan */
if (corrupted_backup)
{
for (i = corrupted_backup_index - 1; i >= 0; i--)
{
pgBackup *backup = (pgBackup *) parray_get(backups, i);
/* Mark incremental OK backup as orphan */
if (backup->backup_mode == BACKUP_MODE_FULL)
break;
if (backup->status != BACKUP_STATUS_OK)
continue;
else
{
char *backup_id,
*corrupted_backup_id;
backup->status = BACKUP_STATUS_ORPHAN;
pgBackupWriteBackupControlFile(backup);
backup_id = base36enc_dup(backup->start_time);
corrupted_backup_id = base36enc_dup(corrupted_backup->start_time);
elog(WARNING, "Backup %s is orphaned because his parent %s is corrupted",
backup_id, corrupted_backup_id);
free(backup_id);
free(corrupted_backup_id);
}
}
}
}
/*
* If dest backup is corrupted or was orphaned in previous check
* produce corresponding error message
*/
if (dest_backup->status == BACKUP_STATUS_OK)
{
if (rt->restore_no_validate)
elog(INFO, "Backup %s is used without validation.", base36enc(dest_backup->start_time));
else
elog(INFO, "Backup %s is valid.", base36enc(dest_backup->start_time));
}
else if (dest_backup->status == BACKUP_STATUS_CORRUPT)
elog(ERROR, "Backup %s is corrupt.", base36enc(dest_backup->start_time));
else if (dest_backup->status == BACKUP_STATUS_ORPHAN)
elog(ERROR, "Backup %s is orphan.", base36enc(dest_backup->start_time));
else
elog(ERROR, "Backup %s has status: %s",
base36enc(dest_backup->start_time), status2str(dest_backup->status));
/* We ensured that all backups are valid, now restore if required */
if (is_restore)
{
for (i = base_full_backup_index; i >= dest_backup_index; i--)
{
pgBackup *backup = (pgBackup *) parray_get(backups, i);
if (rt->lsn_specified && parse_server_version(backup->server_version) < 100000)
elog(ERROR, "Backup %s was created for version %s which doesn't support recovery_target_lsn",
base36enc(backup->start_time), backup->server_version);
restore_backup(backup);
}
/*
* Delete files which are not in dest backup file list. Files which were
* deleted between previous and current backup are not in the list.
*/
if (dest_backup->backup_mode != BACKUP_MODE_FULL)
remove_deleted_files(dest_backup);
/* Create recovery.conf with given recovery target parameters */
create_recovery_conf(target_backup_id, rt, dest_backup);
}
/* cleanup */
parray_walk(backups, pgBackupFree);
parray_free(backups);
elog(INFO, "%s of backup %s completed.",
action, base36enc(dest_backup->start_time));
return 0;
}
/*
* Restore one backup.
*/
void
restore_backup(pgBackup *backup)
{
char timestamp[100];
char this_backup_path[MAXPGPATH];
char database_path[MAXPGPATH];
char list_path[MAXPGPATH];
parray *files;
int i;
/* arrays with meta info for multi-threaded restore */
pthread_t *threads;
restore_files_arg *threads_args;
bool restore_isok = true;
if (backup->status != BACKUP_STATUS_OK)
elog(ERROR, "Backup %s cannot be restored because it is not valid",
base36enc(backup->start_time));
/* confirm block size compatibility */
if (backup->block_size != BLCKSZ)
elog(ERROR,
"BLCKSZ (%d) is not compatible (%d expected)",
backup->block_size, BLCKSZ);
if (backup->wal_block_size != XLOG_BLCKSZ)
elog(ERROR,
"XLOG_BLCKSZ (%d) is not compatible (%d expected)",
backup->wal_block_size, XLOG_BLCKSZ);
time2iso(timestamp, lengthof(timestamp), backup->start_time);
elog(LOG, "restoring database from backup %s", timestamp);
/*
* Restore backup directories.
* this_backup_path = $BACKUP_PATH/backups/instance_name/backup_id
*/
pgBackupGetPath(backup, this_backup_path, lengthof(this_backup_path), NULL);
create_data_directories(pgdata, this_backup_path, true);
/*
* Get list of files which need to be restored.
*/
pgBackupGetPath(backup, database_path, lengthof(database_path), DATABASE_DIR);
pgBackupGetPath(backup, list_path, lengthof(list_path), DATABASE_FILE_LIST);
files = dir_read_file_list(database_path, list_path);
threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads);
threads_args = (restore_files_arg *) palloc(sizeof(restore_files_arg)*num_threads);
/* setup threads */
for (i = 0; i < parray_num(files); i++)
{
pgFile *file = (pgFile *) parray_get(files, i);
pg_atomic_clear_flag(&file->lock);
}
/* Restore files into target directory */
for (i = 0; i < num_threads; i++)
{
restore_files_arg *arg = &(threads_args[i]);
arg->files = files;
arg->backup = backup;
/* By default assume the thread failed */
threads_args[i].ret = 1;
elog(LOG, "Start thread for num:%li", parray_num(files));
pthread_create(&threads[i], NULL, restore_files, arg);
}
/* Wait for threads */
for (i = 0; i < num_threads; i++)
{
pthread_join(threads[i], NULL);
if (threads_args[i].ret == 1)
restore_isok = false;
}
if (!restore_isok)
elog(ERROR, "Data files restoring failed");
pfree(threads);
pfree(threads_args);
/* cleanup */
parray_walk(files, pgFileFree);
parray_free(files);
if (log_level_console <= LOG || log_level_file <= LOG)
elog(LOG, "restore %s backup completed", base36enc(backup->start_time));
}
/*
* Delete files which are not in backup's file list from target pgdata.
* It is necessary to restore incremental backup correctly.
* Files which were deleted between previous and current backup
* are not in the backup's filelist.
*/
static void
remove_deleted_files(pgBackup *backup)
{
parray *files;
parray *files_restored;
char filelist_path[MAXPGPATH];
int i;
pgBackupGetPath(backup, filelist_path, lengthof(filelist_path), DATABASE_FILE_LIST);
/* Read backup's filelist using target database path as base path */
files = dir_read_file_list(pgdata, filelist_path);
parray_qsort(files, pgFileComparePathDesc);
/* Get list of files actually existing in target database */
files_restored = parray_new();
dir_list_file(files_restored, pgdata, true, true, false);
/* To delete leaves first, sort in reverse order */
parray_qsort(files_restored, pgFileComparePathDesc);
for (i = 0; i < parray_num(files_restored); i++)
{
pgFile *file = (pgFile *) parray_get(files_restored, i);
/* If the file is not in the file list, delete it */
if (parray_bsearch(files, file, pgFileComparePathDesc) == NULL)
{
pgFileDelete(file);
if (log_level_console <= LOG || log_level_file <= LOG)
elog(LOG, "deleted %s", GetRelativePath(file->path, pgdata));
}
}
/* cleanup */
parray_walk(files, pgFileFree);
parray_free(files);
parray_walk(files_restored, pgFileFree);
parray_free(files_restored);
}
/*
* Restore files into $PGDATA.
*/
static void *
restore_files(void *arg)
{
int i;
restore_files_arg *arguments = (restore_files_arg *)arg;
for (i = 0; i < parray_num(arguments->files); i++)
{
char from_root[MAXPGPATH];
char *rel_path;
pgFile *file = (pgFile *) parray_get(arguments->files, i);
if (!pg_atomic_test_set_flag(&file->lock))
continue;
pgBackupGetPath(arguments->backup, from_root,
lengthof(from_root), DATABASE_DIR);
/* check for interrupt */
if (interrupted)
elog(ERROR, "interrupted during restore database");
rel_path = GetRelativePath(file->path,from_root);
if (progress)
elog(LOG, "Progress: (%d/%lu). Process file %s ",
i + 1, (unsigned long) parray_num(arguments->files), rel_path);
/*
* For PAGE and PTRACK backups skip files which haven't changed
* since previous backup and thus were not backed up.
* We cannot do the same when restoring DELTA backup because we need information
* about every file to correctly truncate them.
*/
if (file->write_size == BYTES_INVALID &&
(arguments->backup->backup_mode == BACKUP_MODE_DIFF_PAGE
|| arguments->backup->backup_mode == BACKUP_MODE_DIFF_PTRACK))
{
elog(VERBOSE, "The file didn`t change. Skip restore: %s", file->path);
continue;
}
/* Directories were created before */
if (S_ISDIR(file->mode))
{
elog(VERBOSE, "directory, skip");
continue;
}
/* Do not restore tablespace_map file */
if (path_is_prefix_of_path(PG_TABLESPACE_MAP_FILE, rel_path))
{
elog(VERBOSE, "skip tablespace_map");
continue;
}
/*
* restore the file.
* We treat datafiles separately, because they were backed up block by
* block and have BackupPageHeader meta information, so we cannot just
* copy the file from backup.
*/
elog(VERBOSE, "Restoring file %s, is_datafile %i, is_cfs %i",
file->path, file->is_datafile?1:0, file->is_cfs?1:0);
if (file->is_datafile && !file->is_cfs)
{
char to_path[MAXPGPATH];
join_path_components(to_path, pgdata,
file->path + strlen(from_root) + 1);
restore_data_file(to_path, file,
arguments->backup->backup_mode == BACKUP_MODE_DIFF_DELTA,
false);
}
else
copy_file(from_root, pgdata, file);
/* print size of restored file */
if (file->write_size != BYTES_INVALID)
elog(LOG, "Restored file %s : " INT64_FORMAT " bytes",
file->path, file->write_size);
}
/* Data files restoring is successful */
arguments->ret = 0;
return NULL;
}
/* Create recovery.conf with given recovery target parameters */
static void
create_recovery_conf(time_t backup_id,
pgRecoveryTarget *rt,
pgBackup *backup)
{
char path[MAXPGPATH];
FILE *fp;
bool need_restore_conf = false;
if (!backup->stream
|| (rt->time_specified || rt->xid_specified))
need_restore_conf = true;
/* No need to generate recovery.conf at all. */
if (!(need_restore_conf || restore_as_replica))
return;
elog(LOG, "----------------------------------------");
elog(LOG, "creating recovery.conf");
snprintf(path, lengthof(path), "%s/recovery.conf", pgdata);
fp = fopen(path, "wt");
if (fp == NULL)
elog(ERROR, "cannot open recovery.conf \"%s\": %s", path,
strerror(errno));
fprintf(fp, "# recovery.conf generated by pg_probackup %s\n",
PROGRAM_VERSION);
if (need_restore_conf)
{
fprintf(fp, "restore_command = '%s archive-get -B %s --instance %s "
"--wal-file-path %%p --wal-file-name %%f'\n",
PROGRAM_NAME, backup_path, instance_name);
/*
* We've already checked that only one of the four following mutually
* exclusive options is specified, so the order of calls is insignificant.
*/
if (rt->recovery_target_name)
fprintf(fp, "recovery_target_name = '%s'\n", rt->recovery_target_name);
if (rt->time_specified)
fprintf(fp, "recovery_target_time = '%s'\n", rt->target_time_string);
if (rt->xid_specified)
fprintf(fp, "recovery_target_xid = '%s'\n", rt->target_xid_string);
if (rt->recovery_target_lsn)
fprintf(fp, "recovery_target_lsn = '%s'\n", rt->target_lsn_string);
if (rt->recovery_target_immediate)
fprintf(fp, "recovery_target = 'immediate'\n");
if (rt->inclusive_specified)
fprintf(fp, "recovery_target_inclusive = '%s'\n",
rt->recovery_target_inclusive?"true":"false");
if (rt->recovery_target_tli)
fprintf(fp, "recovery_target_timeline = '%u'\n", rt->recovery_target_tli);
if (rt->recovery_target_action)
fprintf(fp, "recovery_target_action = '%s'\n", rt->recovery_target_action);
}
if (restore_as_replica)
{
fprintf(fp, "standby_mode = 'on'\n");
if (backup->primary_conninfo)
fprintf(fp, "primary_conninfo = '%s'\n", backup->primary_conninfo);
}
if (fflush(fp) != 0 ||
fsync(fileno(fp)) != 0 ||
fclose(fp))
elog(ERROR, "cannot write recovery.conf \"%s\": %s", path,
strerror(errno));
}
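/*
* An illustrative recovery.conf produced for an archive backup restored
* with --time (paths and values are assumed for the example):
*
* # recovery.conf generated by pg_probackup <version>
* restore_command = 'pg_probackup archive-get -B /backup --instance node --wal-file-path %p --wal-file-name %f'
* recovery_target_time = '2018-10-31 13:44:16'
* recovery_target_action = 'pause'
*/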
/*
* Try to read a timeline's history file.
*
* If successful, return the list of component TLIs (the ancestor
* timelines followed by target timeline). If we cannot find the history file,
* assume that the timeline has no parents, and return a list of just the
* specified timeline ID.
* based on readTimeLineHistory() in timeline.c
*/
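/*
* For example, a history file for timeline 3 typically contains lines like
* (values assumed for illustration):
*
* 1	0/9000000	no recovery target specified
* 2	0/B000188	after reaching recovery target
*
* Only "<TLI><tab><switchpoint>" is parsed below; the remainder of each
* line is ignored.
*/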
parray *
readTimeLineHistory_probackup(TimeLineID targetTLI)
{
parray *result;
char path[MAXPGPATH];
char fline[MAXPGPATH];
FILE *fd = NULL;
TimeLineHistoryEntry *entry;
TimeLineHistoryEntry *last_timeline = NULL;
/* Look for timeline history file in archlog_path */
snprintf(path, lengthof(path), "%s/%08X.history", arclog_path,
targetTLI);
/* Timeline 1 does not have a history file */
if (targetTLI != 1)
{
fd = fopen(path, "rt");
if (fd == NULL)
{
if (errno != ENOENT)
elog(ERROR, "could not open file \"%s\": %s", path,
strerror(errno));
/* There is no history file for target timeline */
elog(ERROR, "recovery target timeline %u does not exist",
targetTLI);
}
}
result = parray_new();
/*
* Parse the file...
*/
while (fd && fgets(fline, sizeof(fline), fd) != NULL)
{
char *ptr;
TimeLineID tli;
uint32 switchpoint_hi;
uint32 switchpoint_lo;
int nfields;
for (ptr = fline; *ptr; ptr++)
{
if (!isspace((unsigned char) *ptr))
break;
}
if (*ptr == '\0' || *ptr == '#')
continue;
nfields = sscanf(fline, "%u\t%X/%X", &tli, &switchpoint_hi, &switchpoint_lo);
if (nfields < 1)
{
/* expect a numeric timeline ID as first field of line */
elog(ERROR,
"syntax error in history file: %s. Expected a numeric timeline ID.",
fline);
}
if (nfields != 3)
elog(ERROR,
"syntax error in history file: %s. Expected a transaction log switchpoint location.",
fline);
if (last_timeline && tli <= last_timeline->tli)
elog(ERROR,
"Timeline IDs must be in increasing sequence.");
entry = pgut_new(TimeLineHistoryEntry);
entry->tli = tli;
entry->end = ((uint64) switchpoint_hi << 32) | switchpoint_lo;
last_timeline = entry;
/* Build list with newest item first */
parray_insert(result, 0, entry);
/* we ignore the remainder of each line */
}
if (fd)
fclose(fd);
if (last_timeline && targetTLI <= last_timeline->tli)
elog(ERROR, "Timeline IDs must be less than child timeline's ID.");
/* append target timeline */
entry = pgut_new(TimeLineHistoryEntry);
entry->tli = targetTLI;
/* The target timeline has no end yet, so use the maximum possible LSN */
entry->end = (XLogRecPtr) -1; /* i.e. UINT64_MAX */
parray_insert(result, 0, entry);
return result;
}
bool
satisfy_recovery_target(const pgBackup *backup, const pgRecoveryTarget *rt)
{
if (rt->xid_specified)
return backup->recovery_xid <= rt->recovery_target_xid;
if (rt->time_specified)
return backup->recovery_time <= rt->recovery_target_time;
if (rt->lsn_specified)
return backup->stop_lsn <= rt->recovery_target_lsn;
return true;
}
bool
satisfy_timeline(const parray *timelines, const pgBackup *backup)
{
int i;
for (i = 0; i < parray_num(timelines); i++)
{
TimeLineHistoryEntry *timeline;
timeline = (TimeLineHistoryEntry *) parray_get(timelines, i);
if (backup->tli == timeline->tli &&
backup->stop_lsn < timeline->end)
return true;
}
return false;
}
/*
* Get recovery options in the string format, parse them
* and fill up the pgRecoveryTarget structure.
*/
pgRecoveryTarget *
parseRecoveryTargetOptions(const char *target_time,
const char *target_xid,
const char *target_inclusive,
TimeLineID target_tli,
const char *target_lsn,
bool target_immediate,
const char *target_name,
const char *target_action,
bool restore_no_validate)
{
time_t dummy_time;
TransactionId dummy_xid;
bool dummy_bool;
XLogRecPtr dummy_lsn;
/*
* count the number of the mutually exclusive options which may specify
* recovery target. If final value > 1, throw an error.
*/
int recovery_target_specified = 0;
pgRecoveryTarget *rt = pgut_new(pgRecoveryTarget);
/* fill all options with default values */
rt->time_specified = false;
rt->xid_specified = false;
rt->inclusive_specified = false;
rt->lsn_specified = false;
rt->recovery_target_time = 0;
rt->recovery_target_xid = 0;
rt->recovery_target_lsn = InvalidXLogRecPtr;
rt->target_time_string = NULL;
rt->target_xid_string = NULL;
rt->target_lsn_string = NULL;
rt->recovery_target_inclusive = false;
rt->recovery_target_tli = 0;
rt->recovery_target_immediate = false;
rt->recovery_target_name = NULL;
rt->recovery_target_action = NULL;
rt->restore_no_validate = false;
/* parse given options */
if (target_time)
{
recovery_target_specified++;
rt->time_specified = true;
rt->target_time_string = target_time;
if (parse_time(target_time, &dummy_time, false))
rt->recovery_target_time = dummy_time;
else
elog(ERROR, "Invalid value of --time option %s", target_time);
}
if (target_xid)
{
recovery_target_specified++;
rt->xid_specified = true;
rt->target_xid_string = target_xid;
#ifdef PGPRO_EE
if (parse_uint64(target_xid, &dummy_xid, 0))
#else
if (parse_uint32(target_xid, &dummy_xid, 0))
#endif
rt->recovery_target_xid = dummy_xid;
else
elog(ERROR, "Invalid value of --xid option %s", target_xid);
}
if (target_lsn)
{
recovery_target_specified++;
rt->lsn_specified = true;
rt->target_lsn_string = target_lsn;
if (parse_lsn(target_lsn, &dummy_lsn))
rt->recovery_target_lsn = dummy_lsn;
else
elog(ERROR, "Invalid value of --lsn option %s", target_lsn);
}
if (target_inclusive)
{
rt->inclusive_specified = true;
if (parse_bool(target_inclusive, &dummy_bool))
rt->recovery_target_inclusive = dummy_bool;
else
elog(ERROR, "Invalid value of --inclusive option %s", target_inclusive);
}
rt->recovery_target_tli = target_tli;
if (target_immediate)
{
recovery_target_specified++;
rt->recovery_target_immediate = target_immediate;
}
if (restore_no_validate)
{
rt->restore_no_validate = restore_no_validate;
}
if (target_name)
{
recovery_target_specified++;
rt->recovery_target_name = target_name;
}
if (target_action)
{
rt->recovery_target_action = target_action;
if ((strcmp(target_action, "pause") != 0)
&& (strcmp(target_action, "promote") != 0)
&& (strcmp(target_action, "shutdown") != 0))
elog(ERROR, "Invalid value of --recovery-target-action option %s", target_action);
}
else
{
/* Default recovery target action is pause */
rt->recovery_target_action = "pause";
}
/* More than one mutually exclusive option was defined. */
if (recovery_target_specified > 1)
elog(ERROR, "At most one of --immediate, --target-name, --time, --xid, or --lsn can be used");
/* If none of the options is defined, '--inclusive' option is meaningless */
if (!(rt->xid_specified || rt->time_specified || rt->lsn_specified) && rt->recovery_target_inclusive)
elog(ERROR, "--inclusive option applies when either --time or --xid is specified");
return rt;
}

500
src/show.c Normal file
View File

@ -0,0 +1,500 @@
/*-------------------------------------------------------------------------
*
* show.c: show backup information.
*
* Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
* Portions Copyright (c) 2015-2018, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#include "pg_probackup.h"
#include <time.h>
#include <unistd.h>
#include <dirent.h>
#include <sys/stat.h>
#include "pqexpbuffer.h"
#include "utils/json.h"
static void show_instance_start(void);
static void show_instance_end(void);
static void show_instance(time_t requested_backup_id, bool show_name);
static int show_backup(time_t requested_backup_id);
static void show_instance_plain(parray *backup_list, bool show_name);
static void show_instance_json(parray *backup_list);
static PQExpBufferData show_buf;
static bool first_instance = true;
static int32 json_level = 0;
int
do_show(time_t requested_backup_id)
{
if (instance_name == NULL &&
requested_backup_id != INVALID_BACKUP_ID)
elog(ERROR, "You must specify --instance to use --backup_id option");
if (instance_name == NULL)
{
/* Show list of instances */
char path[MAXPGPATH];
DIR *dir;
struct dirent *dent;
/* open directory and list contents */
join_path_components(path, backup_path, BACKUPS_DIR);
dir = opendir(path);
if (dir == NULL)
elog(ERROR, "Cannot open directory \"%s\": %s",
path, strerror(errno));
show_instance_start();
while (errno = 0, (dent = readdir(dir)) != NULL)
{
char child[MAXPGPATH];
struct stat st;
/* skip entries pointing to the current or parent dir */
if (strcmp(dent->d_name, ".") == 0 ||
strcmp(dent->d_name, "..") == 0)
continue;
join_path_components(child, path, dent->d_name);
if (lstat(child, &st) == -1)
elog(ERROR, "Cannot stat file \"%s\": %s",
child, strerror(errno));
if (!S_ISDIR(st.st_mode))
continue;
instance_name = dent->d_name;
sprintf(backup_instance_path, "%s/%s/%s", backup_path, BACKUPS_DIR, instance_name);
show_instance(INVALID_BACKUP_ID, true);
}
if (errno)
elog(ERROR, "Cannot read directory \"%s\": %s",
path, strerror(errno));
if (closedir(dir))
elog(ERROR, "Cannot close directory \"%s\": %s",
path, strerror(errno));
show_instance_end();
return 0;
}
else if (requested_backup_id == INVALID_BACKUP_ID ||
show_format == SHOW_JSON)
{
show_instance_start();
show_instance(requested_backup_id, false);
show_instance_end();
return 0;
}
else
return show_backup(requested_backup_id);
}
static void
pretty_size(int64 size, char *buf, size_t len)
{
int exp = 0;
/* a negative size means the size is invalid */
if (size < 0)
{
strncpy(buf, "----", len);
return;
}
/* determine postfix */
while (size > 9999)
{
++exp;
size /= 1000;
}
switch (exp)
{
case 0:
snprintf(buf, len, "%dB", (int) size);
break;
case 1:
snprintf(buf, len, "%dkB", (int) size);
break;
case 2:
snprintf(buf, len, "%dMB", (int) size);
break;
case 3:
snprintf(buf, len, "%dGB", (int) size);
break;
case 4:
snprintf(buf, len, "%dTB", (int) size);
break;
case 5:
snprintf(buf, len, "%dPB", (int) size);
break;
default:
strncpy(buf, "***", len);
break;
}
}
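/*
* For example, pretty_size(123456789, buf, len) reduces 123456789 ->
* 123456 -> 123 (exp = 2) and prints "123MB"; a negative size prints
* "----".
*/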
static TimeLineID
get_parent_tli(TimeLineID child_tli)
{
TimeLineID result = 0;
char path[MAXPGPATH];
char fline[MAXPGPATH];
FILE *fd;
/* Timeline 1 does not have a history file and parent timeline */
if (child_tli == 1)
return 0;
/* Search history file in archives */
snprintf(path, lengthof(path), "%s/%08X.history", arclog_path,
child_tli);
fd = fopen(path, "rt");
if (fd == NULL)
{
if (errno != ENOENT)
elog(ERROR, "could not open file \"%s\": %s", path,
strerror(errno));
/* Did not find history file, do not raise the error */
return 0;
}
/*
* Parse the file...
*/
while (fgets(fline, sizeof(fline), fd) != NULL)
{
/* skip leading whitespace and check for # comment */
char *ptr;
char *endptr;
for (ptr = fline; *ptr; ptr++)
{
if (!IsSpace(*ptr))
break;
}
if (*ptr == '\0' || *ptr == '#')
continue;
/* expect a numeric timeline ID as first field of line */
result = (TimeLineID) strtoul(ptr, &endptr, 0);
if (endptr == ptr)
elog(ERROR,
"syntax error(timeline ID) in history file: %s",
fline);
}
fclose(fd);
/* TLI of the last line is parent TLI */
return result;
}
/*
* Initialize instance visualization.
*/
static void
show_instance_start(void)
{
initPQExpBuffer(&show_buf);
if (show_format == SHOW_PLAIN)
return;
first_instance = true;
json_level = 0;
appendPQExpBufferChar(&show_buf, '[');
json_level++;
}
/*
* Finalize instance visualization.
*/
static void
show_instance_end(void)
{
if (show_format == SHOW_JSON)
appendPQExpBufferStr(&show_buf, "\n]\n");
fputs(show_buf.data, stdout);
termPQExpBuffer(&show_buf);
}
/*
* Show brief meta information about all backups in the backup instance.
*/
static void
show_instance(time_t requested_backup_id, bool show_name)
{
parray *backup_list;
backup_list = catalog_get_backup_list(requested_backup_id);
if (show_format == SHOW_PLAIN)
show_instance_plain(backup_list, show_name);
else if (show_format == SHOW_JSON)
show_instance_json(backup_list);
else
elog(ERROR, "Invalid show format %d", (int) show_format);
/* cleanup */
parray_walk(backup_list, pgBackupFree);
parray_free(backup_list);
}
/*
* Show detailed meta information about specified backup.
*/
static int
show_backup(time_t requested_backup_id)
{
pgBackup *backup;
backup = read_backup(requested_backup_id);
if (backup == NULL)
{
elog(INFO, "Requested backup \"%s\" is not found.",
/* No need to free base36enc's result, we return anyway */
base36enc(requested_backup_id));
/* This is not an error */
return 0;
}
if (show_format == SHOW_PLAIN)
pgBackupWriteControl(stdout, backup);
else
elog(ERROR, "Invalid show format %d", (int) show_format);
/* cleanup */
pgBackupFree(backup);
return 0;
}
/*
* Plain output.
*/
/*
* Show instance backups in plain format.
*/
static void
show_instance_plain(parray *backup_list, bool show_name)
{
int i;
if (show_name)
printfPQExpBuffer(&show_buf, "\nBACKUP INSTANCE '%s'\n", instance_name);
/* if you add new fields here, fix the header */
/* show header */
appendPQExpBufferStr(&show_buf,
"============================================================================================================================================\n");
appendPQExpBufferStr(&show_buf,
" Instance Version ID Recovery time Mode WAL Current/Parent TLI Time Data Start LSN Stop LSN Status \n");
appendPQExpBufferStr(&show_buf,
"============================================================================================================================================\n");
for (i = 0; i < parray_num(backup_list); i++)
{
pgBackup *backup = parray_get(backup_list, i);
TimeLineID parent_tli;
char timestamp[100] = "----";
char duration[20] = "----";
char data_bytes_str[10] = "----";
if (backup->recovery_time != (time_t) 0)
time2iso(timestamp, lengthof(timestamp), backup->recovery_time);
if (backup->end_time != (time_t) 0)
snprintf(duration, lengthof(duration), "%.*lfs", 0,
difftime(backup->end_time, backup->start_time));
/*
* Calculate the Data field: for a full backup this shows the total
* amount of data, while for a differential backup it is only the
* amount of changed data accumulated.
*/
pretty_size(backup->data_bytes, data_bytes_str,
lengthof(data_bytes_str));
/* Get parent timeline before printing */
parent_tli = get_parent_tli(backup->tli);
appendPQExpBuffer(&show_buf,
" %-11s %-8s %-6s %-22s %-6s %-7s %3d / %-3d %5s %6s %2X/%-8X %2X/%-8X %-8s\n",
instance_name,
(backup->server_version[0] ? backup->server_version : "----"),
base36enc(backup->start_time),
timestamp,
pgBackupGetBackupMode(backup),
backup->stream ? "STREAM": "ARCHIVE",
backup->tli,
parent_tli,
duration,
data_bytes_str,
(uint32) (backup->start_lsn >> 32),
(uint32) backup->start_lsn,
(uint32) (backup->stop_lsn >> 32),
(uint32) backup->stop_lsn,
status2str(backup->status));
}
}
/*
* Json output.
*/
/*
* Show instance backups in json format.
*/
static void
show_instance_json(parray *backup_list)
{
int i;
PQExpBuffer buf = &show_buf;
if (!first_instance)
appendPQExpBufferChar(buf, ',');
/* Begin of instance object */
json_add(buf, JT_BEGIN_OBJECT, &json_level);
json_add_value(buf, "instance", instance_name, json_level, false);
json_add_key(buf, "backups", json_level, true);
/*
* List backups.
*/
json_add(buf, JT_BEGIN_ARRAY, &json_level);
for (i = 0; i < parray_num(backup_list); i++)
{
pgBackup *backup = parray_get(backup_list, i);
TimeLineID parent_tli;
char timestamp[100] = "----";
char lsn[20];
if (i != 0)
appendPQExpBufferChar(buf, ',');
json_add(buf, JT_BEGIN_OBJECT, &json_level);
json_add_value(buf, "id", base36enc(backup->start_time), json_level,
false);
if (backup->parent_backup != 0)
json_add_value(buf, "parent-backup-id",
base36enc(backup->parent_backup), json_level, true);
json_add_value(buf, "backup-mode", pgBackupGetBackupMode(backup),
json_level, true);
json_add_value(buf, "wal", backup->stream ? "STREAM": "ARCHIVE",
json_level, true);
json_add_value(buf, "compress-alg",
deparse_compress_alg(backup->compress_alg), json_level,
true);
json_add_key(buf, "compress-level", json_level, true);
appendPQExpBuffer(buf, "%d", backup->compress_level);
json_add_value(buf, "from-replica",
backup->from_replica ? "true" : "false", json_level,
true);
json_add_key(buf, "block-size", json_level, true);
appendPQExpBuffer(buf, "%u", backup->block_size);
json_add_key(buf, "xlog-block-size", json_level, true);
appendPQExpBuffer(buf, "%u", backup->wal_block_size);
json_add_key(buf, "checksum-version", json_level, true);
appendPQExpBuffer(buf, "%u", backup->checksum_version);
json_add_value(buf, "program-version", backup->program_version,
json_level, true);
json_add_value(buf, "server-version", backup->server_version,
json_level, true);
json_add_key(buf, "current-tli", json_level, true);
appendPQExpBuffer(buf, "%d", backup->tli);
json_add_key(buf, "parent-tli", json_level, true);
parent_tli = get_parent_tli(backup->tli);
appendPQExpBuffer(buf, "%u", parent_tli);
snprintf(lsn, lengthof(lsn), "%X/%X",
(uint32) (backup->start_lsn >> 32), (uint32) backup->start_lsn);
json_add_value(buf, "start-lsn", lsn, json_level, true);
snprintf(lsn, lengthof(lsn), "%X/%X",
(uint32) (backup->stop_lsn >> 32), (uint32) backup->stop_lsn);
json_add_value(buf, "stop-lsn", lsn, json_level, true);
time2iso(timestamp, lengthof(timestamp), backup->start_time);
json_add_value(buf, "start-time", timestamp, json_level, true);
if (backup->end_time)
{
time2iso(timestamp, lengthof(timestamp), backup->end_time);
json_add_value(buf, "end-time", timestamp, json_level, true);
}
json_add_key(buf, "recovery-xid", json_level, true);
appendPQExpBuffer(buf, XID_FMT, backup->recovery_xid);
if (backup->recovery_time > 0)
{
time2iso(timestamp, lengthof(timestamp), backup->recovery_time);
json_add_value(buf, "recovery-time", timestamp, json_level, true);
}
if (backup->data_bytes != BYTES_INVALID)
{
json_add_key(buf, "data-bytes", json_level, true);
appendPQExpBuffer(buf, INT64_FORMAT, backup->data_bytes);
}
if (backup->wal_bytes != BYTES_INVALID)
{
json_add_key(buf, "wal-bytes", json_level, true);
appendPQExpBuffer(buf, INT64_FORMAT, backup->wal_bytes);
}
if (backup->primary_conninfo)
json_add_value(buf, "primary_conninfo", backup->primary_conninfo,
json_level, true);
json_add_value(buf, "status", status2str(backup->status), json_level,
true);
json_add(buf, JT_END_OBJECT, &json_level);
}
/* End of backups */
json_add(buf, JT_END_ARRAY, &json_level);
/* End of instance object */
json_add(buf, JT_END_OBJECT, &json_level);
first_instance = false;
}
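/*
* The overall output is a JSON array of instance objects, e.g. (abridged,
* values assumed for illustration):
*
* [
*     {
*         "instance": "node",
*         "backups": [
*             {"id": "PT8XFX", "backup-mode": "FULL", ..., "status": "OK"}
*         ]
*     }
* ]
*/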

118
src/status.c Normal file
View File

@ -0,0 +1,118 @@
/*-------------------------------------------------------------------------
*
* status.c
*
* Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
*
* Monitor status of a PostgreSQL server.
*
*-------------------------------------------------------------------------
*/
#include "postgres_fe.h"
#include <signal.h>
#include <sys/types.h>
#include <unistd.h>
#include "pg_probackup.h"
/* PID can be negative for standalone backend */
typedef long pgpid_t;
static pgpid_t get_pgpid(void);
static bool postmaster_is_alive(pid_t pid);
/*
* get_pgpid
*
* Get PID of postmaster, by scanning postmaster.pid.
*/
static pgpid_t
get_pgpid(void)
{
FILE *pidf;
long pid;
char pid_file[MAXPGPATH];
snprintf(pid_file, lengthof(pid_file), "%s/postmaster.pid", pgdata);
pidf = fopen(pid_file, PG_BINARY_R);
if (pidf == NULL)
{
/* No pid file, not an error on startup */
if (errno == ENOENT)
return 0;
else
{
elog(ERROR, "could not open PID file \"%s\": %s",
pid_file, strerror(errno));
}
}
if (fscanf(pidf, "%ld", &pid) != 1)
{
/* Is the file empty? */
if (ftell(pidf) == 0 && feof(pidf))
elog(ERROR, "the PID file \"%s\" is empty",
pid_file);
else
elog(ERROR, "invalid data in PID file \"%s\"\n",
pid_file);
}
fclose(pidf);
return (pgpid_t) pid;
}
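/*
* For reference, the first line of postmaster.pid holds the postmaster
* PID, which is all the fscanf() above consumes; e.g. a file beginning
*
* 4170
* /var/lib/postgresql/data
*
* yields pid = 4170.
*/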
/*
* postmaster_is_alive
*
* Check whether postmaster is alive or not.
*/
static bool
postmaster_is_alive(pid_t pid)
{
/*
* Test to see if the process is still there. Note that we do not
* consider an EPERM failure to mean that the process is still there;
* EPERM must mean that the given PID belongs to some other userid, and
* considering the permissions on $PGDATA, that means it's not the
* postmaster we are after.
*
* Don't believe that our own PID or parent shell's PID is the postmaster,
* either. (Windows hasn't got getppid(), though.)
*/
if (pid == getpid())
return false;
#ifndef WIN32
if (pid == getppid())
return false;
#endif
if (kill(pid, 0) == 0)
return true;
return false;
}
/*
* is_pg_running
*
* Check whether a postmaster is running in $PGDATA.
*/
bool
is_pg_running(void)
{
pgpid_t pid;
pid = get_pgpid();
/* 0 means no pid file */
if (pid == 0)
return false;
/* Case of a standalone backend */
if (pid < 0)
pid = -pid;
/* Check if postmaster is alive */
return postmaster_is_alive((pid_t) pid);
}

349
src/util.c Normal file
View File

@ -0,0 +1,349 @@
/*-------------------------------------------------------------------------
*
* util.c: miscellaneous utility functions.
*
* Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
* Portions Copyright (c) 2015-2017, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#include "pg_probackup.h"
#include <time.h>
#include "storage/bufpage.h"
#if PG_VERSION_NUM >= 110000
#include "streamutil.h"
#endif
const char *
base36enc(long unsigned int value)
{
const char base36[36] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
/* log(2**64) / log(36) = 12.38 => max 13 char + '\0' */
static char buffer[14];
unsigned int offset = sizeof(buffer);
buffer[--offset] = '\0';
do {
buffer[--offset] = base36[value % 36];
} while (value /= 36);
return &buffer[offset];
}
/*
* Same as base36enc(), but the result must be released by the user.
*/
char *
base36enc_dup(long unsigned int value)
{
const char base36[36] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
/* log(2**64) / log(36) = 12.38 => max 13 char + '\0' */
char buffer[14];
unsigned int offset = sizeof(buffer);
buffer[--offset] = '\0';
do {
buffer[--offset] = base36[value % 36];
} while (value /= 36);
return strdup(&buffer[offset]);
}
long unsigned int
base36dec(const char *text)
{
return strtoul(text, NULL, 36);
}
static void
checkControlFile(ControlFileData *ControlFile)
{
pg_crc32c crc;
/* Calculate CRC */
INIT_CRC32C(crc);
COMP_CRC32C(crc, (char *) ControlFile, offsetof(ControlFileData, crc));
FIN_CRC32C(crc);
/* Then compare it */
if (!EQ_CRC32C(crc, ControlFile->crc))
elog(ERROR, "Calculated CRC checksum does not match value stored in file.\n"
"Either the file is corrupt, or it has a different layout than this program\n"
"is expecting. The results below are untrustworthy.");
if (ControlFile->pg_control_version % 65536 == 0 && ControlFile->pg_control_version / 65536 != 0)
elog(ERROR, "possible byte ordering mismatch\n"
"The byte ordering used to store the pg_control file might not match the one\n"
"used by this program. In that case the results below would be incorrect, and\n"
"the PostgreSQL installation would be incompatible with this data directory.");
}
/*
* Verify control file contents in the buffer src, and copy it to *ControlFile.
*/
static void
digestControlFile(ControlFileData *ControlFile, char *src, size_t size)
{
#if PG_VERSION_NUM >= 100000
int ControlFileSize = PG_CONTROL_FILE_SIZE;
#else
int ControlFileSize = PG_CONTROL_SIZE;
#endif
if (size != ControlFileSize)
elog(ERROR, "unexpected control file size %d, expected %d",
(int) size, ControlFileSize);
memcpy(ControlFile, src, sizeof(ControlFileData));
/* Additional checks on control file */
checkControlFile(ControlFile);
}
/*
* Utility shared by backup and restore to fetch the current timeline
* used by a node.
*/
TimeLineID
get_current_timeline(bool safe)
{
ControlFileData ControlFile;
char *buffer;
size_t size;
/* First fetch file... */
buffer = slurpFile(pgdata, "global/pg_control", &size, safe);
if (safe && buffer == NULL)
return 0;
digestControlFile(&ControlFile, buffer, size);
pg_free(buffer);
return ControlFile.checkPointCopy.ThisTimeLineID;
}
uint64
get_system_identifier(char *pgdata_path)
{
ControlFileData ControlFile;
char *buffer;
size_t size;
/* First fetch file... */
buffer = slurpFile(pgdata_path, "global/pg_control", &size, false);
if (buffer == NULL)
return 0;
digestControlFile(&ControlFile, buffer, size);
pg_free(buffer);
return ControlFile.system_identifier;
}
uint64
get_remote_system_identifier(PGconn *conn)
{
#if PG_VERSION_NUM >= 90600
PGresult *res;
uint64 system_id_conn;
char *val;
res = pgut_execute(conn,
"SELECT system_identifier FROM pg_catalog.pg_control_system()",
0, NULL);
val = PQgetvalue(res, 0, 0);
if (!parse_uint64(val, &system_id_conn, 0))
{
PQclear(res);
elog(ERROR, "%s is not system_identifier", val);
}
PQclear(res);
return system_id_conn;
#else
char *buffer;
size_t size;
ControlFileData ControlFile;
buffer = fetchFile(conn, "global/pg_control", &size);
digestControlFile(&ControlFile, buffer, size);
pg_free(buffer);
return ControlFile.system_identifier;
#endif
}
uint32
get_xlog_seg_size(char *pgdata_path)
{
#if PG_VERSION_NUM >= 110000
ControlFileData ControlFile;
char *buffer;
size_t size;
/* First fetch file... */
buffer = slurpFile(pgdata_path, "global/pg_control", &size, false);
if (buffer == NULL)
return 0;
digestControlFile(&ControlFile, buffer, size);
pg_free(buffer);
return ControlFile.xlog_seg_size;
#else
return (uint32) XLOG_SEG_SIZE;
#endif
}
uint32
get_data_checksum_version(bool safe)
{
ControlFileData ControlFile;
char *buffer;
size_t size;
/* First fetch file... */
buffer = slurpFile(pgdata, "global/pg_control", &size, safe);
if (buffer == NULL)
return 0;
digestControlFile(&ControlFile, buffer, size);
pg_free(buffer);
return ControlFile.data_checksum_version;
}
/*
* Convert a time_t value to an ISO-8601 format string. Always set the timezone offset.
*/
void
time2iso(char *buf, size_t len, time_t time)
{
struct tm *ptm = gmtime(&time);
time_t gmt = mktime(ptm);
time_t offset;
char *ptr = buf;
ptm = localtime(&time);
offset = time - gmt + (ptm->tm_isdst ? 3600 : 0);
strftime(ptr, len, "%Y-%m-%d %H:%M:%S", ptm);
ptr += strlen(ptr);
snprintf(ptr, len - (ptr - buf), "%c%02d",
(offset >= 0) ? '+' : '-',
abs((int) offset) / SECS_PER_HOUR);
if (abs((int) offset) % SECS_PER_HOUR != 0)
{
ptr += strlen(ptr);
snprintf(ptr, len - (ptr - buf), ":%02d",
abs((int) offset % SECS_PER_HOUR) / SECS_PER_MINUTE);
}
}
/* copied from timestamp.c */
pg_time_t
timestamptz_to_time_t(TimestampTz t)
{
pg_time_t result;
#ifdef HAVE_INT64_TIMESTAMP
result = (pg_time_t) (t / USECS_PER_SEC +
((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY));
#else
result = (pg_time_t) (t +
((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY));
#endif
return result;
}
/* Parse string representation of the server version */
int
parse_server_version(char *server_version_str)
{
int nfields;
int result = 0;
int major_version = 0;
int minor_version = 0;
nfields = sscanf(server_version_str, "%d.%d", &major_version, &minor_version);
if (nfields == 2)
{
/* Server version lower than 10 */
if (major_version > 10)
elog(ERROR, "Server version format doesn't match major version %d", major_version);
result = major_version * 10000 + minor_version * 100;
}
else if (nfields == 1)
{
if (major_version < 10)
elog(ERROR, "Server version format doesn't match major version %d", major_version);
result = major_version * 10000;
}
else
elog(ERROR, "Unknown server version format");
return result;
}
const char *
status2str(BackupStatus status)
{
static const char *statusName[] =
{
"UNKNOWN",
"OK",
"ERROR",
"RUNNING",
"MERGING",
"DELETING",
"DELETED",
"DONE",
"ORPHAN",
"CORRUPT"
};
if (status < BACKUP_STATUS_INVALID || BACKUP_STATUS_CORRUPT < status)
return "UNKNOWN";
return statusName[status];
}
void
remove_trailing_space(char *buf, int comment_mark)
{
int i;
char *last_char = NULL;
for (i = 0; buf[i]; i++)
{
if (buf[i] == comment_mark || buf[i] == '\n' || buf[i] == '\r')
{
buf[i] = '\0';
break;
}
}
for (i = 0; buf[i]; i++)
{
if (!isspace(buf[i]))
last_char = buf + i;
}
if (last_char != NULL)
*(last_char + 1) = '\0';
}
void
remove_not_digit(char *buf, size_t len, const char *str)
{
int i, j;
for (i = 0, j = 0; str[i] && j < len; i++)
{
if (!isdigit(str[i]))
continue;
buf[j++] = str[i];
}
buf[j] = '\0';
}
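A standalone sketch of the base36 round-trip used for backup IDs above; the encoder is re-implemented locally so the example compiles without pg_probackup headers (base36dec() above is simply strtoul() with base 36):

```c
#include <stdio.h>
#include <stdlib.h>

static const char *
demo_base36enc(unsigned long value)
{
	static const char base36[36] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
	static char buffer[14];			/* 13 digits cover 2^64, plus '\0' */
	unsigned int offset = sizeof(buffer);

	buffer[--offset] = '\0';
	do {
		buffer[--offset] = base36[value % 36];
	} while (value /= 36);
	return &buffer[offset];
}

int
main(void)
{
	unsigned long start_time = 1526000000UL;	/* e.g. a backup start_time */
	const char *id = demo_base36enc(start_time);

	/* decoding is just strtoul() with base 36, as in base36dec() above */
	printf("%s -> %lu\n", id, strtoul(id, NULL, 36));
	return 0;
}
```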

134
src/utils/json.c Normal file
View File

@ -0,0 +1,134 @@
/*-------------------------------------------------------------------------
*
* json.c: - make json document.
*
* Copyright (c) 2018, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#include "json.h"
static void json_add_indent(PQExpBuffer buf, int32 level);
static void json_add_escaped(PQExpBuffer buf, const char *str);
/*
* Start or end json token. Currently it is a json object or array.
*
* The function modifies the level value and adds indentation where appropriate.
*/
void
json_add(PQExpBuffer buf, JsonToken type, int32 *level)
{
switch (type)
{
case JT_BEGIN_ARRAY:
appendPQExpBufferChar(buf, '[');
*level += 1;
break;
case JT_END_ARRAY:
*level -= 1;
if (*level == 0)
appendPQExpBufferChar(buf, '\n');
else
json_add_indent(buf, *level);
appendPQExpBufferChar(buf, ']');
break;
case JT_BEGIN_OBJECT:
json_add_indent(buf, *level);
appendPQExpBufferChar(buf, '{');
*level += 1;
break;
case JT_END_OBJECT:
*level -= 1;
if (*level == 0)
appendPQExpBufferChar(buf, '\n');
else
json_add_indent(buf, *level);
appendPQExpBufferChar(buf, '}');
break;
default:
break;
}
}
/*
* Add a JSON object key. If it isn't the first key, we need to add a comma first.
*/
void
json_add_key(PQExpBuffer buf, const char *name, int32 level, bool add_comma)
{
if (add_comma)
appendPQExpBufferChar(buf, ',');
json_add_indent(buf, level);
json_add_escaped(buf, name);
appendPQExpBufferStr(buf, ": ");
}
/*
* Add a JSON object key and value. If it isn't the first key, we need to add
* a comma first.
*/
void
json_add_value(PQExpBuffer buf, const char *name, const char *value,
int32 level, bool add_comma)
{
json_add_key(buf, name, level, add_comma);
json_add_escaped(buf, value);
}
static void
json_add_indent(PQExpBuffer buf, int32 level)
{
uint16 i;
if (level == 0)
return;
appendPQExpBufferChar(buf, '\n');
for (i = 0; i < level; i++)
appendPQExpBufferStr(buf, " ");
}
static void
json_add_escaped(PQExpBuffer buf, const char *str)
{
const char *p;
appendPQExpBufferChar(buf, '"');
for (p = str; *p; p++)
{
switch (*p)
{
case '\b':
appendPQExpBufferStr(buf, "\\b");
break;
case '\f':
appendPQExpBufferStr(buf, "\\f");
break;
case '\n':
appendPQExpBufferStr(buf, "\\n");
break;
case '\r':
appendPQExpBufferStr(buf, "\\r");
break;
case '\t':
appendPQExpBufferStr(buf, "\\t");
break;
case '"':
appendPQExpBufferStr(buf, "\\\"");
break;
case '\\':
appendPQExpBufferStr(buf, "\\\\");
break;
default:
if ((unsigned char) *p < ' ')
appendPQExpBuffer(buf, "\\u%04x", (int) *p);
else
appendPQExpBufferChar(buf, *p);
break;
}
}
appendPQExpBufferChar(buf, '"');
}

33
src/utils/json.h Normal file
View File

@ -0,0 +1,33 @@
/*-------------------------------------------------------------------------
*
* json.h: - prototypes of json output functions.
*
* Copyright (c) 2018, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#ifndef PROBACKUP_JSON_H
#define PROBACKUP_JSON_H
#include "postgres_fe.h"
#include "pqexpbuffer.h"
/*
* Json document tokens.
*/
typedef enum
{
JT_BEGIN_ARRAY,
JT_END_ARRAY,
JT_BEGIN_OBJECT,
JT_END_OBJECT
} JsonToken;
extern void json_add(PQExpBuffer buf, JsonToken type, int32 *level);
extern void json_add_key(PQExpBuffer buf, const char *name, int32 level,
bool add_comma);
extern void json_add_value(PQExpBuffer buf, const char *name, const char *value,
int32 level, bool add_comma);
#endif /* PROBACKUP_JSON_H */
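A minimal usage sketch of these helpers, emitting a small indented object; the include path is assumed:

```c
#include "utils/json.h"		/* assumed location within the source tree */

static void
emit_status(PQExpBuffer buf, const char *status)
{
	int32		level = 0;

	json_add(buf, JT_BEGIN_OBJECT, &level);
	/* first key of the object: no leading comma */
	json_add_value(buf, "status", status, level, false);
	json_add(buf, JT_END_OBJECT, &level);
	/*
	 * For status = "OK", buf now contains:
	 * {
	 *     "status": "OK"
	 * }
	 */
}
```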

621
src/utils/logger.c Normal file
View File

@ -0,0 +1,621 @@
/*-------------------------------------------------------------------------
*
* logger.c: - log events into log file or stderr.
*
* Copyright (c) 2017-2017, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <time.h>
#include "logger.h"
#include "pgut.h"
#include "pg_probackup.h"
#include "thread.h"
/* Logger parameters */
int log_level_console = LOG_LEVEL_CONSOLE_DEFAULT;
int log_level_file = LOG_LEVEL_FILE_DEFAULT;
char *log_filename = NULL;
char *error_log_filename = NULL;
char *log_directory = NULL;
/*
* If log_path is empty, logging is not initialized
* and we log only to stderr.
*/
char log_path[MAXPGPATH] = "";
/* Maximum size of an individual log file in kilobytes */
int log_rotation_size = 0;
/* Maximum lifetime of an individual log file in minutes */
int log_rotation_age = 0;
/* Implementation for logging.h */
typedef enum
{
PG_DEBUG,
PG_PROGRESS,
PG_WARNING,
PG_FATAL
} eLogType;
void pg_log(eLogType type, const char *fmt,...) pg_attribute_printf(2, 3);
static void elog_internal(int elevel, bool file_only, const char *fmt, va_list args)
pg_attribute_printf(3, 0);
static void elog_stderr(int elevel, const char *fmt, ...)
pg_attribute_printf(2, 3);
/* Functions to work with log files */
static void open_logfile(FILE **file, const char *filename_format);
static void release_logfile(void);
static char *logfile_getname(const char *format, time_t timestamp);
static FILE *logfile_open(const char *filename, const char *mode);
/* Static variables */
static FILE *log_file = NULL;
static FILE *error_log_file = NULL;
static bool exit_hook_registered = false;
/* Logging of the current thread is in progress */
static bool logging_in_progress = false;
static pthread_mutex_t log_file_mutex = PTHREAD_MUTEX_INITIALIZER;
void
init_logger(const char *root_path)
{
/* Set log path */
if (log_level_file != LOG_OFF || error_log_filename)
{
if (log_directory)
strcpy(log_path, log_directory);
else
join_path_components(log_path, root_path, LOG_DIRECTORY_DEFAULT);
}
}
static void
write_elevel(FILE *stream, int elevel)
{
switch (elevel)
{
case VERBOSE:
fputs("VERBOSE: ", stream);
break;
case LOG:
fputs("LOG: ", stream);
break;
case INFO:
fputs("INFO: ", stream);
break;
case NOTICE:
fputs("NOTICE: ", stream);
break;
case WARNING:
fputs("WARNING: ", stream);
break;
case ERROR:
fputs("ERROR: ", stream);
break;
default:
elog_stderr(ERROR, "invalid logging level: %d", elevel);
break;
}
}
/*
* Exit with code if it is an error.
* Check for in_cleanup flag to avoid deadlock in case of ERROR in cleanup
* routines.
*/
static void
exit_if_necessary(int elevel)
{
if (elevel > WARNING && !in_cleanup)
{
/* Interrupt other possible routines */
interrupted = true;
if (logging_in_progress)
{
logging_in_progress = false;
pthread_mutex_unlock(&log_file_mutex);
}
/* If this is not the main thread then don't call exit() */
if (main_tid != pthread_self())
#ifdef WIN32
ExitThread(elevel);
#else
pthread_exit(NULL);
#endif
else
exit(elevel);
}
}
/*
* Logs to stderr or to log file and exit if ERROR.
*
* Actual implementation for elog() and pg_log().
*/
static void
elog_internal(int elevel, bool file_only, const char *fmt, va_list args)
{
bool write_to_file,
write_to_error_log,
write_to_stderr;
va_list error_args,
std_args;
time_t log_time = (time_t) time(NULL);
char strfbuf[128];
write_to_file = elevel >= log_level_file && log_path[0] != '\0';
write_to_error_log = elevel >= ERROR && error_log_filename &&
log_path[0] != '\0';
write_to_stderr = elevel >= log_level_console && !file_only;
pthread_lock(&log_file_mutex);
#ifdef WIN32
std_args = NULL;
error_args = NULL;
#endif
logging_in_progress = true;
/* We need to copy args only if we write to the error log file */
if (write_to_error_log)
va_copy(error_args, args);
/*
* We need to copy args only if we write to both stderr and the log file;
* if we log only to stderr, the original args can be used directly.
*/
if (write_to_stderr && write_to_file)
va_copy(std_args, args);
if (write_to_file || write_to_error_log)
strftime(strfbuf, sizeof(strfbuf), "%Y-%m-%d %H:%M:%S %Z",
localtime(&log_time));
/*
* Write message to log file.
* Do not write to the file if this error was raised while writing the
* previous message.
*/
if (write_to_file)
{
if (log_file == NULL)
{
if (log_filename == NULL)
open_logfile(&log_file, LOG_FILENAME_DEFAULT);
else
open_logfile(&log_file, log_filename);
}
fprintf(log_file, "%s: ", strfbuf);
write_elevel(log_file, elevel);
vfprintf(log_file, fmt, args);
fputc('\n', log_file);
fflush(log_file);
}
/*
* Write error message to error log file.
* Do not write to the file if this error was raised while writing the
* previous message.
*/
if (write_to_error_log)
{
if (error_log_file == NULL)
open_logfile(&error_log_file, error_log_filename);
fprintf(error_log_file, "%s: ", strfbuf);
write_elevel(error_log_file, elevel);
vfprintf(error_log_file, fmt, error_args);
fputc('\n', error_log_file);
fflush(error_log_file);
va_end(error_args);
}
/*
* Write to stderr if the message was not written to the log file.
* Messages above WARNING are written to stderr in any case.
*/
if (write_to_stderr)
{
write_elevel(stderr, elevel);
if (write_to_file)
vfprintf(stderr, fmt, std_args);
else
vfprintf(stderr, fmt, args);
fputc('\n', stderr);
fflush(stderr);
if (write_to_file)
va_end(std_args);
}
exit_if_necessary(elevel);
logging_in_progress = false;
pthread_mutex_unlock(&log_file_mutex);
}
/*
* Log only to stderr. It is used when other logging is already in
* progress, e.g. from within elog_internal().
*/
static void
elog_stderr(int elevel, const char *fmt, ...)
{
va_list args;
/*
* Do not log the message if its severity level is below log_level.
* Checking here rather than in elog_internal() is a small optimisation.
*/
if (elevel < log_level_console && elevel < ERROR)
return;
va_start(args, fmt);
write_elevel(stderr, elevel);
vfprintf(stderr, fmt, args);
fputc('\n', stderr);
fflush(stderr);
va_end(args);
exit_if_necessary(elevel);
}
/*
* Logs to stderr or to log file and exit if ERROR.
*/
void
elog(int elevel, const char *fmt, ...)
{
va_list args;
/*
* Do not log the message if its severity level is below log_level.
* Checking here rather than in elog_internal() is a small optimisation.
*/
if (elevel < log_level_console && elevel < log_level_file && elevel < ERROR)
return;
va_start(args, fmt);
elog_internal(elevel, false, fmt, args);
va_end(args);
}
/*
* Logs only to log file and exit if ERROR.
*/
void
elog_file(int elevel, const char *fmt, ...)
{
va_list args;
/*
* Do not log the message if its severity level is below log_level.
* Checking here rather than in elog_internal() is a small optimisation.
*/
if (elevel < log_level_file && elevel < ERROR)
return;
va_start(args, fmt);
elog_internal(elevel, true, fmt, args);
va_end(args);
}
/*
* Implementation of pg_log() from logging.h.
*/
void
pg_log(eLogType type, const char *fmt, ...)
{
va_list args;
int elevel = INFO;
/* Transform logging level from eLogType to utils/logger.h levels */
switch (type)
{
case PG_DEBUG:
elevel = LOG;
break;
case PG_PROGRESS:
elevel = INFO;
break;
case PG_WARNING:
elevel = WARNING;
break;
case PG_FATAL:
elevel = ERROR;
break;
default:
elog(ERROR, "invalid logging level: %d", type);
break;
}
/*
* Do not log the message if its severity level is below log_level.
* Checking here rather than in elog_internal() is a small optimisation.
*/
if (elevel < log_level_console && elevel < log_level_file && elevel < ERROR)
return;
va_start(args, fmt);
elog_internal(elevel, false, fmt, args);
va_end(args);
}
/*
* Parses string representation of log level.
*/
int
parse_log_level(const char *level)
{
const char *v = level;
size_t len;
/* Skip leading whitespace */
while (isspace((unsigned char)*v))
v++;
len = strlen(v);
if (len == 0)
elog(ERROR, "log-level is empty");
if (pg_strncasecmp("off", v, len) == 0)
return LOG_OFF;
else if (pg_strncasecmp("verbose", v, len) == 0)
return VERBOSE;
else if (pg_strncasecmp("log", v, len) == 0)
return LOG;
else if (pg_strncasecmp("info", v, len) == 0)
return INFO;
else if (pg_strncasecmp("notice", v, len) == 0)
return NOTICE;
else if (pg_strncasecmp("warning", v, len) == 0)
return WARNING;
else if (pg_strncasecmp("error", v, len) == 0)
return ERROR;
/* Log level is invalid */
elog(ERROR, "invalid log-level \"%s\"", level);
return 0;
}
/*
* Converts integer representation of log level to string.
*/
const char *
deparse_log_level(int level)
{
switch (level)
{
case LOG_OFF:
return "OFF";
case VERBOSE:
return "VERBOSE";
case LOG:
return "LOG";
case INFO:
return "INFO";
case NOTICE:
return "NOTICE";
case WARNING:
return "WARNING";
case ERROR:
return "ERROR";
default:
elog(ERROR, "invalid log-level %d", level);
}
return NULL;
}
/*
* Construct logfile name using timestamp information.
*
* Result is palloc'd.
*/
static char *
logfile_getname(const char *format, time_t timestamp)
{
char *filename;
size_t len;
struct tm *tm = localtime(&timestamp);
if (log_path[0] == '\0')
elog_stderr(ERROR, "logging path is not set");
filename = (char *) palloc(MAXPGPATH);
snprintf(filename, MAXPGPATH, "%s/", log_path);
len = strlen(filename);
/* Treat log_filename as a strftime pattern */
if (strftime(filename + len, MAXPGPATH - len, format, tm) <= 0)
elog_stderr(ERROR, "strftime(%s) failed: %s", format, strerror(errno));
return filename;
}
/*
* Open a new log file.
*/
static FILE *
logfile_open(const char *filename, const char *mode)
{
FILE *fh;
/*
* Create log directory if not present; ignore errors
*/
mkdir(log_path, S_IRWXU);
fh = fopen(filename, mode);
if (fh)
setvbuf(fh, NULL, PG_IOLBF, 0);
else
{
int save_errno = errno;
elog_stderr(ERROR, "could not open log file \"%s\": %s",
filename, strerror(errno));
errno = save_errno;
}
return fh;
}
/*
* Open the log file.
*/
static void
open_logfile(FILE **file, const char *filename_format)
{
char *filename;
char control[MAXPGPATH];
struct stat st;
FILE *control_file;
time_t cur_time = time(NULL);
bool rotation_requested = false,
logfile_exists = false;
filename = logfile_getname(filename_format, cur_time);
/* "log_path" was checked in logfile_getname() */
snprintf(control, MAXPGPATH, "%s.rotation", filename);
if (stat(filename, &st) == -1)
{
if (errno == ENOENT)
{
/* There is no such log file yet, so no rotation is needed */
goto logfile_open;
}
else
elog_stderr(ERROR, "cannot stat log file \"%s\": %s",
filename, strerror(errno));
}
/* Found log file "filename" */
logfile_exists = true;
/* First check for rotation */
if (log_rotation_size > 0 || log_rotation_age > 0)
{
/* Check for rotation by age */
if (log_rotation_age > 0)
{
struct stat control_st;
if (stat(control, &control_st) == -1)
{
if (errno != ENOENT)
elog_stderr(ERROR, "cannot stat rotation file \"%s\": %s",
control, strerror(errno));
}
else
{
char buf[1024];
control_file = fopen(control, "r");
if (control_file == NULL)
elog_stderr(ERROR, "cannot open rotation file \"%s\": %s",
control, strerror(errno));
if (fgets(buf, lengthof(buf), control_file))
{
time_t creation_time;
if (!parse_int64(buf, (int64 *) &creation_time, 0))
elog_stderr(ERROR, "rotation file \"%s\" has wrong "
"creation timestamp \"%s\"",
control, buf);
/* Parsed creation time */
rotation_requested = (cur_time - creation_time) >
/* convert to seconds */
log_rotation_age * 60;
}
else
elog_stderr(ERROR, "cannot read creation timestamp from "
"rotation file \"%s\"", control);
fclose(control_file);
}
}
/* Check for rotation by size */
if (!rotation_requested && log_rotation_size > 0)
rotation_requested = st.st_size >=
/* convert to bytes */
log_rotation_size * 1024L;
}
logfile_open:
if (rotation_requested)
*file = logfile_open(filename, "w");
else
*file = logfile_open(filename, "a");
pfree(filename);
/* Rewrite rotation control file */
if (rotation_requested || !logfile_exists)
{
time_t timestamp = time(NULL);
control_file = fopen(control, "w");
if (control_file == NULL)
elog_stderr(ERROR, "cannot open rotation file \"%s\": %s",
control, strerror(errno));
fprintf(control_file, "%ld", timestamp);
fclose(control_file);
}
/*
* Arrange to close opened file at proc_exit.
*/
if (!exit_hook_registered)
{
atexit(release_logfile);
exit_hook_registered = true;
}
}
/*
* Closes opened file.
*/
static void
release_logfile(void)
{
if (log_file)
{
fclose(log_file);
log_file = NULL;
}
if (error_log_file)
{
fclose(error_log_file);
error_log_file = NULL;
}
}
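Since pg_strncasecmp() above is bounded by the input's length, parse_log_level() accepts case-insensitive prefixes; a sketch, assuming logger.c is linked in:

```c
parse_log_level("warning");	/* returns WARNING */
parse_log_level("WARN");	/* returns WARNING: case-insensitive prefix match */
parse_log_level("  info");	/* returns INFO: leading whitespace is skipped */
/* parse_log_level("bogus") raises ERROR: invalid log-level "bogus" */
```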

54
src/utils/logger.h Normal file
View File

@ -0,0 +1,54 @@
/*-------------------------------------------------------------------------
*
* logger.h: - prototypes of logger functions.
*
* Copyright (c) 2017-2017, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#ifndef LOGGER_H
#define LOGGER_H
#include "postgres_fe.h"
#define LOG_NONE (-10)
/* Log level */
#define VERBOSE (-5)
#define LOG (-4)
#define INFO (-3)
#define NOTICE (-2)
#define WARNING (-1)
#define ERROR 1
#define LOG_OFF 10
/* Logger parameters */
extern int log_to_file;
extern int log_level_console;
extern int log_level_file;
extern char *log_filename;
extern char *error_log_filename;
extern char *log_directory;
extern char log_path[MAXPGPATH];
#define LOG_ROTATION_SIZE_DEFAULT 0
#define LOG_ROTATION_AGE_DEFAULT 0
extern int log_rotation_size;
extern int log_rotation_age;
#define LOG_LEVEL_CONSOLE_DEFAULT INFO
#define LOG_LEVEL_FILE_DEFAULT LOG_OFF
#undef elog
extern void elog(int elevel, const char *fmt, ...) pg_attribute_printf(2, 3);
extern void elog_file(int elevel, const char *fmt, ...) pg_attribute_printf(2, 3);
extern void init_logger(const char *root_path);
extern int parse_log_level(const char *level);
extern const char *deparse_log_level(int level);
#endif /* LOGGER_H */
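For reference, open_logfile() in logger.c pairs each log file with a "<logfile>.rotation" control file holding a single decimal creation timestamp, which drives rotation by age; a hypothetical example (path and file name are illustrative):

```
$ cat backup/log/pg_probackup.log.rotation
1526000000
```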

196
src/utils/parray.c Normal file
View File

@ -0,0 +1,196 @@
/*-------------------------------------------------------------------------
*
* parray.c: pointer array collection.
*
* Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
*
*-------------------------------------------------------------------------
*/
#include "src/pg_probackup.h"
/* members of struct parray are hidden from client. */
struct parray
{
void **data; /* pointer array, expanded if necessary */
size_t alloced; /* number of elements allocated */
size_t used; /* number of elements in use */
};
/*
* Create new parray object.
* Never returns NULL.
*/
parray *
parray_new(void)
{
parray *a = pgut_new(parray);
a->data = NULL;
a->used = 0;
a->alloced = 0;
parray_expand(a, 1024);
return a;
}
/*
* Expand the array pointed to by data to newsize.
* Elements in expanded area are initialized to NULL.
* Note: never returns NULL.
*/
void
parray_expand(parray *array, size_t newsize)
{
void **p;
/* already allocated */
if (newsize <= array->alloced)
return;
p = pgut_realloc(array->data, sizeof(void *) * newsize);
/* initialize expanded area to NULL */
memset(p + array->alloced, 0, (newsize - array->alloced) * sizeof(void *));
array->alloced = newsize;
array->data = p;
}
void
parray_free(parray *array)
{
if (array == NULL)
return;
free(array->data);
free(array);
}
void
parray_append(parray *array, void *elem)
{
if (array->used + 1 > array->alloced)
parray_expand(array, array->alloced * 2);
array->data[array->used++] = elem;
}
void
parray_insert(parray *array, size_t index, void *elem)
{
if (array->used + 1 > array->alloced)
parray_expand(array, array->alloced * 2);
memmove(array->data + index + 1, array->data + index,
(array->alloced - index - 1) * sizeof(void *));
array->data[index] = elem;
/* adjust used count */
if (array->used < index + 1)
array->used = index + 1;
else
array->used++;
}
/*
* Concatenate two parrays.
* parray_concat() appends a copy of the content of src to the end of dest.
*/
parray *
parray_concat(parray *dest, const parray *src)
{
/* expand head array */
parray_expand(dest, dest->used + src->used);
/* copy content of src after content of dest */
memcpy(dest->data + dest->used, src->data, src->used * sizeof(void *));
dest->used += parray_num(src);
return dest;
}
void
parray_set(parray *array, size_t index, void *elem)
{
if (index > array->alloced - 1)
parray_expand(array, index + 1);
array->data[index] = elem;
/* adjust used count */
if (array->used < index + 1)
array->used = index + 1;
}
void *
parray_get(const parray *array, size_t index)
{
if (index > array->alloced - 1)
return NULL;
return array->data[index];
}
void *
parray_remove(parray *array, size_t index)
{
void *val;
/* index out of range */
if (index >= array->used)
return NULL;
val = array->data[index];
/* Do not move if the last element was removed. */
if (index < array->alloced - 1)
memmove(array->data + index, array->data + index + 1,
(array->alloced - index - 1) * sizeof(void *));
/* adjust used count */
array->used--;
return val;
}
bool
parray_rm(parray *array, const void *key, int(*compare)(const void *, const void *))
{
int i;
for (i = 0; i < array->used; i++)
{
if (compare(&key, &array->data[i]) == 0)
{
parray_remove(array, i);
return true;
}
}
return false;
}
size_t
parray_num(const parray *array)
{
return array->used;
}
void
parray_qsort(parray *array, int(*compare)(const void *, const void *))
{
qsort(array->data, array->used, sizeof(void *), compare);
}
void
parray_walk(parray *array, void (*action)(void *))
{
int i;
for (i = 0; i < array->used; i++)
action(array->data[i]);
}
void *
parray_bsearch(parray *array, const void *key, int(*compare)(const void *, const void *))
{
return bsearch(&key, array->data, array->used, sizeof(void *), compare);
}

35
src/utils/parray.h Normal file
View File

@ -0,0 +1,35 @@
/*-------------------------------------------------------------------------
*
* parray.h: pointer array collection.
*
* Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
*
*-------------------------------------------------------------------------
*/
#ifndef PARRAY_H
#define PARRAY_H
/*
* "parray" hold pointers to objects in a linear memory area.
* Client use "parray *" to access parray object.
*/
typedef struct parray parray;
extern parray *parray_new(void);
extern void parray_expand(parray *array, size_t newnum);
extern void parray_free(parray *array);
extern void parray_append(parray *array, void *val);
extern void parray_insert(parray *array, size_t index, void *val);
extern parray *parray_concat(parray *head, const parray *tail);
extern void parray_set(parray *array, size_t index, void *val);
extern void *parray_get(const parray *array, size_t index);
extern void *parray_remove(parray *array, size_t index);
extern bool parray_rm(parray *array, const void *key, int(*compare)(const void *, const void *));
extern size_t parray_num(const parray *array);
extern void parray_qsort(parray *array, int(*compare)(const void *, const void *));
extern void *parray_bsearch(parray *array, const void *key, int(*compare)(const void *, const void *));
extern void parray_walk(parray *array, void (*action)(void *));
#endif /* PARRAY_H */
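A minimal usage sketch of this API (assumes parray.h above and pgut_strdup() from pgut.h):

```c
#include <stdio.h>
#include <stdlib.h>

static void
list_names(void)
{
	parray *names = parray_new();
	size_t	i;

	parray_append(names, pgut_strdup("alpha"));
	parray_append(names, pgut_strdup("beta"));

	for (i = 0; i < parray_num(names); i++)
		printf("%s\n", (char *) parray_get(names, i));

	parray_walk(names, free);	/* free each element... */
	parray_free(names);		/* ...then the array itself */
}
```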

2417
src/utils/pgut.c Normal file

File diff suppressed because it is too large Load Diff

238
src/utils/pgut.h Normal file
View File

@ -0,0 +1,238 @@
/*-------------------------------------------------------------------------
*
* pgut.h
*
* Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
* Portions Copyright (c) 2017-2017, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#ifndef PGUT_H
#define PGUT_H
#include "libpq-fe.h"
#include "pqexpbuffer.h"
#include <assert.h>
#include <sys/time.h>
#include "access/xlogdefs.h"
#include "logger.h"
#if !defined(C_H) && !defined(__cplusplus)
#ifndef bool
typedef char bool;
#endif
#ifndef true
#define true ((bool) 1)
#endif
#ifndef false
#define false ((bool) 0)
#endif
#endif
#define INFINITE_STR "INFINITE"
typedef enum pgut_optsrc
{
SOURCE_DEFAULT,
SOURCE_FILE_STRICT,
SOURCE_ENV,
SOURCE_FILE,
SOURCE_CMDLINE,
SOURCE_CONST
} pgut_optsrc;
/*
* type:
* b: bool (true)
* B: bool (false)
* f: pgut_optfn
* i: 32bit signed integer
* u: 32bit unsigned integer
* I: 64bit signed integer
* U: 64bit unsigned integer
* s: string
* t: time_t
*/
typedef struct pgut_option
{
char type;
uint8 sname; /* short name */
const char *lname; /* long name */
void *var; /* pointer to variable */
pgut_optsrc allowed; /* allowed source */
pgut_optsrc source; /* actual source */
int flags; /* option unit */
} pgut_option;
typedef void (*pgut_optfn) (pgut_option *opt, const char *arg);
typedef void (*pgut_atexit_callback)(bool fatal, void *userdata);
/*
* bit values in "flags" of an option
*/
#define OPTION_UNIT_KB 0x1000 /* value is in kilobytes */
#define OPTION_UNIT_BLOCKS 0x2000 /* value is in blocks */
#define OPTION_UNIT_XBLOCKS 0x3000 /* value is in xlog blocks */
#define OPTION_UNIT_XSEGS 0x4000 /* value is in xlog segments */
#define OPTION_UNIT_MEMORY 0xF000 /* mask for size-related units */
#define OPTION_UNIT_MS 0x10000 /* value is in milliseconds */
#define OPTION_UNIT_S 0x20000 /* value is in seconds */
#define OPTION_UNIT_MIN 0x30000 /* value is in minutes */
#define OPTION_UNIT_TIME 0xF0000 /* mask for time-related units */
#define OPTION_UNIT (OPTION_UNIT_MEMORY | OPTION_UNIT_TIME)
/*
* pgut client variables and functions
*/
extern const char *PROGRAM_NAME;
extern const char *PROGRAM_VERSION;
extern const char *PROGRAM_URL;
extern const char *PROGRAM_EMAIL;
extern void pgut_help(bool details);
/*
* pgut framework variables and functions
*/
extern const char *pgut_dbname;
extern const char *host;
extern const char *port;
extern const char *username;
extern bool prompt_password;
extern bool force_password;
extern bool interrupted;
extern bool in_cleanup;
extern bool in_password; /* User is being prompted for a password */
extern int pgut_getopt(int argc, char **argv, pgut_option options[]);
extern int pgut_readopt(const char *path, pgut_option options[], int elevel,
bool strict);
extern void pgut_getopt_env(pgut_option options[]);
extern void pgut_atexit_push(pgut_atexit_callback callback, void *userdata);
extern void pgut_atexit_pop(pgut_atexit_callback callback, void *userdata);
/*
* Database connections
*/
extern char *pgut_get_conninfo_string(PGconn *conn);
extern PGconn *pgut_connect(const char *dbname);
extern PGconn *pgut_connect_extended(const char *pghost, const char *pgport,
const char *dbname, const char *login);
extern PGconn *pgut_connect_replication(const char *dbname);
extern PGconn *pgut_connect_replication_extended(const char *pghost, const char *pgport,
const char *dbname, const char *pguser);
extern void pgut_disconnect(PGconn *conn);
extern PGresult *pgut_execute(PGconn* conn, const char *query, int nParams,
const char **params);
extern PGresult *pgut_execute_extended(PGconn* conn, const char *query, int nParams,
const char **params, bool text_result, bool ok_error);
extern PGresult *pgut_execute_parallel(PGconn* conn, PGcancel* thread_cancel_conn,
const char *query, int nParams,
const char **params, bool text_result);
extern bool pgut_send(PGconn* conn, const char *query, int nParams, const char **params, int elevel);
extern void pgut_cancel(PGconn* conn);
extern int pgut_wait(int num, PGconn *connections[], struct timeval *timeout);
extern const char *pgut_get_host(void);
extern const char *pgut_get_port(void);
extern void pgut_set_host(const char *new_host);
extern void pgut_set_port(const char *new_port);
/*
* memory allocators
*/
extern void *pgut_malloc(size_t size);
extern void *pgut_realloc(void *p, size_t size);
extern char *pgut_strdup(const char *str);
extern char *strdup_with_len(const char *str, size_t len);
extern char *strdup_trim(const char *str);
#define pgut_new(type) ((type *) pgut_malloc(sizeof(type)))
#define pgut_newarray(type, n) ((type *) pgut_malloc(sizeof(type) * (n)))
/*
* file operations
*/
extern FILE *pgut_fopen(const char *path, const char *mode, bool missing_ok);
/*
* Assert
*/
#undef Assert
#undef AssertArg
#undef AssertMacro
#ifdef USE_ASSERT_CHECKING
#define Assert(x) assert(x)
#define AssertArg(x) assert(x)
#define AssertMacro(x) assert(x)
#else
#define Assert(x) ((void) 0)
#define AssertArg(x) ((void) 0)
#define AssertMacro(x) ((void) 0)
#endif
/*
* StringInfo and string operations
*/
#define STRINGINFO_H
#define StringInfoData PQExpBufferData
#define StringInfo PQExpBuffer
#define makeStringInfo createPQExpBuffer
#define initStringInfo initPQExpBuffer
#define freeStringInfo destroyPQExpBuffer
#define termStringInfo termPQExpBuffer
#define resetStringInfo resetPQExpBuffer
#define enlargeStringInfo enlargePQExpBuffer
#define printfStringInfo printfPQExpBuffer /* reset + append */
#define appendStringInfo appendPQExpBuffer
#define appendStringInfoString appendPQExpBufferStr
#define appendStringInfoChar appendPQExpBufferChar
#define appendBinaryStringInfo appendBinaryPQExpBuffer
extern int appendStringInfoFile(StringInfo str, FILE *fp);
extern int appendStringInfoFd(StringInfo str, int fd);
extern bool parse_bool(const char *value, bool *result);
extern bool parse_bool_with_len(const char *value, size_t len, bool *result);
extern bool parse_int32(const char *value, int32 *result, int flags);
extern bool parse_uint32(const char *value, uint32 *result, int flags);
extern bool parse_int64(const char *value, int64 *result, int flags);
extern bool parse_uint64(const char *value, uint64 *result, int flags);
extern bool parse_time(const char *value, time_t *result, bool utc_default);
extern bool parse_int(const char *value, int *result, int flags,
const char **hintmsg);
extern bool parse_lsn(const char *value, XLogRecPtr *result);
extern void convert_from_base_unit(int64 base_value, int base_unit,
int64 *value, const char **unit);
extern void convert_from_base_unit_u(uint64 base_value, int base_unit,
uint64 *value, const char **unit);
#define IsSpace(c) (isspace((unsigned char)(c)))
#define IsAlpha(c) (isalpha((unsigned char)(c)))
#define IsAlnum(c) (isalnum((unsigned char)(c)))
#define IsIdentHead(c) (IsAlpha(c) || (c) == '_')
#define IsIdentBody(c) (IsAlnum(c) || (c) == '_')
#define ToLower(c) (tolower((unsigned char)(c)))
#define ToUpper(c) (toupper((unsigned char)(c)))
/*
* socket operations
*/
extern int wait_for_socket(int sock, struct timeval *timeout);
extern int wait_for_sockets(int nfds, fd_set *fds, struct timeval *timeout);
#ifdef WIN32
extern int sleep(unsigned int seconds);
extern int usleep(unsigned int usec);
#endif
#endif /* PGUT_H */
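A hypothetical option table wired into pgut_getopt(), illustrating the type letters documented above ('b' bool, 'u' 32-bit unsigned, 's' string); the option names here are illustrative:

```c
static bool		progress = false;
static uint32	num_threads = 1;
static char	   *instance_name = NULL;

static pgut_option cmd_options[] =
{
	{ 'b', 'p', "progress", &progress, SOURCE_CMDLINE },
	{ 'u', 'j', "threads", &num_threads, SOURCE_CMDLINE },
	{ 's', 'i', "instance", &instance_name, SOURCE_CMDLINE },
	{ 0 }
};

/* later, e.g. in main(): pgut_getopt(argc, argv, cmd_options); */
```

The omitted source and flags fields default to zero, i.e. SOURCE_DEFAULT and no unit.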

102
src/utils/thread.c Normal file
View File

@ -0,0 +1,102 @@
/*-------------------------------------------------------------------------
*
* thread.c: - multi-platform pthread implementations.
*
* Copyright (c) 2018, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#include "thread.h"
pthread_t main_tid = 0;
#ifdef WIN32
#include <errno.h>
typedef struct win32_pthread
{
HANDLE handle;
void *(*routine) (void *);
void *arg;
void *result;
} win32_pthread;
static long mutex_initlock = 0;
static unsigned __stdcall
win32_pthread_run(void *arg)
{
win32_pthread *th = (win32_pthread *)arg;
th->result = th->routine(th->arg);
return 0;
}
int
pthread_create(pthread_t *thread,
pthread_attr_t *attr,
void *(*start_routine) (void *),
void *arg)
{
int save_errno;
win32_pthread *th;
th = (win32_pthread *)pg_malloc(sizeof(win32_pthread));
th->routine = start_routine;
th->arg = arg;
th->result = NULL;
th->handle = (HANDLE)_beginthreadex(NULL, 0, win32_pthread_run, th, 0, NULL);
if (th->handle == NULL)
{
save_errno = errno;
free(th);
return save_errno;
}
*thread = th;
return 0;
}
int
pthread_join(pthread_t th, void **thread_return)
{
if (th == NULL || th->handle == NULL)
return errno = EINVAL;
if (WaitForSingleObject(th->handle, INFINITE) != WAIT_OBJECT_0)
{
_dosmaperr(GetLastError());
return errno;
}
if (thread_return)
*thread_return = th->result;
CloseHandle(th->handle);
free(th);
return 0;
}
#endif /* WIN32 */
int
pthread_lock(pthread_mutex_t *mp)
{
#ifdef WIN32
if (*mp == NULL)
{
while (InterlockedExchange(&mutex_initlock, 1) == 1)
/* loop, another thread owns the lock */ ;
if (*mp == NULL)
{
if (pthread_mutex_init(mp, NULL))
return -1;
}
InterlockedExchange(&mutex_initlock, 0);
}
#endif
return pthread_mutex_lock(mp);
}
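A minimal sketch of guarding shared state with pthread_lock(), which on Windows also lazily initializes the mutex; unlocking uses pthread_mutex_unlock() directly, as in logger.c above:

```c
#include "thread.h"

static pthread_mutex_t counter_mutex = PTHREAD_MUTEX_INITIALIZER;
static int	shared_counter = 0;

static void
bump_counter(void)
{
	pthread_lock(&counter_mutex);
	shared_counter++;
	pthread_mutex_unlock(&counter_mutex);
}
```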

35
src/utils/thread.h Normal file
View File

@ -0,0 +1,35 @@
/*-------------------------------------------------------------------------
*
* thread.h: - multi-platform pthread implementations.
*
* Copyright (c) 2018, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#ifndef PROBACKUP_THREAD_H
#define PROBACKUP_THREAD_H
#ifdef WIN32
#include "postgres_fe.h"
#include "port/pthread-win32.h"
/* Use native win32 threads on Windows */
typedef struct win32_pthread *pthread_t;
typedef int pthread_attr_t;
#define PTHREAD_MUTEX_INITIALIZER NULL //{ NULL, 0 }
#define PTHREAD_ONCE_INIT false
extern int pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
extern int pthread_join(pthread_t th, void **thread_return);
#else
/* Use platform-dependent pthread capability */
#include <pthread.h>
#endif
extern pthread_t main_tid;
extern int pthread_lock(pthread_mutex_t *mp);
#endif /* PROBACKUP_THREAD_H */

354
src/validate.c Normal file
View File

@ -0,0 +1,354 @@
/*-------------------------------------------------------------------------
*
* validate.c: validate backup files.
*
* Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
* Portions Copyright (c) 2015-2017, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#include "pg_probackup.h"
#include <sys/stat.h>
#include <dirent.h>
#include "utils/thread.h"
static void *pgBackupValidateFiles(void *arg);
static void do_validate_instance(void);
static bool corrupted_backup_found = false;
typedef struct
{
parray *files;
bool corrupted;
/*
* Return value from the thread.
* 0 means there is no error; 1 means there is an error.
*/
int ret;
} validate_files_arg;
/*
* Validate backup files.
*/
void
pgBackupValidate(pgBackup *backup)
{
char base_path[MAXPGPATH];
char path[MAXPGPATH];
parray *files;
bool corrupted = false;
bool validation_isok = true;
/* arrays with meta info for multi-threaded validation */
pthread_t *threads;
validate_files_arg *threads_args;
int i;
/* Revalidation is attempted for DONE, ORPHAN and CORRUPT backups */
if (backup->status != BACKUP_STATUS_OK &&
backup->status != BACKUP_STATUS_DONE &&
backup->status != BACKUP_STATUS_ORPHAN &&
backup->status != BACKUP_STATUS_CORRUPT)
{
elog(WARNING, "Backup %s has status %s. Skip validation.",
base36enc(backup->start_time), status2str(backup->status));
corrupted_backup_found = true;
return;
}
if (backup->status == BACKUP_STATUS_OK || backup->status == BACKUP_STATUS_DONE)
elog(INFO, "Validating backup %s", base36enc(backup->start_time));
else
elog(INFO, "Revalidating backup %s", base36enc(backup->start_time));
if (backup->backup_mode != BACKUP_MODE_FULL &&
backup->backup_mode != BACKUP_MODE_DIFF_PAGE &&
backup->backup_mode != BACKUP_MODE_DIFF_PTRACK &&
backup->backup_mode != BACKUP_MODE_DIFF_DELTA)
elog(WARNING, "Invalid backup_mode of backup %s", base36enc(backup->start_time));
pgBackupGetPath(backup, base_path, lengthof(base_path), DATABASE_DIR);
pgBackupGetPath(backup, path, lengthof(path), DATABASE_FILE_LIST);
files = dir_read_file_list(base_path, path);
/* setup threads */
for (i = 0; i < parray_num(files); i++)
{
pgFile *file = (pgFile *) parray_get(files, i);
pg_atomic_clear_flag(&file->lock);
}
/* init thread args with own file lists */
threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads);
threads_args = (validate_files_arg *)
palloc(sizeof(validate_files_arg) * num_threads);
/* Validate files */
for (i = 0; i < num_threads; i++)
{
validate_files_arg *arg = &(threads_args[i]);
arg->files = files;
arg->corrupted = false;
/* By default, assume an error occurred */
threads_args[i].ret = 1;
pthread_create(&threads[i], NULL, pgBackupValidateFiles, arg);
}
/* Wait for threads */
for (i = 0; i < num_threads; i++)
{
validate_files_arg *arg = &(threads_args[i]);
pthread_join(threads[i], NULL);
if (arg->corrupted)
corrupted = true;
if (arg->ret == 1)
validation_isok = false;
}
if (!validation_isok)
elog(ERROR, "Data files validation failed");
pfree(threads);
pfree(threads_args);
/* cleanup */
parray_walk(files, pgFileFree);
parray_free(files);
/* Update backup status */
backup->status = corrupted ? BACKUP_STATUS_CORRUPT : BACKUP_STATUS_OK;
pgBackupWriteBackupControlFile(backup);
if (corrupted)
elog(WARNING, "Backup %s data files are corrupted", base36enc(backup->start_time));
else
elog(INFO, "Backup %s data files are valid", base36enc(backup->start_time));
}
/*
* Validate files in the backup.
* NOTE: If file is not valid, do not use ERROR log message,
* rather throw a WARNING and set arguments->corrupted = true.
* This is necessary to update backup status.
*/
static void *
pgBackupValidateFiles(void *arg)
{
int i;
validate_files_arg *arguments = (validate_files_arg *)arg;
pg_crc32 crc;
for (i = 0; i < parray_num(arguments->files); i++)
{
struct stat st;
pgFile *file = (pgFile *) parray_get(arguments->files, i);
if (!pg_atomic_test_set_flag(&file->lock))
continue;
if (interrupted)
elog(ERROR, "Interrupted during validate");
/* Validate only regular files */
if (!S_ISREG(file->mode))
continue;
/*
* Skip files that have no data, because they
* haven't changed between backups.
*/
if (file->write_size == BYTES_INVALID)
continue;
/*
* Currently we don't compute checksums for
* cfs_compressed data files, so skip them.
*/
if (file->is_cfs)
continue;
/* print progress */
elog(VERBOSE, "Validate files: (%d/%lu) %s",
i + 1, (unsigned long) parray_num(arguments->files), file->path);
if (stat(file->path, &st) == -1)
{
if (errno == ENOENT)
elog(WARNING, "Backup file \"%s\" is not found", file->path);
else
elog(WARNING, "Cannot stat backup file \"%s\": %s",
file->path, strerror(errno));
arguments->corrupted = true;
break;
}
if (file->write_size != st.st_size)
{
elog(WARNING, "Invalid size of backup file \"%s\" : " INT64_FORMAT ". Expected %lu",
file->path, file->write_size, (unsigned long) st.st_size);
arguments->corrupted = true;
break;
}
crc = pgFileGetCRC(file->path);
if (crc != file->crc)
{
elog(WARNING, "Invalid CRC of backup file \"%s\" : %X. Expected %X",
file->path, file->crc, crc);
arguments->corrupted = true;
break;
}
}
/* Data files validation is successful */
arguments->ret = 0;
return NULL;
}
/*
* Validate all backups in the backup catalog.
* If --instance option was provided, validate only backups of this instance.
*/
int
do_validate_all(void)
{
if (instance_name == NULL)
{
/* Show list of instances */
char path[MAXPGPATH];
DIR *dir;
struct dirent *dent;
/* open directory and list contents */
join_path_components(path, backup_path, BACKUPS_DIR);
dir = opendir(path);
if (dir == NULL)
elog(ERROR, "cannot open directory \"%s\": %s", path, strerror(errno));
errno = 0;
while ((dent = readdir(dir)))
{
char child[MAXPGPATH];
struct stat st;
/* skip entries pointing to the current or parent dir */
if (strcmp(dent->d_name, ".") == 0 ||
strcmp(dent->d_name, "..") == 0)
continue;
join_path_components(child, path, dent->d_name);
if (lstat(child, &st) == -1)
elog(ERROR, "cannot stat file \"%s\": %s", child, strerror(errno));
if (!S_ISDIR(st.st_mode))
continue;
instance_name = dent->d_name;
sprintf(backup_instance_path, "%s/%s/%s", backup_path, BACKUPS_DIR, instance_name);
sprintf(arclog_path, "%s/%s/%s", backup_path, "wal", instance_name);
xlog_seg_size = get_config_xlog_seg_size();
do_validate_instance();
}
}
else
{
do_validate_instance();
}
if (corrupted_backup_found)
{
elog(WARNING, "Some backups are not valid");
return 1;
}
else
elog(INFO, "All backups are valid");
return 0;
}
/*
* Validate all backups in the given instance of the backup catalog.
*/
static void
do_validate_instance(void)
{
char *current_backup_id;
int i;
parray *backups;
pgBackup *current_backup = NULL;
elog(INFO, "Validate backups of the instance '%s'", instance_name);
/* Get exclusive lock of backup catalog */
catalog_lock();
/* Get list of all backups sorted in order of descending start time */
backups = catalog_get_backup_list(INVALID_BACKUP_ID);
/* Examine backups one by one and validate them */
for (i = 0; i < parray_num(backups); i++)
{
current_backup = (pgBackup *) parray_get(backups, i);
/* Validate each backup along with its xlog files. */
pgBackupValidate(current_backup);
/* Ensure that the backup has a valid chain of parent backups */
if (current_backup->status == BACKUP_STATUS_OK)
{
pgBackup *base_full_backup = current_backup;
if (current_backup->backup_mode != BACKUP_MODE_FULL)
{
base_full_backup = find_parent_backup(current_backup);
if (base_full_backup == NULL)
elog(ERROR, "Valid full backup for backup %s is not found.",
base36enc(current_backup->start_time));
}
/* Validate corresponding WAL files */
validate_wal(current_backup, arclog_path, 0,
0, 0, base_full_backup->tli, xlog_seg_size);
}
/* Mark every incremental backup between the corrupted backup and the nearest FULL backup as orphaned */
if (current_backup->status == BACKUP_STATUS_CORRUPT)
{
int j;
corrupted_backup_found = true;
current_backup_id = base36enc_dup(current_backup->start_time);
for (j = i - 1; j >= 0; j--)
{
pgBackup *backup = (pgBackup *) parray_get(backups, j);
if (backup->backup_mode == BACKUP_MODE_FULL)
break;
if (backup->status != BACKUP_STATUS_OK)
continue;
else
{
backup->status = BACKUP_STATUS_ORPHAN;
pgBackupWriteBackupControlFile(backup);
elog(WARNING, "Backup %s is orphaned because his parent %s is corrupted",
base36enc(backup->start_time), current_backup_id);
}
}
free(current_backup_id);
}
}
/* cleanup */
parray_walk(backups, pgBackupFree);
parray_free(backups);
}

24
tests/Readme.md Normal file
View File

@ -0,0 +1,24 @@
[see the wiki](https://confluence.postgrespro.ru/display/DEV/pg_probackup)
```
Note: For now the tests run only on Linux
```
```
Check physical correctness of restored instances:
Apply this patch to disable HINT BITS: https://gist.github.com/gsmol/2bb34fd3ba31984369a72cc1c27a36b6
export PG_PROBACKUP_PARANOIA=ON
Check archive compression:
export ARCHIVE_COMPRESSION=ON
Specify the path to the pg_probackup binary. By default tests use <Path to Git repository>/pg_probackup/
export PGPROBACKUPBIN=<path to pg_probackup>
Usage:
pip install testgres
pip install psycopg2
export PG_CONFIG=/path/to/pg_config
python -m unittest [-v] tests[.specific_module][.class.test]
```
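For example, a single test from the archive suite can be run like this (paths are illustrative):
```
export PG_CONFIG=/usr/lib/postgresql/10/bin/pg_config
python -m unittest -v tests.archive.ArchiveTest.test_pgpro434_1
```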

69
tests/__init__.py Normal file
View File

@ -0,0 +1,69 @@
import unittest
from . import init_test, option_test, show_test, \
backup_test, delete_test, restore_test, validate_test, \
retention_test, ptrack_clean, ptrack_cluster, \
ptrack_move_to_tablespace, ptrack_recovery, ptrack_vacuum, \
ptrack_vacuum_bits_frozen, ptrack_vacuum_bits_visibility, \
ptrack_vacuum_full, ptrack_vacuum_truncate, pgpro560, pgpro589, \
false_positive, replica, compression, page, ptrack, archive, \
exclude, cfs_backup, cfs_restore, cfs_validate_backup, auth_test
def load_tests(loader, tests, pattern):
suite = unittest.TestSuite()
# suite.addTests(loader.loadTestsFromModule(auth_test))
suite.addTests(loader.loadTestsFromModule(archive))
suite.addTests(loader.loadTestsFromModule(backup_test))
suite.addTests(loader.loadTestsFromModule(cfs_backup))
# suite.addTests(loader.loadTestsFromModule(cfs_restore))
# suite.addTests(loader.loadTestsFromModule(cfs_validate_backup))
# suite.addTests(loader.loadTestsFromModule(logging))
suite.addTests(loader.loadTestsFromModule(compression))
suite.addTests(loader.loadTestsFromModule(delete_test))
suite.addTests(loader.loadTestsFromModule(exclude))
suite.addTests(loader.loadTestsFromModule(false_positive))
suite.addTests(loader.loadTestsFromModule(init_test))
suite.addTests(loader.loadTestsFromModule(option_test))
suite.addTests(loader.loadTestsFromModule(page))
suite.addTests(loader.loadTestsFromModule(ptrack))
suite.addTests(loader.loadTestsFromModule(ptrack_clean))
suite.addTests(loader.loadTestsFromModule(ptrack_cluster))
suite.addTests(loader.loadTestsFromModule(ptrack_move_to_tablespace))
suite.addTests(loader.loadTestsFromModule(ptrack_recovery))
suite.addTests(loader.loadTestsFromModule(ptrack_vacuum))
suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_bits_frozen))
suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_bits_visibility))
suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_full))
suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_truncate))
suite.addTests(loader.loadTestsFromModule(replica))
suite.addTests(loader.loadTestsFromModule(restore_test))
suite.addTests(loader.loadTestsFromModule(retention_test))
suite.addTests(loader.loadTestsFromModule(show_test))
suite.addTests(loader.loadTestsFromModule(validate_test))
suite.addTests(loader.loadTestsFromModule(pgpro560))
suite.addTests(loader.loadTestsFromModule(pgpro589))
return suite
# test_pgpro434_2 unexpected success
# ToDo:
# archive:
# a discrepancy between the instance's SYSTEMID and the node's SYSTEMID should make archive-push refuse to work
# replica:
# backup should exit with correct error message if some master* option is missing
# --master* options should not work when backing up the master
# logging:
# https://jira.postgrespro.ru/browse/PGPRO-584
# https://jira.postgrespro.ru/secure/attachment/20420/20420_doc_logging.md
# ptrack:
# ptrack backup on replica should work correctly
# archive:
# immediate recovery and full recovery
# backward compatibility:
# previous version catalog must be readable by newer version
# incremental chain from previous version can be continued
# backups from previous version can be restored
# 10vanilla_1.3ptrack +
# 10vanilla+
# 9.6vanilla_1.3ptrack +

833
tests/archive.py Normal file
View File

@ -0,0 +1,833 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, archive_script
from datetime import datetime, timedelta
import subprocess
from sys import exit
from time import sleep
module_name = 'archive'
class ArchiveTest(ProbackupTest, unittest.TestCase):
# @unittest.expectedFailure
# @unittest.skip("skip")
def test_pgpro434_1(self):
"""Description in jira issue PGPRO-434"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'max_wal_senders': '2',
'checkpoint_timeout': '30s'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
node.safe_psql(
"postgres",
"create table t_heap as select 1 as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector from "
"generate_series(0,100) i")
result = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.backup_node(
backup_dir, 'node', node,
options=["--log-level-file=verbose"])
node.cleanup()
self.restore_node(
backup_dir, 'node', node)
node.slow_start()
# Recreate backup catalogue
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
# Make backup
self.backup_node(
backup_dir, 'node', node,
options=["--log-level-file=verbose"])
node.cleanup()
# Restore Database
self.restore_node(
backup_dir, 'node', node,
options=["--recovery-target-action=promote"])
node.slow_start()
self.assertEqual(
result, node.safe_psql("postgres", "SELECT * FROM t_heap"),
'data after restore not equal to original data')
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_pgpro434_2(self):
"""
Check that timelines are correct.
WAITING PGPRO-1053 for --immediate
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'max_wal_senders': '2',
'checkpoint_timeout': '30s'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# FIRST TIMELINE
node.safe_psql(
"postgres",
"create table t_heap as select 1 as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,100) i")
backup_id = self.backup_node(backup_dir, 'node', node)
node.safe_psql(
"postgres",
"insert into t_heap select 100501 as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,1) i")
# SECOND TIMELINE
node.cleanup()
self.restore_node(
backup_dir, 'node', node,
options=['--immediate', '--recovery-target-action=promote'])
node.slow_start()
if self.verbose:
print(node.safe_psql(
"postgres",
"select redo_wal_file from pg_control_checkpoint()"))
self.assertFalse(
node.execute(
"postgres",
"select exists(select 1 "
"from t_heap where id = 100501)")[0][0],
'data after restore not equal to original data')
node.safe_psql(
"postgres",
"insert into t_heap select 2 as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(100,200) i")
backup_id = self.backup_node(backup_dir, 'node', node)
node.safe_psql(
"postgres",
"insert into t_heap select 100502 as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,256) i")
# THIRD TIMELINE
node.cleanup()
self.restore_node(
backup_dir, 'node', node,
options=['--immediate', '--recovery-target-action=promote'])
node.slow_start()
if self.verbose:
print(
node.safe_psql(
"postgres",
"select redo_wal_file from pg_control_checkpoint()"))
node.safe_psql(
"postgres",
"insert into t_heap select 3 as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(200,300) i")
backup_id = self.backup_node(backup_dir, 'node', node)
result = node.safe_psql("postgres", "SELECT * FROM t_heap")
node.safe_psql(
"postgres",
"insert into t_heap select 100503 as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,256) i")
# FOURTH TIMELINE
node.cleanup()
self.restore_node(
backup_dir, 'node', node,
options=['--immediate', '--recovery-target-action=promote'])
node.slow_start()
if self.verbose:
print('Fourth timeline')
print(node.safe_psql(
"postgres",
"select redo_wal_file from pg_control_checkpoint()"))
# FIFTH TIMELINE
node.cleanup()
self.restore_node(
backup_dir, 'node', node,
options=['--immediate', '--recovery-target-action=promote'])
node.slow_start()
if self.verbose:
print('Fifth timeline')
print(node.safe_psql(
"postgres",
"select redo_wal_file from pg_control_checkpoint()"))
# SIXTH TIMELINE
node.cleanup()
self.restore_node(
backup_dir, 'node', node,
options=['--immediate', '--recovery-target-action=promote'])
node.slow_start()
if self.verbose:
print('Sixth timeline')
print(node.safe_psql(
"postgres",
"select redo_wal_file from pg_control_checkpoint()"))
self.assertFalse(
node.execute(
"postgres",
"select exists(select 1 from t_heap where id > 100500)")[0][0],
'data after restore not equal to original data')
self.assertEqual(
result,
node.safe_psql(
"postgres",
"SELECT * FROM t_heap"),
'data after restore not equal to original data')
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_pgpro434_3(self):
"""Check pg_stop_backup_timeout, needed backup_timeout"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'max_wal_senders': '2',
'checkpoint_timeout': '30s'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
archive_script_path = os.path.join(backup_dir, 'archive_script.sh')
with open(archive_script_path, 'w+') as f:
f.write(
archive_script.format(
backup_dir=backup_dir, node_name='node', count_limit=2))
st = os.stat(archive_script_path)
os.chmod(archive_script_path, st.st_mode | 0o111)
node.append_conf(
'postgresql.auto.conf', "archive_command = '{0} %p %f'".format(
archive_script_path))
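# The archive_script template (defined earlier in this module) is
# parameterized with count_limit=2; the intent is that archiving starts
# failing after that many segments, so pg_stop_backup never sees its
# final WAL archived and pg_probackup has to cancel the backup.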
node.slow_start()
try:
self.backup_node(
backup_dir, 'node', node,
options=[
"--archive-timeout=60",
"--log-level-file=verbose",
"--stream"]
)
# we should die here because exception is what we expect to happen
self.assertEqual(
1, 0,
"Expecting Error because pg_stop_backup failed to answer.\n "
"Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
"ERROR: pg_stop_backup doesn't answer" in e.message and
"cancel it" in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
log_file = os.path.join(node.logs_dir, 'postgresql.log')
with open(log_file, 'r') as f:
log_content = f.read()
self.assertNotIn(
'FailedAssertion',
log_content,
'PostgreSQL crashed because of a failed assert')
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_arhive_push_file_exists(self):
"""Archive-push if file exists"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'max_wal_senders': '2',
'checkpoint_timeout': '30s'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
wals_dir = os.path.join(backup_dir, 'wal', 'node')
if self.archive_compress:
file = os.path.join(wals_dir, '000000010000000000000001.gz')
else:
file = os.path.join(wals_dir, '000000010000000000000001')
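# A garbage file named like the first WAL segment is planted in the
# archive ahead of time, so the first archive-push attempt must fail
# with "WAL segment ... already exists".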
with open(file, 'ab') as f:
f.write(b"blablablaadssaaaaaaaaaaaaaaa")
f.flush()
node.slow_start()
node.safe_psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,100500) i")
log_file = os.path.join(node.logs_dir, 'postgresql.log')
with open(log_file, 'r') as f:
log_content = f.read()
self.assertTrue(
'LOG: archive command failed with exit code 1' in log_content and
'DETAIL: The failed archive command was:' in log_content and
'INFO: pg_probackup archive-push from' in log_content and
'ERROR: WAL segment "{0}" already exists.'.format(file) in log_content,
'Expecting error messages about failed archive_command'
)
self.assertFalse('pg_probackup archive-push completed successfully' in log_content)
os.remove(file)
self.switch_wal_segment(node)
sleep(5)
with open(log_file, 'r') as f:
log_content = f.read()
self.assertTrue(
'pg_probackup archive-push completed successfully' in log_content,
'Expecting messages about successful execution of archive_command')
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_arhive_push_file_exists_overwrite(self):
"""Archive-push if file exists"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'max_wal_senders': '2',
'checkpoint_timeout': '30s'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
wals_dir = os.path.join(backup_dir, 'wal', 'node')
if self.archive_compress:
file = os.path.join(wals_dir, '000000010000000000000001.gz')
else:
file = os.path.join(wals_dir, '000000010000000000000001')
with open(file, 'ab') as f:
f.write(b"blablablaadssaaaaaaaaaaaaaaa")
f.flush()
node.slow_start()
node.safe_psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,100500) i")
log_file = os.path.join(node.logs_dir, 'postgresql.log')
with open(log_file, 'r') as f:
log_content = f.read()
self.assertTrue(
'LOG: archive command failed with exit code 1' in log_content and
'DETAIL: The failed archive command was:' in log_content and
'INFO: pg_probackup archive-push from' in log_content and
'ERROR: WAL segment "{0}" already exists.'.format(file) in log_content,
'Expecting error messages about failed archive_command'
)
self.assertFalse('pg_probackup archive-push completed successfully' in log_content)
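# Re-enable archiving with overwrite=True, which is expected to add the
# --overwrite flag to archive-push so the stale segment gets replaced
# instead of raising an error.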
self.set_archiving(backup_dir, 'node', node, overwrite=True)
node.reload()
self.switch_wal_segment(node)
sleep(2)
with open(log_file, 'r') as f:
log_content = f.read()
self.assertTrue(
'pg_probackup archive-push completed successfully' in log_content,
'Expecting messages about successful execution of archive_command')
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.expectedFailure
# @unittest.skip("skip")
def test_replica_archive(self):
"""
make a node without archiving, take a stream backup and
turn it into a replica, set up archiving on the replica,
take an archive backup from the replica
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'max_wal_senders': '2',
'checkpoint_timeout': '30s',
'max_wal_size': '1GB'}
)
self.init_pb(backup_dir)
# ADD INSTANCE 'MASTER'
self.add_instance(backup_dir, 'master', master)
master.slow_start()
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
master.psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,256) i")
self.backup_node(backup_dir, 'master', master, options=['--stream'])
before = master.safe_psql("postgres", "SELECT * FROM t_heap")
# Settings for Replica
self.restore_node(backup_dir, 'master', replica)
self.set_replica(master, replica, synchronous=True)
self.add_instance(backup_dir, 'replica', replica)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.slow_start(replica=True)
# Check data correctness on replica
after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
# Change data on master, take FULL backup from replica,
# restore taken backup and check that restored data equal
# to original data
master.psql(
"postgres",
"insert into t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(256,512) i")
before = master.safe_psql("postgres", "SELECT * FROM t_heap")
# Give the replica a moment to catch up
sleep(1)
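# A backup taken from a standby cannot switch WAL itself, so pg_probackup
# is also pointed at the master (--master-host/--master-db/--master-port)
# to drive the WAL traffic the replica backup waits for.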
backup_id = self.backup_node(
backup_dir, 'replica', replica,
options=[
'--archive-timeout=30',
'--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(master.port)])
self.validate_pb(backup_dir, 'replica')
self.assertEqual(
'OK', self.show_pb(backup_dir, 'replica', backup_id)['status'])
# RESTORE FULL BACKUP TAKEN FROM replica
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname))
node.cleanup()
self.restore_node(backup_dir, 'replica', data_dir=node.data_dir)
node.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node.port))
node.slow_start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
# Change data on master, make PAGE backup from replica,
# restore taken backup and check that restored data equal
# to original data
master.psql(
"postgres",
"insert into t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(512,768) i")
before = master.safe_psql("postgres", "SELECT * FROM t_heap")
backup_id = self.backup_node(
backup_dir, 'replica',
replica, backup_type='page',
options=[
'--archive-timeout=30', '--log-level-file=verbose',
'--master-host=localhost', '--master-db=postgres',
'--master-port={0}'.format(master.port)]
)
self.validate_pb(backup_dir, 'replica')
self.assertEqual(
'OK', self.show_pb(backup_dir, 'replica', backup_id)['status'])
# RESTORE PAGE BACKUP TAKEN FROM replica
node.cleanup()
self.restore_node(
backup_dir, 'replica', data_dir=node.data_dir, backup_id=backup_id)
node.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node.port))
node.slow_start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.expectedFailure
# @unittest.skip("skip")
def test_master_and_replica_parallel_archiving(self):
"""
make node 'master' with archiving,
take an archive backup and turn it into a replica,
set up archiving on the replica, take an archive backup
from the replica, then another one from the master
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'checkpoint_timeout': '30s'}
)
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.init_pb(backup_dir)
# ADD INSTANCE 'MASTER'
self.add_instance(backup_dir, 'master', master)
self.set_archiving(backup_dir, 'master', master)
master.slow_start()
master.psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,10000) i")
# TAKE FULL ARCHIVE BACKUP FROM MASTER
self.backup_node(backup_dir, 'master', master)
# GET LOGICAL CONTENT FROM MASTER
before = master.safe_psql("postgres", "SELECT * FROM t_heap")
# GET PHYSICAL CONTENT FROM MASTER
pgdata_master = self.pgdata_content(master.data_dir)
# Settings for Replica
self.restore_node(backup_dir, 'master', replica)
# CHECK PHYSICAL CORRECTNESS on REPLICA
pgdata_replica = self.pgdata_content(replica.data_dir)
self.compare_pgdata(pgdata_master, pgdata_replica)
self.set_replica(master, replica, synchronous=True)
# ADD INSTANCE REPLICA
self.add_instance(backup_dir, 'replica', replica)
# SET ARCHIVING FOR REPLICA
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.slow_start(replica=True)
# CHECK LOGICAL CORRECTNESS on REPLICA
after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
# TAKE FULL ARCHIVE BACKUP FROM REPLICA
backup_id = self.backup_node(
backup_dir, 'replica', replica,
options=[
'--archive-timeout=20',
'--log-level-file=verbose',
'--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(master.port)]
)
self.validate_pb(backup_dir, 'replica')
self.assertEqual(
'OK', self.show_pb(backup_dir, 'replica', backup_id)['status'])
# TAKE FULL ARCHIVE BACKUP FROM MASTER
backup_id = self.backup_node(backup_dir, 'master', master)
self.validate_pb(backup_dir, 'master')
self.assertEqual(
'OK', self.show_pb(backup_dir, 'master', backup_id)['status'])
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.expectedFailure
# @unittest.skip("skip")
def test_master_and_replica_concurrent_archiving(self):
"""
make node 'master' with archiving,
take an archive backup and turn it into a replica;
master and replica then archive concurrently, and archive
backups are taken from both replica and master
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'checkpoint_timeout': '30s'}
)
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.init_pb(backup_dir)
# ADD INSTANCE 'MASTER'
self.add_instance(backup_dir, 'master', master)
self.set_archiving(backup_dir, 'master', master)
master.slow_start()
master.psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,10000) i")
# TAKE FULL ARCHIVE BACKUP FROM MASTER
self.backup_node(backup_dir, 'master', master)
# GET LOGICAL CONTENT FROM MASTER
before = master.safe_psql("postgres", "SELECT * FROM t_heap")
# GET PHYSICAL CONTENT FROM MASTER
pgdata_master = self.pgdata_content(master.data_dir)
# Settings for Replica
self.restore_node(
backup_dir, 'master', replica)
# CHECK PHYSICAL CORRECTNESS on REPLICA
pgdata_replica = self.pgdata_content(replica.data_dir)
self.compare_pgdata(pgdata_master, pgdata_replica)
self.set_replica(master, replica, synchronous=True)
# ADD INSTANCE REPLICA
# self.add_instance(backup_dir, 'replica', replica)
# SET ARCHIVING FOR REPLICA
# self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.slow_start(replica=True)
# CHECK LOGICAL CORRECTNESS on REPLICA
after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
master.psql(
"postgres",
"insert into t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,10000) i")
# TAKE FULL ARCHIVE BACKUP FROM REPLICA
backup_id = self.backup_node(
backup_dir, 'master', replica,
options=[
'--archive-timeout=30',
'--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(master.port)])
self.validate_pb(backup_dir, 'master')
self.assertEqual(
'OK', self.show_pb(backup_dir, 'master', backup_id)['status'])
# TAKE FULL ARCHIVE BACKUP FROM MASTER
backup_id = self.backup_node(backup_dir, 'master', master)
self.validate_pb(backup_dir, 'master')
self.assertEqual(
'OK', self.show_pb(backup_dir, 'master', backup_id)['status'])
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.expectedFailure
# @unittest.skip("skip")
def test_archive_pg_receivexlog(self):
"""Test backup with pg_receivexlog wal delivary method"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'max_wal_senders': '2',
'checkpoint_timeout': '30s'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
if self.get_version(node) < 100000:
pg_receivexlog_path = self.get_bin_path('pg_receivexlog')
else:
pg_receivexlog_path = self.get_bin_path('pg_receivewal')
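# pg_receivexlog (pg_receivewal on 10+) streams WAL over the replication
# protocol straight into the instance's WAL catalogue, replacing
# archive_command as the delivery method; note that set_archiving is not
# called in this test.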
pg_receivexlog = self.run_binary(
[
pg_receivexlog_path, '-p', str(node.port), '--synchronous',
'-D', os.path.join(backup_dir, 'wal', 'node')
], async=True)
if pg_receivexlog.returncode:
self.assertFalse(
True,
'Failed to start pg_receivexlog: {0}'.format(
pg_receivexlog.communicate()[1]))
node.safe_psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,10000) i")
self.backup_node(backup_dir, 'node', node)
# PAGE
node.safe_psql(
"postgres",
"insert into t_heap select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(10000,20000) i")
self.backup_node(
backup_dir,
'node',
node,
backup_type='page'
)
result = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.validate_pb(backup_dir)
# Check data correctness
node.cleanup()
self.restore_node(backup_dir, 'node', node)
node.slow_start()
self.assertEqual(
result,
node.safe_psql(
"postgres", "SELECT * FROM t_heap"
),
'data after restore not equal to original data')
# Clean after yourself
pg_receivexlog.kill()
self.del_test_dir(module_name, fname)
# @unittest.expectedFailure
# @unittest.skip("skip")
def test_archive_pg_receivexlog_compression_pg10(self):
"""Test backup with pg_receivewal compressed wal delivary method"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'max_wal_senders': '2',
'checkpoint_timeout': '30s'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
if self.get_version(node) < self.version_to_num('10.0'):
return self.skipTest('You need PostgreSQL 10 for this test')
else:
pg_receivexlog_path = self.get_bin_path('pg_receivewal')
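# -Z 9 makes pg_receivewal gzip-compress finished segments into .gz
# files, which pg_probackup is expected to read back transparently
# during validation and restore.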
pg_receivexlog = self.run_binary(
[
pg_receivexlog_path, '-p', str(node.port), '--synchronous',
'-Z', '9', '-D', os.path.join(backup_dir, 'wal', 'node')
], async=True)
if pg_receivexlog.returncode:
self.assertFalse(
True,
'Failed to start pg_receivexlog: {0}'.format(
pg_receivexlog.communicate()[1]))
node.safe_psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,10000) i")
self.backup_node(backup_dir, 'node', node)
# PAGE
node.safe_psql(
"postgres",
"insert into t_heap select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(10000,20000) i")
self.backup_node(
backup_dir, 'node', node,
backup_type='page'
)
result = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.validate_pb(backup_dir)
# Check data correctness
node.cleanup()
self.restore_node(backup_dir, 'node', node)
node.slow_start()
self.assertEqual(
result, node.safe_psql("postgres", "SELECT * FROM t_heap"),
'data after restore not equal to original data')
# Clean after yourself
pg_receivexlog.kill()
self.del_test_dir(module_name, fname)

391
tests/auth_test.py Normal file

@ -0,0 +1,391 @@
"""
The test suite checks the behavior of the pg_probackup utility when a password is required to connect to the PostgreSQL instance.
- https://confluence.postgrespro.ru/pages/viewpage.action?pageId=16777522
"""
import os
import unittest
import signal
import time
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
from testgres import StartNodeException
module_name = 'auth_test'
skip_test = False
try:
from pexpect import *
except ImportError:
skip_test = True
class SimpleAuthTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_backup_via_unpriviledged_user(self):
"""
Make node, create an unprivileged user, try to
run backups without EXECUTE rights on
certain functions
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.safe_psql("postgres", "CREATE ROLE backup with LOGIN")
try:
self.backup_node(
backup_dir, 'node', node, options=['-U', 'backup'])
self.assertEqual(
1, 0,
"Expecting Error due to missing grant on EXECUTE.")
except ProbackupException as e:
self.assertIn(
"ERROR: query failed: ERROR: permission denied "
"for function pg_start_backup", e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
node.safe_psql(
"postgres",
"GRANT EXECUTE ON FUNCTION"
" pg_start_backup(text, boolean, boolean) TO backup;")
time.sleep(1)
try:
self.backup_node(
backup_dir, 'node', node, options=['-U', 'backup'])
self.assertEqual(
1, 0,
"Expecting Error due to missing grant on EXECUTE.")
except ProbackupException as e:
self.assertIn(
"ERROR: query failed: ERROR: permission denied for function "
"pg_create_restore_point\nquery was: "
"SELECT pg_catalog.pg_create_restore_point($1)", e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
node.safe_psql(
"postgres",
"GRANT EXECUTE ON FUNCTION"
" pg_create_restore_point(text) TO backup;")
time.sleep(1)
try:
self.backup_node(
backup_dir, 'node', node, options=['-U', 'backup'])
self.assertEqual(
1, 0,
"Expecting Error due to missing grant on EXECUTE.")
except ProbackupException as e:
self.assertIn(
"ERROR: query failed: ERROR: permission denied "
"for function pg_stop_backup", e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
if self.get_version(node) < self.version_to_num('10.0'):
node.safe_psql(
"postgres",
"GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean) TO backup")
else:
node.safe_psql(
"postgres",
"GRANT EXECUTE ON FUNCTION "
"pg_stop_backup(boolean, boolean) TO backup")
# Do this for ptrack backups
node.safe_psql(
"postgres",
"GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup")
self.backup_node(
backup_dir, 'node', node, options=['-U', 'backup'])
node.safe_psql("postgres", "CREATE DATABASE test1")
self.backup_node(
backup_dir, 'node', node, options=['-U', 'backup'])
node.safe_psql(
"test1", "create table t1 as select generate_series(0,100)")
node.append_conf("postgresql.auto.conf", "ptrack_enable = 'on'")
node.restart()
try:
self.backup_node(
backup_dir, 'node', node, options=['-U', 'backup'])
self.assertEqual(
1, 0,
"Expecting Error due to missing grant on clearing ptrack_files.")
except ProbackupException as e:
self.assertIn(
"ERROR: must be superuser or replication role to clear ptrack files\n"
"query was: SELECT pg_catalog.pg_ptrack_clear()", e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
time.sleep(1)
try:
self.backup_node(
backup_dir, 'node', node,
backup_type='ptrack', options=['-U', 'backup'])
self.assertEqual(
1, 0,
"Expecting Error due to missing grant on clearing ptrack_files.")
except ProbackupException as e:
self.assertIn(
"ERROR: must be superuser or replication role read ptrack files\n"
"query was: select pg_catalog.pg_ptrack_control_lsn()", e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
node.safe_psql(
"postgres",
"ALTER ROLE backup REPLICATION")
time.sleep(1)
# FULL
self.backup_node(
backup_dir, 'node', node,
options=['-U', 'backup'])
# PTRACK
self.backup_node(
backup_dir, 'node', node,
backup_type='ptrack', options=['-U', 'backup'])
# Clean after yourself
self.del_test_dir(module_name, fname)
class AuthTest(unittest.TestCase):
pb = None
node = None
@classmethod
def setUpClass(cls):
super(AuthTest, cls).setUpClass()
cls.pb = ProbackupTest()
cls.backup_dir = os.path.join(cls.pb.tmp_path, module_name, 'backup')
cls.node = cls.pb.make_simple_node(
base_dir="{}/node".format(module_name),
set_replication=True,
initdb_params=['--data-checksums', '--auth-host=md5'],
pg_options={
'wal_level': 'replica'
}
)
modify_pg_hba(cls.node)
cls.pb.init_pb(cls.backup_dir)
cls.pb.add_instance(cls.backup_dir, cls.node.name, cls.node)
cls.pb.set_archiving(cls.backup_dir, cls.node.name, cls.node)
try:
cls.node.start()
except StartNodeException:
raise unittest.skip("Node hasn't started")
cls.node.safe_psql("postgres",
"CREATE ROLE backup WITH LOGIN PASSWORD 'password'; \
GRANT USAGE ON SCHEMA pg_catalog TO backup; \
GRANT EXECUTE ON FUNCTION current_setting(text) TO backup; \
GRANT EXECUTE ON FUNCTION pg_is_in_recovery() TO backup; \
GRANT EXECUTE ON FUNCTION pg_start_backup(text, boolean, boolean) TO backup; \
GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup; \
GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean) TO backup; \
GRANT EXECUTE ON FUNCTION pg_create_restore_point(text) TO backup; \
GRANT EXECUTE ON FUNCTION pg_switch_xlog() TO backup; \
GRANT EXECUTE ON FUNCTION txid_current() TO backup; \
GRANT EXECUTE ON FUNCTION txid_current_snapshot() TO backup; \
GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup; \
GRANT EXECUTE ON FUNCTION pg_ptrack_clear() TO backup; \
GRANT EXECUTE ON FUNCTION pg_ptrack_get_and_clear(oid, oid) TO backup;")
cls.pgpass_file = os.path.join(os.path.expanduser('~'), '.pgpass')
@classmethod
def tearDownClass(cls):
cls.node.cleanup()
cls.pb.del_test_dir(module_name, '')
@unittest.skipIf(skip_test, "Module pexpect isn't installed. You need to install it.")
def setUp(self):
self.cmd = ['backup',
'-B', self.backup_dir,
'--instance', self.node.name,
'-h', '127.0.0.1',
'-p', str(self.node.port),
'-U', 'backup',
'-b', 'FULL'
]
def tearDown(self):
if "PGPASSWORD" in self.pb.test_env.keys():
del self.pb.test_env["PGPASSWORD"]
if "PGPASSWORD" in self.pb.test_env.keys():
del self.pb.test_env["PGPASSFILE"]
try:
os.remove(self.pgpass_file)
except OSError:
pass
def test_empty_password(self):
""" Test case: PGPB_AUTH03 - zero password length """
try:
self.assertIn("ERROR: no password supplied",
str(run_pb_with_auth([self.pb.probackup_path] + self.cmd, '\0\r\n'))
)
except (TIMEOUT, ExceptionPexpect) as e:
self.fail(e.value)
def test_wrong_password(self):
""" Test case: PGPB_AUTH04 - incorrect password """
try:
self.assertIn("password authentication failed",
str(run_pb_with_auth([self.pb.probackup_path] + self.cmd, 'wrong_password\r\n'))
)
except (TIMEOUT, ExceptionPexpect) as e:
self.fail(e.value)
def test_right_password(self):
""" Test case: PGPB_AUTH01 - correct password """
try:
self.assertIn("completed",
str(run_pb_with_auth([self.pb.probackup_path] + self.cmd, 'password\r\n'))
)
except (TIMEOUT, ExceptionPexpect) as e:
self.fail(e.value)
def test_right_password_and_wrong_pgpass(self):
""" Test case: PGPB_AUTH05 - correct password and incorrect .pgpass (-W)"""
line = ":".join(['127.0.0.1', str(self.node.port), 'postgres', 'backup', 'wrong_password'])
create_pgpass(self.pgpass_file, line)
try:
self.assertIn("completed",
str(run_pb_with_auth([self.pb.probackup_path] + self.cmd + ['-W'], 'password\r\n'))
)
except (TIMEOUT, ExceptionPexpect) as e:
self.fail(e.value)
def test_ctrl_c_event(self):
""" Test case: PGPB_AUTH02 - send interrupt signal """
try:
run_pb_with_auth([self.pb.probackup_path] + self.cmd, kill=True)
except TIMEOUT:
self.fail("Error: CTRL+C event ignored")
def test_pgpassfile_env(self):
""" Test case: PGPB_AUTH06 - set environment var PGPASSFILE """
path = os.path.join(self.pb.tmp_path, module_name, 'pgpass.conf')
line = ":".join(['127.0.0.1', str(self.node.port), 'postgres', 'backup', 'password'])
create_pgpass(path, line)
self.pb.test_env["PGPASSFILE"] = path
try:
self.assertEqual(
"OK",
self.pb.show_pb(self.backup_dir, self.node.name, self.pb.run_pb(self.cmd + ['-w']))["status"],
"ERROR: Full backup status is not valid."
)
except ProbackupException as e:
self.fail(e)
def test_pgpass(self):
""" Test case: PGPB_AUTH07 - Create file .pgpass in home dir. """
line = ":".join(['127.0.0.1', str(self.node.port), 'postgres', 'backup', 'password'])
create_pgpass(self.pgpass_file, line)
try:
self.assertEqual(
"OK",
self.pb.show_pb(self.backup_dir, self.node.name, self.pb.run_pb(self.cmd + ['-w']))["status"],
"ERROR: Full backup status is not valid."
)
except ProbackupException as e:
self.fail(e)
def test_pgpassword(self):
""" Test case: PGPB_AUTH08 - set environment var PGPASSWORD """
self.pb.test_env["PGPASSWORD"] = "password"
try:
self.assertEqual(
"OK",
self.pb.show_pb(self.backup_dir, self.node.name, self.pb.run_pb(self.cmd + ['-w']))["status"],
"ERROR: Full backup status is not valid."
)
except ProbackupException as e:
self.fail(e)
def test_pgpassword_and_wrong_pgpass(self):
""" Test case: PGPB_AUTH09 - Check priority between PGPASSWORD and .pgpass file"""
line = ":".join(['127.0.0.1', str(self.node.port), 'postgres', 'backup', 'wrong_password'])
create_pgpass(self.pgpass_file, line)
self.pb.test_env["PGPASSWORD"] = "password"
try:
self.assertEqual(
"OK",
self.pb.show_pb(self.backup_dir, self.node.name, self.pb.run_pb(self.cmd + ['-w']))["status"],
"ERROR: Full backup status is not valid."
)
except ProbackupException as e:
self.fail(e)
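# run_pb_with_auth (below) drives an interactive pg_probackup run via
# pexpect: it waits for the password prompt, then either sends SIGINT to
# emulate Ctrl+C or types the supplied password and returns everything
# printed before EOF.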
def run_pb_with_auth(cmd, password=None, kill=False):
try:
with spawn(" ".join(cmd), encoding='utf-8', timeout=10) as probackup:
result = probackup.expect(u"Password for user .*:", 5)
if kill:
probackup.kill(signal.SIGINT)
elif result == 0:
probackup.sendline(password)
probackup.expect(EOF)
return probackup.before
else:
raise ExceptionPexpect("Other pexpect errors.")
except TIMEOUT:
raise TIMEOUT("Timeout error.")
except ExceptionPexpect:
raise ExceptionPexpect("Pexpect error.")
def modify_pg_hba(node):
"""
Description:
Add trust authentication for user postgres. Need for add new role and set grant.
:param node:
:return None:
"""
hba_conf = os.path.join(node.data_dir, "pg_hba.conf")
with open(hba_conf, 'r+') as fio:
data = fio.read()
fio.seek(0)
fio.write('host\tall\tpostgres\t127.0.0.1/0\ttrust\n' + data)
def create_pgpass(path, line):
with open(path, 'w') as passfile:
# host:port:db:username:password
passfile.write(line)
os.chmod(path, 0o600)

522
tests/backup_test.py Normal file

@ -0,0 +1,522 @@
import unittest
import os
from time import sleep
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
from .helpers.cfs_helpers import find_by_name
module_name = 'backup'
class BackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
# PGPRO-707
def test_backup_modes_archive(self):
"""standart backup modes with ARCHIVE WAL method"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'ptrack_enable': 'on'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
backup_id = self.backup_node(backup_dir, 'node', node)
show_backup = self.show_pb(backup_dir, 'node')[0]
self.assertEqual(show_backup['status'], "OK")
self.assertEqual(show_backup['backup-mode'], "FULL")
# postmaster.pid and postmaster.opts shouldn't be copied
excluded = True
db_dir = os.path.join(
backup_dir, "backups", 'node', backup_id, "database")
for f in os.listdir(db_dir):
if (
os.path.isfile(os.path.join(db_dir, f)) and
(
f == "postmaster.pid" or
f == "postmaster.opts"
)
):
excluded = False
self.assertEqual(excluded, True)
# page backup mode
page_backup_id = self.backup_node(
backup_dir, 'node', node, backup_type="page")
# print self.show_pb(node)
show_backup = self.show_pb(backup_dir, 'node')[1]
self.assertEqual(show_backup['status'], "OK")
self.assertEqual(show_backup['backup-mode'], "PAGE")
# Check parent backup
self.assertEqual(
backup_id,
self.show_pb(
backup_dir, 'node',
backup_id=show_backup['id'])["parent-backup-id"])
# ptrack backup mode
self.backup_node(backup_dir, 'node', node, backup_type="ptrack")
show_backup = self.show_pb(backup_dir, 'node')[2]
self.assertEqual(show_backup['status'], "OK")
self.assertEqual(show_backup['backup-mode'], "PTRACK")
# Check parent backup
self.assertEqual(
page_backup_id,
self.show_pb(
backup_dir, 'node',
backup_id=show_backup['id'])["parent-backup-id"])
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_smooth_checkpoint(self):
"""full backup with smooth checkpoint"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
self.backup_node(
backup_dir, 'node', node,
options=["-C"])
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
node.stop()
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_incremental_backup_without_full(self):
"""page-level backup without validated full backup"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
try:
self.backup_node(backup_dir, 'node', node, backup_type="page")
# we should die here because exception is what we expect to happen
self.assertEqual(
1, 0,
"Expecting Error because page backup should not be possible "
"without valid full backup.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertIn(
"ERROR: Valid backup on current timeline is not found. "
"Create new FULL backup before an incremental one.",
e.message,
"\n Unexpected Error Message: {0}\n CMD: {1}".format(
repr(e.message), self.cmd))
sleep(1)
try:
self.backup_node(backup_dir, 'node', node, backup_type="ptrack")
# we should die here because exception is what we expect to happen
self.assertEqual(
1, 0,
"Expecting Error because page backup should not be possible "
"without valid full backup.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertIn(
"ERROR: Valid backup on current timeline is not found. "
"Create new FULL backup before an incremental one.",
e.message,
"\n Unexpected Error Message: {0}\n CMD: {1}".format(
repr(e.message), self.cmd))
self.assertEqual(
self.show_pb(backup_dir, 'node')[0]['status'],
"ERROR")
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_incremental_backup_corrupt_full(self):
"""page-level backup with corrupted full backup"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
backup_id = self.backup_node(backup_dir, 'node', node)
file = os.path.join(
backup_dir, "backups", "node", backup_id,
"database", "postgresql.conf")
os.remove(file)
try:
self.validate_pb(backup_dir, 'node')
# we should die here because exception is what we expect to happen
self.assertEqual(
1, 0,
"Expecting Error because of validation of corrupted backup.\n"
" Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
"INFO: Validate backups of the instance 'node'\n" in e.message and
"WARNING: Backup file \"{0}\" is not found\n".format(
file) in e.message and
"WARNING: Backup {0} data files are corrupted\n".format(
backup_id) in e.message and
"WARNING: Some backups are not valid\n" in e.message,
"\n Unexpected Error Message: {0}\n CMD: {1}".format(
repr(e.message), self.cmd))
try:
self.backup_node(backup_dir, 'node', node, backup_type="page")
# we should die here because exception is what we expect to happen
self.assertEqual(
1, 0,
"Expecting Error because page backup should not be possible "
"without valid full backup.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertIn(
"ERROR: Valid backup on current timeline is not found. "
"Create new FULL backup before an incremental one.",
e.message,
"\n Unexpected Error Message: {0}\n CMD: {1}".format(
repr(e.message), self.cmd))
self.assertEqual(
self.show_pb(backup_dir, 'node', backup_id)['status'], "CORRUPT")
self.assertEqual(
self.show_pb(backup_dir, 'node')[1]['status'], "ERROR")
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_ptrack_threads(self):
"""ptrack multi thread backup mode"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
self.backup_node(
backup_dir, 'node', node,
backup_type="full", options=["-j", "4"])
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
self.backup_node(
backup_dir, 'node', node,
backup_type="ptrack", options=["-j", "4"])
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_ptrack_threads_stream(self):
"""ptrack multi thread backup mode and stream"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'ptrack_enable': 'on',
'max_wal_senders': '2'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
self.backup_node(
backup_dir, 'node', node, backup_type="full",
options=["-j", "4", "--stream"])
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
self.backup_node(
backup_dir, 'node', node,
backup_type="ptrack", options=["-j", "4", "--stream"])
self.assertEqual(self.show_pb(backup_dir, 'node')[1]['status'], "OK")
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_page_corruption_heal_via_ptrack_1(self):
"""make node, corrupt some page, check that backup failed"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
self.backup_node(
backup_dir, 'node', node,
backup_type="full", options=["-j", "4", "--stream"])
node.safe_psql(
"postgres",
"create table t_heap as select 1 as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,1000) i")
node.safe_psql(
"postgres",
"CHECKPOINT;")
heap_path = node.safe_psql(
"postgres",
"select pg_relation_filepath('t_heap')").rstrip()
with open(os.path.join(node.data_dir, heap_path), "rb+", 0) as f:
f.seek(9000)
f.write(b"bla")
f.flush()
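# Offset 9000 falls inside block 1 of the heap file (8192-byte pages),
# so the damaged page fails checksum verification during backup and
# pg_probackup falls back to fetching the block via the
# pg_ptrack_get_block SQL function, which the log check below confirms.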
self.backup_node(
backup_dir, 'node', node, backup_type="full",
options=["-j", "4", "--stream", '--log-level-file=verbose'])
# open log file and check
with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:
log_content = f.read()
self.assertIn('block 1, try to fetch via SQL', log_content)
self.assertIn('SELECT pg_catalog.pg_ptrack_get_block', log_content)
self.assertTrue(
self.show_pb(backup_dir, 'node')[1]['status'] == 'OK',
"Backup Status should be OK")
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_page_corruption_heal_via_ptrack_2(self):
"""make node, corrupt some page, check that backup failed"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
self.backup_node(
backup_dir, 'node', node, backup_type="full",
options=["-j", "4", "--stream"])
node.safe_psql(
"postgres",
"create table t_heap as select 1 as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,1000) i")
node.safe_psql(
"postgres",
"CHECKPOINT;")
heap_path = node.safe_psql(
"postgres",
"select pg_relation_filepath('t_heap')").rstrip()
node.stop()
with open(os.path.join(node.data_dir, heap_path), "rb+", 0) as f:
f.seek(9000)
f.write(b"bla")
f.flush()
node.start()
try:
self.backup_node(
backup_dir, 'node', node,
backup_type="full", options=["-j", "4", "--stream"])
# we should die here because exception is what we expect to happen
self.assertEqual(
1, 0,
"Expecting Error because of page "
"corruption in PostgreSQL instance.\n"
" Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
"WARNING: File" in e.message and
"blknum" in e.message and
"have wrong checksum" in e.message and
"try to fetch via SQL" in e.message and
"WARNING: page verification failed, "
"calculated checksum" in e.message and
"ERROR: query failed: "
"ERROR: invalid page in block" in e.message and
"query was: SELECT pg_catalog.pg_ptrack_get_block_2" in e.message,
"\n Unexpected Error Message: {0}\n CMD: {1}".format(
repr(e.message), self.cmd))
self.assertTrue(
self.show_pb(backup_dir, 'node')[1]['status'] == 'ERROR',
"Backup Status should be ERROR")
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_tablespace_in_pgdata_pgpro_1376(self):
"""PGPRO-1376 """
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
self.create_tblspace_in_node(
node, 'tblspace1',
tblspc_path=(
os.path.join(
node.data_dir, 'somedirectory', '100500'))
)
self.create_tblspace_in_node(
node, 'tblspace2',
tblspc_path=(os.path.join(node.data_dir))
)
node.safe_psql(
"postgres",
"create table t_heap1 tablespace tblspace1 as select 1 as id, "
"md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,1000) i")
node.safe_psql(
"postgres",
"create table t_heap2 tablespace tblspace2 as select 1 as id, "
"md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,1000) i")
try:
self.backup_node(
backup_dir, 'node', node, backup_type="full",
options=["-j", "4", "--stream"])
# we should die here because exception is what we expect to happen
self.assertEqual(
1, 0,
"Expecting Error because of too many levels "
"of symbolic linking\n"
" Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
'Too many levels of symbolic links' in e.message,
"\n Unexpected Error Message: {0}\n CMD: {1}".format(
repr(e.message), self.cmd))
node.safe_psql(
"postgres",
"drop table t_heap2")
node.safe_psql(
"postgres",
"drop tablespace tblspace2")
self.backup_node(
backup_dir, 'node', node, backup_type="full",
options=["-j", "4", "--stream"])
pgdata = self.pgdata_content(node.data_dir)
relfilenode = node.safe_psql(
"postgres",
"select 't_heap1'::regclass::oid"
).rstrip()
list = []
for root, dirs, files in os.walk(backup_dir):
for file in files:
if file == relfilenode:
path = os.path.join(root, file)
list = list + [path]
# We expect that relfilenode occurs only once
if len(list) > 1:
message = ""
for string in list:
message = message + string + "\n"
self.assertEqual(
1, 0,
"Following file copied twice by backup:\n {0}".format(
message)
)
node.cleanup()
self.restore_node(
backup_dir, 'node', node, options=["-j", "4"])
if self.paranoia:
pgdata_restored = self.pgdata_content(node.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)

1161
tests/cfs_backup.py Normal file

File diff suppressed because it is too large

450
tests/cfs_restore.py Normal file

@ -0,0 +1,450 @@
"""
restore
Syntax:
pg_probackup restore -B backupdir --instance instance_name
[-D datadir]
[ -i backup_id | [{--time=time | --xid=xid | --lsn=lsn } [--inclusive=boolean]]][--timeline=timeline] [-T OLDDIR=NEWDIR]
[-j num_threads] [--progress] [-q] [-v]
"""
import os
import unittest
import shutil
from .helpers.cfs_helpers import find_by_name
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
module_name = 'cfs_restore'
tblspace_name = 'cfs_tblspace'
tblspace_name_new = 'cfs_tblspace_new'
class CfsRestoreBase(ProbackupTest, unittest.TestCase):
def setUp(self):
self.fname = self.id().split('.')[3]
self.backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, self.fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
# 'ptrack_enable': 'on',
'cfs_encryption': 'off',
'max_wal_senders': '2'
}
)
self.init_pb(self.backup_dir)
self.add_instance(self.backup_dir, 'node', self.node)
self.set_archiving(self.backup_dir, 'node', self.node)
self.node.start()
self.create_tblspace_in_node(self.node, tblspace_name, cfs=True)
self.add_data_in_cluster()
self.backup_id = None
try:
self.backup_id = self.backup_node(self.backup_dir, 'node', self.node, backup_type='full')
except ProbackupException as e:
self.fail(
"ERROR: Full backup failed \n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
)
)
def add_data_in_cluster(self):
pass
def tearDown(self):
self.node.cleanup()
self.del_test_dir(module_name, self.fname)
class CfsRestoreNoencEmptyTablespaceTest(CfsRestoreBase):
# @unittest.expectedFailure
# @unittest.skip("skip")
def test_restore_empty_tablespace_from_fullbackup(self):
"""
Case: Restore empty tablespace from valid full backup.
"""
self.node.stop(["-m", "immediate"])
self.node.cleanup()
shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name))
try:
self.restore_node(self.backup_dir, 'node', self.node, backup_id=self.backup_id)
except ProbackupException as e:
self.fail(
"ERROR: Restore failed. \n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
)
)
self.assertTrue(
find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ["pg_compression"]),
"ERROR: Restored data is not valid. pg_compression not found in tablespace dir."
)
try:
self.node.start()
except ProbackupException as e:
self.fail(
"ERROR: Instance not started after restore. \n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
)
)
tblspace = self.node.safe_psql(
"postgres",
"SELECT * FROM pg_tablespace WHERE spcname='{0}'".format(tblspace_name)
)
self.assertTrue(
tblspace_name in tblspace and "compression=true" in tblspace,
"ERROR: The tablespace not restored or it restored without compressions"
)
class CfsRestoreNoencTest(CfsRestoreBase):
def add_data_in_cluster(self):
self.node.safe_psql(
"postgres",
'CREATE TABLE {0} TABLESPACE {1} \
AS SELECT i AS id, MD5(i::text) AS text, \
MD5(repeat(i::text,10))::tsvector AS tsvector \
FROM generate_series(0,1e5) i'.format('t1', tblspace_name)
)
self.table_t1 = self.node.safe_psql(
"postgres",
"SELECT * FROM t1"
)
# --- Restore from full backup ---#
# @unittest.expectedFailure
# @unittest.skip("skip")
def test_restore_from_fullbackup_to_old_location(self):
"""
Case: Restore instance from valid full backup to old location.
"""
self.node.stop()
self.node.cleanup()
shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name))
try:
self.restore_node(self.backup_dir, 'node', self.node, backup_id=self.backup_id)
except ProbackupException as e:
self.fail(
"ERROR: Restore from full backup failed. \n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
)
)
self.assertTrue(
find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression']),
"ERROR: File pg_compression not found in tablespace dir"
)
try:
self.node.start()
except ProbackupException as e:
self.fail(
"ERROR: Instance not started after restore. \n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
)
)
self.assertEqual(
repr(self.node.safe_psql("postgres", "SELECT * FROM %s" % 't1')),
repr(self.table_t1)
)
# @unittest.expectedFailure
# @unittest.skip("skip")
def test_restore_from_fullbackup_to_old_location_3_jobs(self):
"""
Case: Restore instance from valid full backup to old location.
"""
self.node.stop()
self.node.cleanup()
shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name))
try:
self.restore_node(self.backup_dir, 'node', self.node, backup_id=self.backup_id, options=['-j', '3'])
except ProbackupException as e:
self.fail(
"ERROR: Restore from full backup failed. \n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
)
)
self.assertTrue(
find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression']),
"ERROR: File pg_compression not found in backup dir"
)
try:
self.node.start()
except ProbackupException as e:
self.fail(
"ERROR: Instance not started after restore. \n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
)
)
self.assertEqual(
repr(self.node.safe_psql("postgres", "SELECT * FROM %s" % 't1')),
repr(self.table_t1)
)
# @unittest.expectedFailure
# @unittest.skip("skip")
def test_restore_from_fullbackup_to_new_location(self):
"""
Case: Restore instance from valid full backup to new location.
"""
self.node.stop()
self.node.cleanup()
shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name))
self.node_new = self.make_simple_node(base_dir="{0}/{1}/node_new_location".format(module_name, self.fname))
self.node_new.cleanup()
try:
self.restore_node(self.backup_dir, 'node', self.node_new, backup_id=self.backup_id)
except ProbackupException as e:
self.fail(
"ERROR: Restore from full backup failed. \n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
)
)
self.assertTrue(
find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression']),
"ERROR: File pg_compression not found in backup dir"
)
try:
self.node_new.start()
except ProbackupException as e:
self.fail(
"ERROR: Instance not started after restore. \n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
)
)
self.assertEqual(
repr(self.node_new.safe_psql("postgres", "SELECT * FROM %s" % 't1')),
repr(self.table_t1)
)
self.node_new.cleanup()
# @unittest.expectedFailure
# @unittest.skip("skip")
def test_restore_from_fullbackup_to_new_location_5_jobs(self):
"""
Case: Restore instance from valid full backup to new location.
"""
self.node.stop()
self.node.cleanup()
shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name))
self.node_new = self.make_simple_node(base_dir="{0}/{1}/node_new_location".format(module_name, self.fname))
self.node_new.cleanup()
try:
self.restore_node(self.backup_dir, 'node', self.node_new, backup_id=self.backup_id, options=['-j', '5'])
except ProbackupException as e:
self.fail(
"ERROR: Restore from full backup failed. \n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
)
)
self.assertTrue(
find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression']),
"ERROR: File pg_compression not found in backup dir"
)
try:
self.node_new.start()
except ProbackupException as e:
self.fail(
"ERROR: Instance not started after restore. \n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
)
)
self.assertEqual(
repr(self.node_new.safe_psql("postgres", "SELECT * FROM %s" % 't1')),
repr(self.table_t1)
)
self.node_new.cleanup()
# @unittest.expectedFailure
# @unittest.skip("skip")
def test_restore_from_fullbackup_to_old_location_tablespace_new_location(self):
self.node.stop()
self.node.cleanup()
shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name))
os.mkdir(self.get_tblspace_path(self.node, tblspace_name_new))
try:
self.restore_node(
self.backup_dir,
'node', self.node,
backup_id=self.backup_id,
options=["-T", "{0}={1}".format(
self.get_tblspace_path(self.node, tblspace_name),
self.get_tblspace_path(self.node, tblspace_name_new)
)
]
)
except ProbackupException as e:
self.fail(
"ERROR: Restore from full backup failed. \n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
)
)
self.assertTrue(
find_by_name([self.get_tblspace_path(self.node, tblspace_name_new)], ['pg_compression']),
"ERROR: File pg_compression not found in new tablespace location"
)
try:
self.node.start()
except ProbackupException as e:
self.fail(
"ERROR: Instance not started after restore. \n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
)
)
self.assertEqual(
repr(self.node.safe_psql("postgres", "SELECT * FROM %s" % 't1')),
repr(self.table_t1)
)
# @unittest.expectedFailure
# @unittest.skip("skip")
def test_restore_from_fullbackup_to_old_location_tablespace_new_location_3_jobs(self):
self.node.stop()
self.node.cleanup()
shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name))
os.mkdir(self.get_tblspace_path(self.node, tblspace_name_new))
try:
self.restore_node(
self.backup_dir,
'node', self.node,
backup_id=self.backup_id,
options=["-j", "3", "-T", "{0}={1}".format(
self.get_tblspace_path(self.node, tblspace_name),
self.get_tblspace_path(self.node, tblspace_name_new)
)
]
)
except ProbackupException as e:
self.fail(
"ERROR: Restore from full backup failed. \n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
)
)
self.assertTrue(
find_by_name([self.get_tblspace_path(self.node, tblspace_name_new)], ['pg_compression']),
"ERROR: File pg_compression not found in new tablespace location"
)
try:
self.node.start()
except ProbackupException as e:
self.fail(
"ERROR: Instance not started after restore. \n {0} \n {1}".format(
repr(self.cmd),
repr(e.message)
)
)
self.assertEqual(
repr(self.node.safe_psql("postgres", "SELECT * FROM %s" % 't1')),
repr(self.table_t1)
)
# @unittest.expectedFailure
@unittest.skip("skip")
def test_restore_from_fullbackup_to_new_location_tablespace_new_location(self):
pass
# @unittest.expectedFailure
@unittest.skip("skip")
def test_restore_from_fullbackup_to_new_location_tablespace_new_location_5_jobs(self):
pass
# @unittest.expectedFailure
@unittest.skip("skip")
def test_restore_from_ptrack(self):
"""
Case: Restore from backup to old location
"""
pass
# @unittest.expectedFailure
@unittest.skip("skip")
def test_restore_from_ptrack_jobs(self):
"""
Case: Restore from backup to old location, four jobs
"""
pass
# @unittest.expectedFailure
@unittest.skip("skip")
def test_restore_from_ptrack_new_jobs(self):
pass
# --------------------------------------------------------- #
# @unittest.expectedFailure
@unittest.skip("skip")
def test_restore_from_page(self):
"""
Case: Restore from backup to old location
"""
pass
# @unittest.expectedFailure
@unittest.skip("skip")
def test_restore_from_page_jobs(self):
"""
Case: Restore from backup to old location, four jobs
"""
pass
# @unittest.expectedFailure
@unittest.skip("skip")
def test_restore_from_page_new_jobs(self):
"""
Case: Restore from backup to new location, four jobs
"""
pass
#class CfsRestoreEncEmptyTablespaceTest(CfsRestoreNoencEmptyTablespaceTest):
# # --- Begin --- #
# def setUp(self):
# os.environ["PG_CIPHER_KEY"] = "super_secret_cipher_key"
# super(CfsRestoreNoencEmptyTablespaceTest, self).setUp()
#
#
#class CfsRestoreEncTest(CfsRestoreNoencTest):
# # --- Begin --- #
# def setUp(self):
# os.environ["PG_CIPHER_KEY"] = "super_secret_cipher_key"
# super(CfsRestoreNoencTest, self).setUp()

25
tests/cfs_validate_backup.py Normal file

@ -0,0 +1,25 @@
import os
import unittest
import random
from .helpers.cfs_helpers import find_by_extensions, find_by_name, find_by_pattern, corrupt_file
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
module_name = 'cfs_validate_backup'
tblspace_name = 'cfs_tblspace'
class CfsValidateBackupNoenc(ProbackupTest,unittest.TestCase):
def setUp(self):
pass
def test_validate_fullbackup_empty_tablespace_after_delete_pg_compression(self):
pass
def tearDown(self):
pass
#class CfsValidateBackupEnc(CfsValidateBackupNoenc):
# def setUp(self):
# os.environ["PG_CIPHER_KEY"] = "super_secret_cipher_key"
# super(CfsValidateBackupEnc, self).setUp()

496
tests/compression.py Normal file
@@ -0,0 +1,496 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack
from datetime import datetime, timedelta
import subprocess
module_name = 'compression'
class CompressionTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_compression_stream_zlib(self):
"""make archive node, make full and page stream backups, check data correctness in restored instance"""
self.maxDiff = None
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'max_wal_senders': '2',
'checkpoint_timeout': '30s',
'ptrack_enable': 'on'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
# FULL BACKUP
node.safe_psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,256) i")
full_result = node.execute("postgres", "SELECT * FROM t_heap")
full_backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='full',
options=[
'--stream',
'--compress-algorithm=zlib'])
# PAGE BACKUP
node.safe_psql(
"postgres",
"insert into t_heap select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(256,512) i")
page_result = node.execute("postgres", "SELECT * FROM t_heap")
page_backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='page',
options=[
'--stream', '--compress-algorithm=zlib',
'--log-level-console=verbose',
'--log-level-file=verbose'])
# PTRACK BACKUP
node.safe_psql(
"postgres",
"insert into t_heap select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(512,768) i")
ptrack_result = node.execute("postgres", "SELECT * FROM t_heap")
ptrack_backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
options=['--stream', '--compress-algorithm=zlib'])
# Drop Node
node.cleanup()
# Check full backup
self.assertIn(
"INFO: Restore of backup {0} completed.".format(full_backup_id),
self.restore_node(
backup_dir, 'node', node, backup_id=full_backup_id,
options=[
"-j", "4", "--immediate",
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.slow_start()
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()
# Check page backup
self.assertIn(
"INFO: Restore of backup {0} completed.".format(page_backup_id),
self.restore_node(
backup_dir, 'node', node, backup_id=page_backup_id,
options=[
"-j", "4", "--immediate",
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.slow_start()
page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(page_result, page_result_new)
node.cleanup()
# Check ptrack backup
self.assertIn(
"INFO: Restore of backup {0} completed.".format(ptrack_backup_id),
self.restore_node(
backup_dir, 'node', node, backup_id=ptrack_backup_id,
options=[
"-j", "4", "--immediate",
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.slow_start()
ptrack_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(ptrack_result, ptrack_result_new)
node.cleanup()
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_compression_archive_zlib(self):
"""
make archive node, make full, page and ptrack backups,
check data correctness in restored instance
"""
self.maxDiff = None
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'checkpoint_timeout': '30s',
'ptrack_enable': 'on'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
# FULL BACKUP
node.safe_psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(i::text)::tsvector as tsvector from generate_series(0,1) i")
full_result = node.execute("postgres", "SELECT * FROM t_heap")
full_backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='full',
options=["--compress-algorithm=zlib"])
# PAGE BACKUP
node.safe_psql(
"postgres",
"insert into t_heap select i as id, md5(i::text) as text, "
"md5(i::text)::tsvector as tsvector "
"from generate_series(0,2) i")
page_result = node.execute("postgres", "SELECT * FROM t_heap")
page_backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='page',
options=["--compress-algorithm=zlib"])
# PTRACK BACKUP
node.safe_psql(
"postgres",
"insert into t_heap select i as id, md5(i::text) as text, "
"md5(i::text)::tsvector as tsvector from generate_series(0,3) i")
ptrack_result = node.execute("postgres", "SELECT * FROM t_heap")
ptrack_backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
options=['--compress-algorithm=zlib'])
# Drop Node
node.cleanup()
# Check full backup
self.assertIn(
"INFO: Restore of backup {0} completed.".format(full_backup_id),
self.restore_node(
backup_dir, 'node', node, backup_id=full_backup_id,
options=[
"-j", "4", "--immediate",
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.slow_start()
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()
# Check page backup
self.assertIn(
"INFO: Restore of backup {0} completed.".format(page_backup_id),
self.restore_node(
backup_dir, 'node', node, backup_id=page_backup_id,
options=[
"-j", "4", "--immediate",
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.slow_start()
page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(page_result, page_result_new)
node.cleanup()
# Check ptrack backup
self.assertIn(
"INFO: Restore of backup {0} completed.".format(ptrack_backup_id),
self.restore_node(
backup_dir, 'node', node, backup_id=ptrack_backup_id,
options=[
"-j", "4", "--immediate",
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.slow_start()
ptrack_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(ptrack_result, ptrack_result_new)
node.cleanup()
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_compression_stream_pglz(self):
"""
make archive node, make full, page and ptrack stream backups,
check data correctness in restored instance
"""
self.maxDiff = None
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'max_wal_senders': '2',
'checkpoint_timeout': '30s',
'ptrack_enable': 'on'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
# FULL BACKUP
node.safe_psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,256) i")
full_result = node.execute("postgres", "SELECT * FROM t_heap")
full_backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='full',
options=['--stream', '--compress-algorithm=pglz'])
# PAGE BACKUP
node.safe_psql(
"postgres",
"insert into t_heap select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(256,512) i")
page_result = node.execute("postgres", "SELECT * FROM t_heap")
page_backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='page',
options=['--stream', '--compress-algorithm=pglz'])
# PTRACK BACKUP
node.safe_psql(
"postgres",
"insert into t_heap select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(512,768) i")
ptrack_result = node.execute("postgres", "SELECT * FROM t_heap")
ptrack_backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
options=['--stream', '--compress-algorithm=pglz'])
# Drop Node
node.cleanup()
# Check full backup
self.assertIn(
"INFO: Restore of backup {0} completed.".format(full_backup_id),
self.restore_node(
backup_dir, 'node', node, backup_id=full_backup_id,
options=[
"-j", "4", "--immediate",
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.slow_start()
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()
# Check page backup
self.assertIn(
"INFO: Restore of backup {0} completed.".format(page_backup_id),
self.restore_node(
backup_dir, 'node', node, backup_id=page_backup_id,
options=[
"-j", "4", "--immediate",
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.slow_start()
page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(page_result, page_result_new)
node.cleanup()
# Check ptrack backup
self.assertIn(
"INFO: Restore of backup {0} completed.".format(ptrack_backup_id),
self.restore_node(
backup_dir, 'node', node, backup_id=ptrack_backup_id,
options=[
"-j", "4", "--immediate",
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.slow_start()
ptrack_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(ptrack_result, ptrack_result_new)
node.cleanup()
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_compression_archive_pglz(self):
"""
make archive node, make full, page and ptrack backups,
check data correctness in restored instance
"""
self.maxDiff = None
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'max_wal_senders': '2',
'checkpoint_timeout': '30s',
'ptrack_enable': 'on'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
# FULL BACKUP
node.safe_psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(i::text)::tsvector as tsvector "
"from generate_series(0,100) i")
full_result = node.execute("postgres", "SELECT * FROM t_heap")
full_backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='full',
options=['--compress-algorithm=pglz'])
# PAGE BACKUP
node.safe_psql(
"postgres",
"insert into t_heap select i as id, md5(i::text) as text, "
"md5(i::text)::tsvector as tsvector "
"from generate_series(100,200) i")
page_result = node.execute("postgres", "SELECT * FROM t_heap")
page_backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='page',
options=['--compress-algorithm=pglz'])
# PTRACK BACKUP
node.safe_psql(
"postgres",
"insert into t_heap select i as id, md5(i::text) as text, "
"md5(i::text)::tsvector as tsvector "
"from generate_series(200,300) i")
ptrack_result = node.execute("postgres", "SELECT * FROM t_heap")
ptrack_backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
options=['--compress-algorithm=pglz'])
# Drop Node
node.cleanup()
# Check full backup
self.assertIn(
"INFO: Restore of backup {0} completed.".format(full_backup_id),
self.restore_node(
backup_dir, 'node', node, backup_id=full_backup_id,
options=[
"-j", "4", "--immediate",
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.slow_start()
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()
# Check page backup
self.assertIn(
"INFO: Restore of backup {0} completed.".format(page_backup_id),
self.restore_node(
backup_dir, 'node', node, backup_id=page_backup_id,
options=[
"-j", "4", "--immediate",
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.slow_start()
page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(page_result, page_result_new)
node.cleanup()
# Check ptrack backup
self.assertIn(
"INFO: Restore of backup {0} completed.".format(ptrack_backup_id),
self.restore_node(
backup_dir, 'node', node, backup_id=ptrack_backup_id,
options=[
"-j", "4", "--immediate",
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.slow_start()
ptrack_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(ptrack_result, ptrack_result_new)
node.cleanup()
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_compression_wrong_algorithm(self):
"""
make archive node, try to take a backup with an invalid
compression algorithm, check that the backup fails with an error
"""
self.maxDiff = None
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2',
'checkpoint_timeout': '30s',
'ptrack_enable': 'on'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
try:
self.backup_node(
backup_dir, 'node', node,
backup_type='full', options=['--compress-algorithm=bla-blah'])
# we should die here because exception is what we expect to happen
self.assertEqual(
1, 0,
"Expecting Error because compress-algorithm is invalid.\n "
"Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertEqual(
e.message,
'ERROR: invalid compress algorithm value "bla-blah"\n',
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname)
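The four zlib/pglz tests above repeat the same full/page/ptrack cycle, varying only the compression algorithm and the stream-vs-archive transport. A self-contained sketch of collapsing such a matrix with unittest.subTest (Python 3.4+); the class and the final assertion are illustrative stand-ins for the real backup/restore calls, not part of this suite:

import unittest

class CompressionMatrixSketch(unittest.TestCase):
    def test_matrix(self):
        for algorithm in ('zlib', 'pglz'):
            for stream in (True, False):
                with self.subTest(algorithm=algorithm, stream=stream):
                    options = ['--compress-algorithm={0}'.format(algorithm)]
                    if stream:
                        options.append('--stream')
                    # The real tests would run backup_node/restore_node here.
                    self.assertIn('--compress-algorithm=' + algorithm, options)

if __name__ == '__main__':
    unittest.main()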

203
tests/delete_test.py Normal file
@@ -0,0 +1,203 @@
import unittest
import os
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
import subprocess
from sys import exit
module_name = 'delete'
class DeleteTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_delete_full_backups(self):
"""delete full backups"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
# full backup
self.backup_node(backup_dir, 'node', node)
pgbench = node.pgbench(
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pgbench.wait()
pgbench.stdout.close()
self.backup_node(backup_dir, 'node', node)
pgbench = node.pgbench(
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pgbench.wait()
pgbench.stdout.close()
self.backup_node(backup_dir, 'node', node)
show_backups = self.show_pb(backup_dir, 'node')
id_1 = show_backups[0]['id']
id_2 = show_backups[1]['id']
id_3 = show_backups[2]['id']
self.delete_pb(backup_dir, 'node', id_2)
show_backups = self.show_pb(backup_dir, 'node')
self.assertEqual(show_backups[0]['id'], id_1)
self.assertEqual(show_backups[1]['id'], id_3)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_delete_increment_page(self):
"""delete increment and all after him"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
# full backup mode
self.backup_node(backup_dir, 'node', node)
# page backup mode
self.backup_node(backup_dir, 'node', node, backup_type="page")
# page backup mode
self.backup_node(backup_dir, 'node', node, backup_type="page")
# full backup mode
self.backup_node(backup_dir, 'node', node)
show_backups = self.show_pb(backup_dir, 'node')
self.assertEqual(len(show_backups), 4)
# delete first page backup
self.delete_pb(backup_dir, 'node', show_backups[1]['id'])
show_backups = self.show_pb(backup_dir, 'node')
self.assertEqual(len(show_backups), 2)
self.assertEqual(show_backups[0]['backup-mode'], "FULL")
self.assertEqual(show_backups[0]['status'], "OK")
self.assertEqual(show_backups[1]['backup-mode'], "FULL")
self.assertEqual(show_backups[1]['status'], "OK")
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_delete_increment_ptrack(self):
"""delete increment and all after him"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
# full backup mode
self.backup_node(backup_dir, 'node', node)
# ptrack backup mode
self.backup_node(backup_dir, 'node', node, backup_type="ptrack")
# ptrack backup mode
self.backup_node(backup_dir, 'node', node, backup_type="ptrack")
# full backup mode
self.backup_node(backup_dir, 'node', node)
show_backups = self.show_pb(backup_dir, 'node')
self.assertEqual(len(show_backups), 4)
# delete first ptrack backup
self.delete_pb(backup_dir, 'node', show_backups[1]['id'])
show_backups = self.show_pb(backup_dir, 'node')
self.assertEqual(len(show_backups), 2)
self.assertEqual(show_backups[0]['backup-mode'], "FULL")
self.assertEqual(show_backups[0]['status'], "OK")
self.assertEqual(show_backups[1]['backup-mode'], "FULL")
self.assertEqual(show_backups[1]['status'], "OK")
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_delete_orphaned_wal_segments(self):
"""make archive node, make three full backups, delete second backup without --wal option, then delete orphaned wals via --wal option"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.safe_psql(
"postgres",
"create table t_heap as select 1 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,10000) i")
# first full backup
backup_1_id = self.backup_node(backup_dir, 'node', node)
# second full backup
backup_2_id = self.backup_node(backup_dir, 'node', node)
# third full backup
backup_3_id = self.backup_node(backup_dir, 'node', node)
node.stop()
# Check wals
wals_dir = os.path.join(backup_dir, 'wal', 'node')
wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')]
original_wal_quantity = len(wals)
# delete second full backup
self.delete_pb(backup_dir, 'node', backup_2_id)
# check wal quantity
self.validate_pb(backup_dir)
self.assertEqual(self.show_pb(backup_dir, 'node', backup_1_id)['status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node', backup_3_id)['status'], "OK")
# try to delete wals for second backup
self.delete_pb(backup_dir, 'node', options=['--wal'])
# check wal quantity
self.validate_pb(backup_dir)
self.assertEqual(self.show_pb(backup_dir, 'node', backup_1_id)['status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node', backup_3_id)['status'], "OK")
# delete first full backup
self.delete_pb(backup_dir, 'node', backup_1_id)
self.validate_pb(backup_dir)
self.assertEqual(self.show_pb(backup_dir, 'node', backup_3_id)['status'], "OK")
result = self.delete_pb(backup_dir, 'node', options=['--wal'])
# delete useless wals
self.assertTrue('INFO: removed min WAL segment' in result
and 'INFO: removed max WAL segment' in result)
self.validate_pb(backup_dir)
self.assertEqual(self.show_pb(backup_dir, 'node', backup_3_id)['status'], "OK")
# Check quantity, it should be lower than original
wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')]
self.assertTrue(original_wal_quantity > len(wals), "Number of WAL segments did not decrease after 'delete --wal'")
# Delete last backup
self.delete_pb(backup_dir, 'node', backup_3_id, options=['--wal'])
wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f)) and not f.endswith('.backup')]
self.assertEqual(0, len(wals), "Number of WAL segments should be equal to 0")
# Clean after yourself
self.del_test_dir(module_name, fname)
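The WAL-counting list comprehension above appears three times in test_delete_orphaned_wal_segments; a small helper would keep those checks consistent. A sketch, with a name invented here for illustration:

import os

def count_wal_segments(wals_dir):
    """Count WAL segment files in wals_dir, ignoring *.backup files."""
    return len([
        f for f in os.listdir(wals_dir)
        if os.path.isfile(os.path.join(wals_dir, f))
        and not f.endswith('.backup')])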

1265
tests/delta.py Normal file

File diff suppressed because it is too large

164
tests/exclude.py Normal file
@@ -0,0 +1,164 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
module_name = 'exclude'
class ExcludeTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_exclude_temp_tables(self):
"""
make node without archiving, create temp table, take full backup,
check that the temp table is not present in the backup catalogue
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica', 'max_wal_senders': '2',
'shared_buffers': '1GB', 'fsync': 'off', 'ptrack_enable': 'on'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
conn = node.connect()
with node.connect("postgres") as conn:
conn.execute(
"create temp table test as "
"select generate_series(0,50050000)::text")
conn.commit()
temp_schema_name = conn.execute(
"SELECT nspname FROM pg_namespace "
"WHERE oid = pg_my_temp_schema()")[0][0]
conn.commit()
temp_toast_schema_name = "pg_toast_" + temp_schema_name.replace(
"pg_", "")
conn.commit()
conn.execute("create index test_idx on test (generate_series)")
conn.commit()
heap_path = conn.execute(
"select pg_relation_filepath('test')")[0][0]
conn.commit()
index_path = conn.execute(
"select pg_relation_filepath('test_idx')")[0][0]
conn.commit()
heap_oid = conn.execute("select 'test'::regclass::oid")[0][0]
conn.commit()
toast_path = conn.execute(
"select pg_relation_filepath('{0}.{1}')".format(
temp_toast_schema_name, "pg_toast_" + str(heap_oid)))[0][0]
conn.commit()
toast_idx_path = conn.execute(
"select pg_relation_filepath('{0}.{1}')".format(
temp_toast_schema_name,
"pg_toast_" + str(heap_oid) + "_index"))[0][0]
conn.commit()
temp_table_filename = os.path.basename(heap_path)
temp_idx_filename = os.path.basename(index_path)
temp_toast_filename = os.path.basename(toast_path)
temp_idx_toast_filename = os.path.basename(toast_idx_path)
self.backup_node(
backup_dir, 'node', node, backup_type='full', options=['--stream'])
for root, dirs, files in os.walk(backup_dir):
for file in files:
if file in [
temp_table_filename, temp_table_filename + ".1",
temp_idx_filename,
temp_idx_filename + ".1",
temp_toast_filename,
temp_toast_filename + ".1",
temp_idx_toast_filename,
temp_idx_toast_filename + ".1"
]:
self.assertEqual(
1, 0,
"Found temp table file in backup catalogue.\n "
"Filepath: {0}".format(file))
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_exclude_unlogged_tables_1(self):
"""
make node without archiving, create unlogged table, take full backup,
alter table to logged, take ptrack backup, restore ptrack backup,
check that the PGDATA directories are physically the same
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2',
"shared_buffers": "10MB",
"fsync": "off",
'ptrack_enable': 'on'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
conn = node.connect()
with node.connect("postgres") as conn:
conn.execute(
"create unlogged table test as "
"select generate_series(0,5005000)::text")
conn.commit()
conn.execute("create index test_idx on test (generate_series)")
conn.commit()
self.backup_node(
backup_dir, 'node', node,
backup_type='full', options=['--stream'])
node.safe_psql('postgres', "alter table test set logged")
self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
options=['--stream', '--log-level-file=verbose']
)
pgdata = self.pgdata_content(node.data_dir)
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname),
)
node_restored.cleanup()
self.restore_node(
backup_dir, 'node', node_restored, options=["-j", "4"])
# Physical comparison
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
self.del_test_dir(module_name, fname)
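pgdata_content and compare_pgdata above perform a physical, file-by-file comparison of two data directories. A toy, self-contained sketch of the same idea; the helper names and the md5 hashing are assumptions for illustration, not the suite's actual implementation:

import hashlib
import os

def dir_digests(root):
    """Map relative file path -> md5 digest for every file under root."""
    digests = {}
    for dirpath, _dirs, files in os.walk(root):
        for name in files:
            path = os.path.join(dirpath, name)
            with open(path, 'rb') as f:
                digests[os.path.relpath(path, root)] = hashlib.md5(
                    f.read()).hexdigest()
    return digests

def dirs_physically_equal(a, b):
    """True when both trees hold the same files with identical bytes."""
    return dir_digests(a) == dir_digests(b)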

@@ -0,0 +1,95 @@
pg_probackup - utility to manage backup/recovery of PostgreSQL database.
pg_probackup help [COMMAND]
pg_probackup version
pg_probackup init -B backup-path
pg_probackup set-config -B backup-dir --instance=instance_name
[--log-level-console=log-level-console]
[--log-level-file=log-level-file]
[--log-filename=log-filename]
[--error-log-filename=error-log-filename]
[--log-directory=log-directory]
[--log-rotation-size=log-rotation-size]
[--log-rotation-age=log-rotation-age]
[--retention-redundancy=retention-redundancy]
[--retention-window=retention-window]
[--compress-algorithm=compress-algorithm]
[--compress-level=compress-level]
[-d dbname] [-h host] [-p port] [-U username]
[--master-db=db_name] [--master-host=host_name]
[--master-port=port] [--master-user=user_name]
[--replica-timeout=timeout]
pg_probackup show-config -B backup-dir --instance=instance_name
[--format=format]
pg_probackup backup -B backup-path -b backup-mode --instance=instance_name
[-C] [--stream [-S slot-name]] [--backup-pg-log]
[-j num-threads] [--archive-timeout=archive-timeout]
[--progress]
[--log-level-console=log-level-console]
[--log-level-file=log-level-file]
[--log-filename=log-filename]
[--error-log-filename=error-log-filename]
[--log-directory=log-directory]
[--log-rotation-size=log-rotation-size]
[--log-rotation-age=log-rotation-age]
[--delete-expired] [--delete-wal]
[--retention-redundancy=retention-redundancy]
[--retention-window=retention-window]
[--compress]
[--compress-algorithm=compress-algorithm]
[--compress-level=compress-level]
[-d dbname] [-h host] [-p port] [-U username]
[-w --no-password] [-W --password]
[--master-db=db_name] [--master-host=host_name]
[--master-port=port] [--master-user=user_name]
[--replica-timeout=timeout]
pg_probackup restore -B backup-dir --instance=instance_name
[-D pgdata-dir] [-i backup-id] [--progress]
[--time=time|--xid=xid|--lsn=lsn [--inclusive=boolean]]
[--timeline=timeline] [-T OLDDIR=NEWDIR]
[--immediate] [--recovery-target-name=target-name]
[--recovery-target-action=pause|promote|shutdown]
[--restore-as-replica]
[--no-validate]
pg_probackup validate -B backup-dir [--instance=instance_name]
[-i backup-id] [--progress]
[--time=time|--xid=xid|--lsn=lsn [--inclusive=boolean]]
[--recovery-target-name=target-name]
[--timeline=timeline]
pg_probackup show -B backup-dir
[--instance=instance_name [-i backup-id]]
[--format=format]
pg_probackup delete -B backup-dir --instance=instance_name
[--wal] [-i backup-id | --expired]
pg_probackup merge -B backup-dir --instance=instance_name
-i backup-id
pg_probackup add-instance -B backup-dir -D pgdata-dir
--instance=instance_name
pg_probackup del-instance -B backup-dir
--instance=instance_name
pg_probackup archive-push -B backup-dir --instance=instance_name
--wal-file-path=wal-file-path
--wal-file-name=wal-file-name
[--compress [--compress-level=compress-level]]
[--overwrite]
pg_probackup archive-get -B backup-dir --instance=instance_name
--wal-file-path=wal-file-path
--wal-file-name=wal-file-name
Read the website for details. <https://github.com/postgrespro/pg_probackup>
Report bugs to <https://github.com/postgrespro/pg_probackup/issues>.
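A minimal sketch of driving this command line from Python, roughly what the suite's run_pb helper does; the catalog path, data directory and instance name below are hypothetical:

import subprocess

backup_dir = '/tmp/pb_catalog'     # hypothetical backup catalog
pgdata = '/var/lib/pgsql/data'     # hypothetical data directory

subprocess.check_call(['pg_probackup', 'init', '-B', backup_dir])
subprocess.check_call([
    'pg_probackup', 'add-instance', '-B', backup_dir,
    '-D', pgdata, '--instance=node'])
subprocess.check_call([
    'pg_probackup', 'backup', '-B', backup_dir,
    '--instance=node', '-b', 'full', '--stream'])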

@@ -0,0 +1 @@
pg_probackup 2.0.18

333
tests/false_positive.py Normal file
@@ -0,0 +1,333 @@
import unittest
import os
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
from datetime import datetime, timedelta
import subprocess
module_name = 'false_positive'
class FalsePositive(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
@unittest.expectedFailure
def test_validate_wal_lost_segment(self):
"""Loose segment located between backups. ExpectedFailure. This is BUG """
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'max_wal_senders': '2'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
self.backup_node(backup_dir, 'node', node)
# make some wals
node.pgbench_init(scale=2)
pgbench = node.pgbench(
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
options=["-c", "4", "-T", "10"]
)
pgbench.wait()
pgbench.stdout.close()
# delete last wal segment
wals_dir = os.path.join(backup_dir, "wal", 'node')
wals = [f for f in os.listdir(wals_dir) if os.path.isfile(
os.path.join(wals_dir, f)) and not f.endswith('.backup')]
wals = map(int, wals)
os.remove(os.path.join(wals_dir, '0000000' + str(max(wals))))
# We just lost a wal segment and know nothing about it
self.backup_node(backup_dir, 'node', node)
self.assertTrue(
'validation completed successfully' in self.validate_pb(
backup_dir, 'node'))
########
# Clean after yourself
self.del_test_dir(module_name, fname)
@unittest.expectedFailure
# Need to force validation of ancestor-chain
def test_incremental_backup_corrupt_full_1(self):
"""page-level backup with corrupted full backup"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
backup_id = self.backup_node(backup_dir, 'node', node)
file = os.path.join(
backup_dir, "backups", "node",
backup_id.decode("utf-8"), "database", "postgresql.conf")
os.remove(file)
try:
self.backup_node(backup_dir, 'node', node, backup_type="page")
# we should die here because exception is what we expect to happen
self.assertEqual(
1, 0,
"Expecting Error because page backup should not be "
"possible without valid full backup.\n "
"Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertEqual(
e.message,
'ERROR: Valid backup on current timeline is not found. '
'Create new FULL backup before an incremental one.\n',
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
self.assertEqual(
self.show_pb(backup_dir, 'node')[0]['Status'], "ERROR")
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
@unittest.expectedFailure
def test_ptrack_concurrent_get_and_clear_1(self):
"""make node, make full and ptrack stream backups,"
" restore them and check data correctness"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'max_wal_senders': '2',
'checkpoint_timeout': '300s',
'ptrack_enable': 'on'
}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.safe_psql(
"postgres",
"create table t_heap as select i"
" as id from generate_series(0,1) i"
)
self.backup_node(backup_dir, 'node', node, options=['--stream'])
gdb = self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
options=['--stream', '--log-level-file=verbose'],
gdb=True
)
gdb.set_breakpoint('make_pagemap_from_ptrack')
gdb.run_until_break()
node.safe_psql(
"postgres",
"update t_heap set id = 100500")
tablespace_oid = node.safe_psql(
"postgres",
"select oid from pg_tablespace where spcname = 'pg_default'").rstrip()
relfilenode = node.safe_psql(
"postgres",
"select 't_heap'::regclass::oid").rstrip()
node.safe_psql(
"postgres",
"SELECT pg_ptrack_get_and_clear({0}, {1})".format(
tablespace_oid, relfilenode))
gdb.continue_execution_until_exit()
self.backup_node(
backup_dir, 'node', node,
backup_type='ptrack', options=['--stream']
)
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)
result = node.safe_psql("postgres", "SELECT * FROM t_heap")
node.cleanup()
self.restore_node(backup_dir, 'node', node, options=["-j", "4"])
# Physical comparison
if self.paranoia:
pgdata_restored = self.pgdata_content(
node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
node.slow_start()
# Logical comparison
self.assertEqual(
result,
node.safe_psql("postgres", "SELECT * FROM t_heap")
)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
@unittest.expectedFailure
def test_ptrack_concurrent_get_and_clear_2(self):
"""make node, make full and ptrack stream backups,"
" restore them and check data correctness"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'max_wal_senders': '2',
'checkpoint_timeout': '300s',
'ptrack_enable': 'on'
}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.safe_psql(
"postgres",
"create table t_heap as select i"
" as id from generate_series(0,1) i"
)
self.backup_node(backup_dir, 'node', node, options=['--stream'])
gdb = self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
options=['--stream', '--log-level-file=verbose'],
gdb=True
)
gdb.set_breakpoint('pthread_create')
gdb.run_until_break()
node.safe_psql(
"postgres",
"update t_heap set id = 100500")
tablespace_oid = node.safe_psql(
"postgres",
"select oid from pg_tablespace "
"where spcname = 'pg_default'").rstrip()
relfilenode = node.safe_psql(
"postgres",
"select 't_heap'::regclass::oid").rstrip()
node.safe_psql(
"postgres",
"SELECT pg_ptrack_get_and_clear({0}, {1})".format(
tablespace_oid, relfilenode))
gdb._execute("delete breakpoints")
gdb.continue_execution_until_exit()
try:
self.backup_node(
backup_dir, 'node', node,
backup_type='ptrack', options=['--stream']
)
# we should die here because exception is what we expect to happen
self.assertEqual(
1, 0,
"Expecting Error because of LSN mismatch from ptrack_control "
"and previous backup ptrack_lsn.\n"
" Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
'ERROR: LSN from ptrack_control' in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)
result = node.safe_psql("postgres", "SELECT * FROM t_heap")
node.cleanup()
self.restore_node(backup_dir, 'node', node, options=["-j", "4"])
# Physical comparison
if self.paranoia:
pgdata_restored = self.pgdata_content(
node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
node.slow_start()
# Logical comparison
self.assertEqual(
result,
node.safe_psql("postgres", "SELECT * FROM t_heap")
)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
@unittest.expectedFailure
def test_multiple_delete(self):
"""delete multiple backups"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.safe_psql(
"postgres",
"create table t_heap as select 1 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,10000) i")
# first full backup
backup_1_id = self.backup_node(backup_dir, 'node', node)
# second full backup
backup_2_id = self.backup_node(backup_dir, 'node', node)
# third full backup
backup_3_id = self.backup_node(backup_dir, 'node', node)
node.stop()
self.delete_pb(backup_dir, 'node', options=
["-i {0}".format(backup_1_id), "-i {0}".format(backup_2_id), "-i {0}".format(backup_3_id)])
# Clean after yourself
self.del_test_dir(module_name, fname)

@@ -0,0 +1,2 @@
__all__ = ['ptrack_helpers', 'cfs_helpers', 'expected_errors']
#from . import *

@@ -0,0 +1,91 @@
import os
import re
import random
import string
def find_by_extensions(dirs=None, extensions=None):
"""
find_by_extensions(['path1','path2'],['.txt','.log'])
:return:
Return a list of files (full paths) matching the given extensions
"""
files = []
new_dirs = []
if dirs is not None and extensions is not None:
for d in dirs:
try:
new_dirs += [os.path.join(d, f) for f in os.listdir(d)]
except OSError:
if os.path.splitext(d)[1] in extensions:
files.append(d)
if new_dirs:
files.extend(find_by_extensions(new_dirs, extensions))
return files
def find_by_pattern(dirs=None, pattern=None):
"""
find_by_pattern(['path1','path2'],'^.*/*.txt')
:return:
Return a list of files (full paths) matching the given pattern
"""
files = []
new_dirs = []
if dirs is not None and pattern is not None:
for d in dirs:
try:
new_dirs += [os.path.join(d, f) for f in os.listdir(d)]
except OSError:
if re.match(pattern,d):
files.append(d)
if new_dirs:
files.extend(find_by_pattern(new_dirs, pattern))
return files
def find_by_name(dirs=None, filename=None):
files = []
new_dirs = []
if dirs is not None and filename is not None:
for d in dirs:
try:
new_dirs += [os.path.join(d, f) for f in os.listdir(d)]
except OSError:
if os.path.basename(d) in filename:
files.append(d)
if new_dirs:
files.extend(find_by_name(new_dirs, filename))
return files
def corrupt_file(filename):
file_size = None
try:
file_size = os.path.getsize(filename)
except OSError:
return False
try:
with open(filename, "rb+") as f:
f.seek(random.randint(int(0.1 * file_size), int(0.8 * file_size)))
f.write(random_string(0.1 * file_size).encode())
except OSError:
return False
return True
def random_string(n):
a = string.ascii_letters + string.digits
return ''.join([random.choice(a) for i in range(int(n)+1)])
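A short usage sketch for the helpers above, runnable from the repository root if the tests package is importable; the catalog path is hypothetical, and corrupt_file overwrites bytes in place, so point it only at throwaway copies:

from tests.helpers.cfs_helpers import corrupt_file, find_by_name

catalog = '/tmp/pb_catalog'        # hypothetical backup catalog
compressed = find_by_name([catalog], ['pg_compression'])
if compressed:
    # Returns True on success, False if the file cannot be read or written.
    assert corrupt_file(compressed[0])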

File diff suppressed because it is too large

99
tests/init_test.py Normal file
@@ -0,0 +1,99 @@
import os
import unittest
from .helpers.ptrack_helpers import dir_files, ProbackupTest, ProbackupException
module_name = 'init'
class InitTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_success(self):
"""Success normal init"""
fname = self.id().split(".")[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname))
self.init_pb(backup_dir)
self.assertEqual(
dir_files(backup_dir),
['backups', 'wal']
)
self.add_instance(backup_dir, 'node', node)
self.assertEqual("INFO: Instance 'node' successfully deleted\n", self.del_instance(backup_dir, 'node'),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(self.output), self.cmd))
# Show non-existing instance
try:
self.show_pb(backup_dir, 'node')
self.assertEqual(1, 0, 'Expecting Error due to show of non-existing instance. Output: {0} \n CMD: {1}'.format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertEqual(e.message,
"ERROR: Instance 'node' does not exist in this backup catalog\n",
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(e.message, self.cmd))
# Delete non-existing instance
try:
self.del_instance(backup_dir, 'node1')
self.assertEqual(1, 0, 'Expecting Error due to delete of non-existing instance. Output: {0} \n CMD: {1}'.format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertEqual(e.message,
"ERROR: Instance 'node1' does not exist in this backup catalog\n",
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(e.message, self.cmd))
# Add instance without pgdata
try:
self.run_pb([
"add-instance",
"--instance=node1",
"-B", backup_dir
])
self.assertEqual(1, 0, 'Expecting Error due to adding instance without pgdata. Output: {0} \n CMD: {1}'.format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertEqual(e.message,
"ERROR: Required parameter not specified: PGDATA (-D, --pgdata)\n",
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(e.message, self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_already_exist(self):
"""Failure with backup catalog already existed"""
fname = self.id().split(".")[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname))
self.init_pb(backup_dir)
try:
self.show_pb(backup_dir, 'node')
self.assertEqual(1, 0, 'Expecting Error due to show of non-existing instance. Output: {0} \n CMD: {1}'.format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertEqual(e.message,
"ERROR: Instance 'node' does not exist in this backup catalog\n",
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_abs_path(self):
"""failure with backup catalog should be given as absolute path"""
fname = self.id().split(".")[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname))
try:
self.run_pb(["init", "-B", os.path.relpath("%s/backup" % node.base_dir, self.dir_path)])
self.assertEqual(1, 0, 'Expecting Error due to initialization with non-absolute path in --backup-path. Output: {0} \n CMD: {1}'.format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertEqual(e.message,
"ERROR: -B, --backup-path must be an absolute path\n",
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname)

0
tests/logging.py Normal file

454
tests/merge.py Normal file
@@ -0,0 +1,454 @@
# coding: utf-8
import unittest
import os
from .helpers.ptrack_helpers import ProbackupTest
module_name = "merge"
class MergeTest(ProbackupTest, unittest.TestCase):
def test_merge_full_page(self):
"""
Test MERGE command, it merges FULL backup with target PAGE backups
"""
fname = self.id().split(".")[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, "backup")
# Initialize instance and backup directory
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=["--data-checksums"]
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, "node", node)
self.set_archiving(backup_dir, "node", node)
node.start()
# Do full backup
self.backup_node(backup_dir, "node", node)
show_backup = self.show_pb(backup_dir, "node")[0]
self.assertEqual(show_backup["status"], "OK")
self.assertEqual(show_backup["backup-mode"], "FULL")
# Fill with data
with node.connect() as conn:
conn.execute("create table test (id int)")
conn.execute(
"insert into test select i from generate_series(1,10) s(i)")
conn.commit()
# Do first page backup
self.backup_node(backup_dir, "node", node, backup_type="page")
show_backup = self.show_pb(backup_dir, "node")[1]
# sanity check
self.assertEqual(show_backup["status"], "OK")
self.assertEqual(show_backup["backup-mode"], "PAGE")
# Fill with data
with node.connect() as conn:
conn.execute(
"insert into test select i from generate_series(1,10) s(i)")
count1 = conn.execute("select count(*) from test")
conn.commit()
# Do second page backup
self.backup_node(backup_dir, "node", node, backup_type="page")
show_backup = self.show_pb(backup_dir, "node")[2]
page_id = show_backup["id"]
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)
# sanity check
self.assertEqual(show_backup["status"], "OK")
self.assertEqual(show_backup["backup-mode"], "PAGE")
# Merge all backups
self.merge_backup(backup_dir, "node", page_id)
show_backups = self.show_pb(backup_dir, "node")
# sanity check
self.assertEqual(len(show_backups), 1)
self.assertEqual(show_backups[0]["status"], "OK")
self.assertEqual(show_backups[0]["backup-mode"], "FULL")
# Drop node and restore it
node.cleanup()
self.restore_node(backup_dir, 'node', node)
# Check physical correctness
if self.paranoia:
pgdata_restored = self.pgdata_content(
node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
node.slow_start()
# Check restored node
count2 = node.execute("postgres", "select count(*) from test")
self.assertEqual(count1, count2)
# Clean after yourself
node.cleanup()
self.del_test_dir(module_name, fname)
def test_merge_compressed_backups(self):
"""
Test MERGE command with compressed backups
"""
fname = self.id().split(".")[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, "backup")
# Initialize instance and backup directory
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=["--data-checksums"]
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, "node", node)
self.set_archiving(backup_dir, "node", node)
node.start()
# Do full compressed backup
self.backup_node(backup_dir, "node", node, options=[
'--compress-algorithm=zlib'])
show_backup = self.show_pb(backup_dir, "node")[0]
self.assertEqual(show_backup["status"], "OK")
self.assertEqual(show_backup["backup-mode"], "FULL")
# Fill with data
with node.connect() as conn:
conn.execute("create table test (id int)")
conn.execute(
"insert into test select i from generate_series(1,10) s(i)")
count1 = conn.execute("select count(*) from test")
conn.commit()
# Do compressed page backup
self.backup_node(
backup_dir, "node", node, backup_type="page",
options=['--compress-algorithm=zlib'])
show_backup = self.show_pb(backup_dir, "node")[1]
page_id = show_backup["id"]
self.assertEqual(show_backup["status"], "OK")
self.assertEqual(show_backup["backup-mode"], "PAGE")
# Merge all backups
self.merge_backup(backup_dir, "node", page_id)
show_backups = self.show_pb(backup_dir, "node")
self.assertEqual(len(show_backups), 1)
self.assertEqual(show_backups[0]["status"], "OK")
self.assertEqual(show_backups[0]["backup-mode"], "FULL")
# Drop node and restore it
node.cleanup()
self.restore_node(backup_dir, 'node', node)
node.slow_start()
# Check restored node
count2 = node.execute("postgres", "select count(*) from test")
self.assertEqual(count1, count2)
# Clean after yourself
node.cleanup()
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_merge_tablespaces(self):
"""
Some test here
"""
def test_merge_page_truncate(self):
"""
make node, create table, take full backup,
delete last 3 pages, vacuum relation,
take page backup, merge full and page,
restore merged backup and check data correctness
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2',
'checkpoint_timeout': '300s',
'autovacuum': 'off'
}
)
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname))
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node_restored.cleanup()
node.start()
self.create_tblspace_in_node(node, 'somedata')
node.safe_psql(
"postgres",
"create sequence t_seq; "
"create table t_heap tablespace somedata as select i as id, "
"md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,1024) i;")
node.safe_psql(
"postgres",
"vacuum t_heap")
self.backup_node(backup_dir, 'node', node)
node.safe_psql(
"postgres",
"delete from t_heap where ctid >= '(11,0)'")
node.safe_psql(
"postgres",
"vacuum t_heap")
self.backup_node(
backup_dir, 'node', node, backup_type='page')
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)
page_id = self.show_pb(backup_dir, "node")[1]["id"]
self.merge_backup(backup_dir, "node", page_id)
self.validate_pb(backup_dir)
old_tablespace = self.get_tblspace_path(node, 'somedata')
new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new')
self.restore_node(
backup_dir, 'node', node_restored,
options=[
"-j", "4",
"-T", "{0}={1}".format(old_tablespace, new_tablespace),
"--recovery-target-action=promote"])
# Physical comparison
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.slow_start()
# Logical comparison
result1 = node.safe_psql(
"postgres",
"select * from t_heap")
result2 = node_restored.safe_psql(
"postgres",
"select * from t_heap")
self.assertEqual(result1, result2)
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_merge_delta_truncate(self):
"""
make node, create table, take full backup,
delete last 3 pages, vacuum relation,
take delta backup, merge full and delta,
restore merged backup and check data correctness
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2',
'checkpoint_timeout': '300s',
'autovacuum': 'off'
}
)
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname))
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node_restored.cleanup()
node.start()
self.create_tblspace_in_node(node, 'somedata')
node.safe_psql(
"postgres",
"create sequence t_seq; "
"create table t_heap tablespace somedata as select i as id, "
"md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,1024) i;")
node.safe_psql(
"postgres",
"vacuum t_heap")
self.backup_node(backup_dir, 'node', node)
node.safe_psql(
"postgres",
"delete from t_heap where ctid >= '(11,0)'")
node.safe_psql(
"postgres",
"vacuum t_heap")
self.backup_node(
backup_dir, 'node', node, backup_type='delta')
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)
page_id = self.show_pb(backup_dir, "node")[1]["id"]
self.merge_backup(backup_dir, "node", page_id)
self.validate_pb(backup_dir)
old_tablespace = self.get_tblspace_path(node, 'somedata')
new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new')
self.restore_node(
backup_dir, 'node', node_restored,
options=[
"-j", "4",
"-T", "{0}={1}".format(old_tablespace, new_tablespace),
"--recovery-target-action=promote"])
# Physical comparison
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.slow_start()
# Logical comparison
result1 = node.safe_psql(
"postgres",
"select * from t_heap")
result2 = node_restored.safe_psql(
"postgres",
"select * from t_heap")
self.assertEqual(result1, result2)
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_merge_ptrack_truncate(self):
"""
make node, create table, take full backup,
delete last 3 pages, vacuum relation,
take ptrack backup, merge full and ptrack,
restore merged backup and check data correctness
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2',
'checkpoint_timeout': '300s',
'autovacuum': 'off',
'ptrack_enable': 'on'
}
)
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname))
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node_restored.cleanup()
node.start()
self.create_tblspace_in_node(node, 'somedata')
node.safe_psql(
"postgres",
"create sequence t_seq; "
"create table t_heap tablespace somedata as select i as id, "
"md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,1024) i;")
node.safe_psql(
"postgres",
"vacuum t_heap")
self.backup_node(backup_dir, 'node', node)
node.safe_psql(
"postgres",
"delete from t_heap where ctid >= '(11,0)'")
node.safe_psql(
"postgres",
"vacuum t_heap")
self.backup_node(
backup_dir, 'node', node, backup_type='ptrack')
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)
page_id = self.show_pb(backup_dir, "node")[1]["id"]
self.merge_backup(backup_dir, "node", page_id)
self.validate_pb(backup_dir)
old_tablespace = self.get_tblspace_path(node, 'somedata')
new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new')
self.restore_node(
backup_dir, 'node', node_restored,
options=[
"-j", "4",
"-T", "{0}={1}".format(old_tablespace, new_tablespace),
"--recovery-target-action=promote"])
# Physical comparison
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.slow_start()
# Logical comparison
result1 = node.safe_psql(
"postgres",
"select * from t_heap")
result2 = node_restored.safe_psql(
"postgres",
"select * from t_heap")
self.assertEqual(result1, result2)
# Clean after yourself
self.del_test_dir(module_name, fname)

218
tests/option_test.py Normal file
@@ -0,0 +1,218 @@
import unittest
import os
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
module_name = 'option'
class OptionTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_help_1(self):
"""help options"""
self.maxDiff = None
fname = self.id().split(".")[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
with open(os.path.join(self.dir_path, "expected/option_help.out"), "rb") as help_out:
self.assertEqual(
self.run_pb(["--help"]),
help_out.read().decode("utf-8")
)
# @unittest.skip("skip")
def test_version_2(self):
"""help options"""
fname = self.id().split(".")[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
with open(os.path.join(self.dir_path, "expected/option_version.out"), "rb") as version_out:
self.assertIn(
version_out.read().decode("utf-8"),
self.run_pb(["--version"])
)
# @unittest.skip("skip")
def test_without_backup_path_3(self):
"""backup command failure without backup mode option"""
fname = self.id().split(".")[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
try:
self.run_pb(["backup", "-b", "full"])
self.assertEqual(1, 0, "Expecting Error because '-B' parameter is not specified.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertEqual(e.message, 'ERROR: required parameter not specified: BACKUP_PATH (-B, --backup-path)\n',
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
# @unittest.skip("skip")
def test_options_4(self):
"""check options test"""
fname = self.id().split(".")[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname))
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
# backup command failure without instance option
try:
self.run_pb(["backup", "-B", backup_dir, "-D", node.data_dir, "-b", "full"])
self.assertEqual(1, 0, "Expecting Error because 'instance' parameter is not specified.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertEqual(e.message,
'ERROR: required parameter not specified: --instance\n',
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
# backup command failure without backup mode option
try:
self.run_pb(["backup", "-B", backup_dir, "--instance=node", "-D", node.data_dir])
self.assertEqual(1, 0, "Expecting Error because '-b' parameter is not specified.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertIn('ERROR: required parameter not specified: BACKUP_MODE (-b, --backup-mode)',
e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
# backup command failure with invalid backup mode option
try:
self.run_pb(["backup", "-B", backup_dir, "--instance=node", "-b", "bad"])
self.assertEqual(1, 0, "Expecting Error because backup-mode parameter is invalid.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertEqual(e.message,
'ERROR: invalid backup-mode "bad"\n',
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
# delete failure without delete options
try:
self.run_pb(["delete", "-B", backup_dir, "--instance=node"])
# we should die here because exception is what we expect to happen
self.assertEqual(1, 0, "Expecting Error because delete options are omitted.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertEqual(e.message,
'ERROR: You must specify at least one of the delete options: --expired |--wal |--backup_id\n',
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
# delete failure without ID
try:
self.run_pb(["delete", "-B", backup_dir, "--instance=node", '-i'])
# we should die here because exception is what we expect to happen
self.assertEqual(1, 0, "Expecting Error because backup ID is omitted.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue("option requires an argument -- 'i'" in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_options_5(self):
"""check options test"""
fname = self.id().split(".")[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
pg_options={
'wal_level': 'logical',
'max_wal_senders': '2'})
self.assertEqual("INFO: Backup catalog '{0}' successfully inited\n".format(backup_dir),
self.init_pb(backup_dir))
self.add_instance(backup_dir, 'node', node)
node.start()
# syntax error in pg_probackup.conf
with open(os.path.join(backup_dir, "backups", "node", "pg_probackup.conf"), "a") as conf:
conf.write(" = INFINITE\n")
try:
self.backup_node(backup_dir, 'node', node)
# we should die here because exception is what we expect to happen
self.assertEqual(1, 0, "Expecting Error because of garbage in pg_probackup.conf.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertEqual(e.message,
'ERROR: syntax error in " = INFINITE"\n',
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
self.clean_pb(backup_dir)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
# invalid value in pg_probackup.conf
with open(os.path.join(backup_dir, "backups", "node", "pg_probackup.conf"), "a") as conf:
conf.write("BACKUP_MODE=\n")
try:
self.backup_node(backup_dir, 'node', node, backup_type=None)
# we should die here because exception is what we expect to happen
self.assertEqual(1, 0, "Expecting Error because of invalid backup-mode in pg_probackup.conf.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertEqual(e.message,
'ERROR: invalid backup-mode ""\n',
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
self.clean_pb(backup_dir)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
# Command line parameters should override file values
with open(os.path.join(backup_dir, "backups", "node", "pg_probackup.conf"), "a") as conf:
conf.write("retention-redundancy=1\n")
self.assertEqual(self.show_config(backup_dir, 'node')['retention-redundancy'], '1')
# User cannot send --system-identifier parameter via command line
try:
self.backup_node(backup_dir, 'node', node, options=["--system-identifier", "123"])
# we should die here because exception is what we expect to happen
self.assertEqual(1, 0, "Expecting Error because option system-identifier cannot be specified in command line.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertEqual(e.message,
'ERROR: option system-identifier cannot be specified in command line\n',
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
# invalid value in pg_probackup.conf
with open(os.path.join(backup_dir, "backups", "node", "pg_probackup.conf"), "a") as conf:
conf.write("SMOOTH_CHECKPOINT=FOO\n")
try:
self.backup_node(backup_dir, 'node', node)
# we should die here because exception is what we expect to happen
self.assertEqual(1, 0, "Expecting Error because option -C should be boolean.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertEqual(e.message,
"ERROR: option -C, --smooth-checkpoint should be a boolean: 'FOO'\n",
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
self.clean_pb(backup_dir)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
# invalid option in pg_probackup.conf
pbconf_path = os.path.join(backup_dir, "backups", "node", "pg_probackup.conf")
with open(pbconf_path, "a") as conf:
conf.write("TIMELINEID=1\n")
try:
self.backup_node(backup_dir, 'node', node)
# we should die here because exception is what we expect to happen
self.assertEqual(1, 0, 'Expecting Error because of invalid option "TIMELINEID".\n Output: {0} \n CMD: {1}'.format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertEqual(e.message,
'ERROR: invalid option "TIMELINEID" in file "{0}"\n'.format(pbconf_path),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname)
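Note: every negative test in option_test.py repeats the same try/except scaffolding. A minimal helper capturing that pattern might look like the sketch below; the name assert_pb_fails is an assumption, not part of the suite, while run_pb, output, cmd and ProbackupException are the fixtures used above.

# Hypothetical helper (not part of the suite): factors out the
# try/except pattern repeated by the option tests above.
def assert_pb_fails(self, args, expected_fragment):
    """Run a pg_probackup command and assert it fails with the expected message."""
    try:
        self.run_pb(args)
        # Reaching this line means the command unexpectedly succeeded.
        self.assertEqual(
            1, 0,
            "Expecting Error.\n Output: {0} \n CMD: {1}".format(
                repr(self.output), self.cmd))
    except ProbackupException as e:
        self.assertIn(
            expected_fragment, e.message,
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(e.message), self.cmd))

# Example usage:
#   self.assert_pb_fails(
#       ["backup", "-B", backup_dir, "--instance=node", "-b", "bad"],
#       'invalid backup-mode "bad"')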

641
tests/page.py Normal file

@ -0,0 +1,641 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
from datetime import datetime, timedelta
import subprocess
module_name = 'page'
class PageBackupTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_page_vacuum_truncate(self):
"""
make node, create table, take full backup,
delete last 3 pages, vacuum relation,
take page backup, take second page backup,
restore last page backup and check data correctness
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2',
'checkpoint_timeout': '300s',
'autovacuum': 'off'
}
)
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname))
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node_restored.cleanup()
node.start()
self.create_tblspace_in_node(node, 'somedata')
node.safe_psql(
"postgres",
"create sequence t_seq; "
"create table t_heap tablespace somedata as select i as id, "
"md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,1024) i;")
node.safe_psql(
"postgres",
"vacuum t_heap")
self.backup_node(backup_dir, 'node', node)
node.safe_psql(
"postgres",
"delete from t_heap where ctid >= '(11,0)'")
node.safe_psql(
"postgres",
"vacuum t_heap")
self.backup_node(
backup_dir, 'node', node, backup_type='page',
options=['--log-level-file=verbose'])
self.backup_node(
backup_dir, 'node', node, backup_type='page')
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)
old_tablespace = self.get_tblspace_path(node, 'somedata')
new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new')
self.restore_node(
backup_dir, 'node', node_restored,
options=[
"-j", "4",
"-T", "{0}={1}".format(old_tablespace, new_tablespace),
"--recovery-target-action=promote"])
# Physical comparison
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.slow_start()
# Logical comparison
result1 = node.safe_psql(
"postgres",
"select * from t_heap")
result2 = node_restored.safe_psql(
"postgres",
"select * from t_heap")
self.assertEqual(result1, result2)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_page_stream(self):
"""
make archive node, take full and page stream backups,
restore them and check data correctness
"""
self.maxDiff = None
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2',
'checkpoint_timeout': '30s'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
# FULL BACKUP
node.safe_psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(i::text)::tsvector as tsvector "
"from generate_series(0,100) i")
full_result = node.execute("postgres", "SELECT * FROM t_heap")
full_backup_id = self.backup_node(
backup_dir, 'node', node,
backup_type='full', options=['--stream'])
# PAGE BACKUP
node.safe_psql(
"postgres",
"insert into t_heap select i as id, md5(i::text) as text, "
"md5(i::text)::tsvector as tsvector "
"from generate_series(100,200) i")
page_result = node.execute("postgres", "SELECT * FROM t_heap")
page_backup_id = self.backup_node(
backup_dir, 'node', node,
backup_type='page', options=['--stream'])
# Drop Node
node.cleanup()
# Check full backup
self.assertIn(
"INFO: Restore of backup {0} completed.".format(full_backup_id),
self.restore_node(
backup_dir, 'node', node,
backup_id=full_backup_id, options=["-j", "4"]),
'\n Unexpected Error Message: {0}\n'
' CMD: {1}'.format(repr(self.output), self.cmd))
node.slow_start()
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()
# Check page backup
self.assertIn(
"INFO: Restore of backup {0} completed.".format(page_backup_id),
self.restore_node(
backup_dir, 'node', node,
backup_id=page_backup_id, options=["-j", "4"]),
'\n Unexpected Error Message: {0}\n'
' CMD: {1}'.format(repr(self.output), self.cmd))
node.slow_start()
page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(page_result, page_result_new)
node.cleanup()
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_page_archive(self):
"""
make archive node, take full and page archive backups,
restore them and check data correctness
"""
self.maxDiff = None
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2',
'checkpoint_timeout': '30s'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
# FULL BACKUP
node.safe_psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(i::text)::tsvector as tsvector from generate_series(0,1) i")
full_result = node.execute("postgres", "SELECT * FROM t_heap")
full_backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='full')
# PAGE BACKUP
node.safe_psql(
"postgres",
"insert into t_heap select i as id, "
"md5(i::text) as text, md5(i::text)::tsvector as tsvector "
"from generate_series(0,2) i")
page_result = node.execute("postgres", "SELECT * FROM t_heap")
page_backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='page')
# Drop Node
node.cleanup()
# Restore and check full backup
self.assertIn("INFO: Restore of backup {0} completed.".format(
full_backup_id),
self.restore_node(
backup_dir, 'node', node,
backup_id=full_backup_id,
options=[
"-j", "4",
"--immediate",
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.slow_start()
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()
# Restore and check page backup
self.assertIn(
"INFO: Restore of backup {0} completed.".format(page_backup_id),
self.restore_node(
backup_dir, 'node', node,
backup_id=page_backup_id,
options=[
"-j", "4",
"--immediate",
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.slow_start()
page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(page_result, page_result_new)
node.cleanup()
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_page_multiple_segments(self):
"""
Make node, create table with multiple segments,
write some data to it, take page backup, check data correctness
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2',
'fsync': 'off',
'shared_buffers': '1GB',
'maintenance_work_mem': '1GB',
'autovacuum': 'off',
'full_page_writes': 'off'
}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
self.create_tblspace_in_node(node, 'somedata')
# CREATE TABLE
node.pgbench_init(scale=100, options=['--tablespace=somedata'])
# FULL BACKUP
self.backup_node(backup_dir, 'node', node)
# PGBENCH STUFF
pgbench = node.pgbench(options=['-T', '50', '-c', '1', '--no-vacuum'])
pgbench.wait()
node.safe_psql("postgres", "checkpoint")
# GET LOGICAL CONTENT FROM NODE
result = node.safe_psql("postgres", "select * from pgbench_accounts")
# PAGE BACKUP
self.backup_node(
backup_dir, 'node', node, backup_type='page',
options=["--log-level-file=verbose"])
# GET PHYSICAL CONTENT FROM NODE
pgdata = self.pgdata_content(node.data_dir)
# RESTORE NODE
restored_node = self.make_simple_node(
base_dir="{0}/{1}/restored_node".format(module_name, fname))
restored_node.cleanup()
tblspc_path = self.get_tblspace_path(node, 'somedata')
tblspc_path_new = self.get_tblspace_path(
restored_node, 'somedata_restored')
self.restore_node(
backup_dir, 'node', restored_node,
options=[
"-j", "4",
"--recovery-target-action=promote",
"-T", "{0}={1}".format(tblspc_path, tblspc_path_new)])
# GET PHYSICAL CONTENT FROM NODE_RESTORED
pgdata_restored = self.pgdata_content(restored_node.data_dir)
# START RESTORED NODE
restored_node.append_conf(
"postgresql.auto.conf", "port = {0}".format(restored_node.port))
restored_node.slow_start()
result_new = restored_node.safe_psql(
"postgres", "select * from pgbench_accounts")
# COMPARE RESTORED FILES
self.assertEqual(result, result_new, 'data is lost')
if self.paranoia:
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
self.del_test_dir(module_name, fname)
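Note: pgbench_init(scale=100) makes pgbench_accounts larger than the default 1 GB segment limit, so its heap is split into segment files named after the relfilenode, plus .1, .2, and so on. A sketch of how a test could enumerate those segments (list_relation_segments is a hypothetical helper; safe_psql returning bytes mirrors its use elsewhere in this suite):

import glob
import os

def list_relation_segments(node, relname):
    # Resolve the relation's first segment path relative to PGDATA.
    relpath = node.safe_psql(
        "postgres",
        "select pg_relation_filepath('{0}')".format(relname)
    ).rstrip().decode("utf-8")
    base = os.path.join(node.data_dir, relpath)
    # The first segment has no suffix; later ones are <path>.1, <path>.2, ...
    return [base] + glob.glob(base + ".[0-9]*")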
# @unittest.skip("skip")
def test_page_delete(self):
"""
Make node, create tablespace with table, take full backup,
delete everything from table, vacuum table, take page backup,
restore page backup, compare restored and original data.
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True, initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2',
'checkpoint_timeout': '30s',
'autovacuum': 'off'
}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
self.create_tblspace_in_node(node, 'somedata')
# FULL backup
self.backup_node(backup_dir, 'node', node)
node.safe_psql(
"postgres",
"create table t_heap tablespace somedata as select i as id,"
" md5(i::text) as text, md5(i::text)::tsvector as tsvector"
" from generate_series(0,100) i"
)
node.safe_psql(
"postgres",
"delete from t_heap"
)
node.safe_psql(
"postgres",
"vacuum t_heap"
)
# PAGE BACKUP
self.backup_node(
backup_dir, 'node', node, backup_type='page')
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)
# RESTORE
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname)
)
node_restored.cleanup()
self.restore_node(
backup_dir, 'node', node_restored,
options=[
"-j", "4",
"-T", "{0}={1}".format(
self.get_tblspace_path(node, 'somedata'),
self.get_tblspace_path(node_restored, 'somedata'))
]
)
# GET RESTORED PGDATA AND COMPARE
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# START RESTORED NODE
node_restored.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node_restored.port))
node_restored.start()
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_page_delete_1(self):
"""
Make node, create tablespace with table, take full backup,
delete everything from table, vacuum table, take page backup,
restore page backup, compare restored and original data.
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True, initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2',
'checkpoint_timeout': '30s',
'autovacuum': 'off'
}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
self.create_tblspace_in_node(node, 'somedata')
node.safe_psql(
"postgres",
"create table t_heap tablespace somedata as select i as id,"
" md5(i::text) as text, md5(i::text)::tsvector as tsvector"
" from generate_series(0,100) i"
)
# FULL backup
self.backup_node(backup_dir, 'node', node)
node.safe_psql(
"postgres",
"delete from t_heap"
)
node.safe_psql(
"postgres",
"vacuum t_heap"
)
# PAGE BACKUP
self.backup_node(
backup_dir, 'node', node, backup_type='page')
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)
# RESTORE
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname)
)
node_restored.cleanup()
self.restore_node(
backup_dir, 'node', node_restored,
options=[
"-j", "4",
"-T", "{0}={1}".format(
self.get_tblspace_path(node, 'somedata'),
self.get_tblspace_path(node_restored, 'somedata'))
]
)
# GET RESTORED PGDATA AND COMPARE
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# START RESTORED NODE
node_restored.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node_restored.port))
node_restored.start()
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_parallel_pagemap(self):
"""
Test parallel reading of WAL segments, during which the pagemap is built
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
# Initialize instance and backup directory
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={
"hot_standby": "on"
}
)
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname),
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node_restored.cleanup()
self.set_archiving(backup_dir, 'node', node)
node.start()
# Do full backup
self.backup_node(backup_dir, 'node', node)
show_backup = self.show_pb(backup_dir, 'node')[0]
self.assertEqual(show_backup['status'], "OK")
self.assertEqual(show_backup['backup-mode'], "FULL")
# Fill instance with data and make several WAL segments ...
with node.connect() as conn:
conn.execute("create table test (id int)")
for x in range(0, 8):
conn.execute(
"insert into test select i from generate_series(1,100) s(i)")
conn.commit()
self.switch_wal_segment(conn)
count1 = conn.execute("select count(*) from test")
# ... and do page backup with parallel pagemap
self.backup_node(
backup_dir, 'node', node, backup_type="page", options=["-j", "4"])
show_backup = self.show_pb(backup_dir, 'node')[1]
self.assertEqual(show_backup['status'], "OK")
self.assertEqual(show_backup['backup-mode'], "PAGE")
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)
# Restore it
self.restore_node(backup_dir, 'node', node_restored)
# Physical comparison
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.start()
# Check restored node
count2 = node_restored.execute("postgres", "select count(*) from test")
self.assertEqual(count1, count2)
# Clean after yourself
node.cleanup()
node_restored.cleanup()
self.del_test_dir(module_name, fname)
def test_parallel_pagemap_1(self):
"""
Test parallel reading of WAL segments, during which the pagemap is built
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
# Initialize instance and backup directory
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
# Do full backup
self.backup_node(backup_dir, 'node', node)
show_backup = self.show_pb(backup_dir, 'node')[0]
self.assertEqual(show_backup['status'], "OK")
self.assertEqual(show_backup['backup-mode'], "FULL")
# Fill instance with data and make several WAL segments ...
node.pgbench_init(scale=10)
# do page backup in single thread
page_id = self.backup_node(
backup_dir, 'node', node, backup_type="page")
self.delete_pb(backup_dir, 'node', page_id)
# ... and do page backup with parallel pagemap
self.backup_node(
backup_dir, 'node', node, backup_type="page", options=["-j", "4"])
show_backup = self.show_pb(backup_dir, 'node')[1]
self.assertEqual(show_backup['status'], "OK")
self.assertEqual(show_backup['backup-mode'], "PAGE")
# Drop node and restore it
node.cleanup()
self.restore_node(backup_dir, 'node', node)
node.start()
# Clean after yourself
node.cleanup()
self.del_test_dir(module_name, fname)
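Note: test_parallel_pagemap forces WAL rollover through self.switch_wal_segment(conn). A guess at such a helper is sketched below; it assumes conn.execute returns rows as in the tests above, and picks the switch function by server version, since pg_switch_wal() only exists from PostgreSQL 10 while older servers provide pg_switch_xlog():

def switch_wal_segment(conn):
    # Force the server to close the current WAL segment so that the
    # pagemap build has several segments to read in parallel.
    version_num = int(conn.execute("show server_version_num")[0][0])
    if version_num >= 100000:
        conn.execute("select pg_switch_wal()")    # PostgreSQL 10+
    else:
        conn.execute("select pg_switch_xlog()")   # PostgreSQL 9.6 and older
    conn.commit()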

98
tests/pgpro560.py Normal file

@ -0,0 +1,98 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack
from datetime import datetime, timedelta
import subprocess
module_name = 'pgpro560'
class CheckSystemID(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_pgpro560_control_file_loss(self):
"""
https://jira.postgrespro.ru/browse/PGPRO-560
make node with stream support, delete control file
make backup
check that backup failed
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
file = os.path.join(node.base_dir, 'data', 'global', 'pg_control')
os.remove(file)
try:
self.backup_node(backup_dir, 'node', node, options=['--stream'])
# we should die here because exception is what we expect to happen
self.assertEqual(1, 0, "Expecting Error because pg_control was deleted.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
'ERROR: could not open file' in e.message
and 'pg_control' in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_pgpro560_systemid_mismatch(self):
"""
https://jira.postgrespro.ru/browse/PGPRO-560
make node1 and node2
run backup with PGDATA from node1 and PGPORT from node2
check that backup failed
"""
fname = self.id().split('.')[3]
node1 = self.make_simple_node(base_dir="{0}/{1}/node1".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
node1.start()
node2 = self.make_simple_node(base_dir="{0}/{1}/node2".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
node2.start()
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node1', node1)
try:
self.backup_node(backup_dir, 'node1', node2, options=['--stream'])
# we should die here because exception is what we expect to happen
self.assertEqual(1, 0, "Expecting Error because of SYSTEM ID mismatch.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
'ERROR: Backup data directory was initialized for system id' in e.message
and 'but connected instance system id is' in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
try:
self.backup_node(backup_dir, 'node1', node2, data_dir=node1.data_dir, options=['--stream'])
# we should die here because exception is what we expect to happen
self.assertEqual(1, 0, "Expecting Error because of of SYSTEM ID mismatch.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
'ERROR: Backup data directory was initialized for system id' in e.message
and 'but connected instance system id is' in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname)
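Note: the mismatch that test_pgpro560_systemid_mismatch provokes can be reproduced by hand by comparing the system identifiers of the two instances. A sketch, assuming PostgreSQL 9.6+ where pg_control_system() is available (the helper name is illustrative):

def system_ids_match(node1, node2):
    # Each initdb run generates a unique system identifier, stored in
    # pg_control and exposed via pg_control_system() since PostgreSQL 9.6.
    query = "select system_identifier from pg_control_system()"
    id1 = node1.safe_psql("postgres", query).rstrip()
    id2 = node2.safe_psql("postgres", query).rstrip()
    return id1 == id2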

80
tests/pgpro589.py Normal file

@ -0,0 +1,80 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack
from datetime import datetime, timedelta
import subprocess
module_name = 'pgpro589'
class ArchiveCheck(ProbackupTest, unittest.TestCase):
def test_pgpro589(self):
"""
https://jira.postgrespro.ru/browse/PGPRO-589
make node without archive support, make backup which should fail
check that backup status is equal to ERROR
check that no files were copied to the backup catalogue
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
# make erroneous archive_command
node.append_conf("postgresql.auto.conf", "archive_command = 'exit 0'")
node.start()
node.pgbench_init(scale=5)
pgbench = node.pgbench(
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
options=["-c", "4", "-T", "10"]
)
pgbench.wait()
pgbench.stdout.close()
path = node.safe_psql(
"postgres",
"select pg_relation_filepath('pgbench_accounts')").rstrip().decode(
"utf-8")
try:
self.backup_node(
backup_dir, 'node', node,
options=['--archive-timeout=10'])
# we should die here because exception is what we expect to happen
self.assertEqual(
1, 0,
"Expecting Error because of missing archive wal "
"segment with start_lsn.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
'INFO: Wait for WAL segment' in e.message and
'ERROR: Switched WAL segment' in e.message and
'could not be archived' in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
backup_id = self.show_pb(backup_dir, 'node')[0]['id']
self.assertEqual(
'ERROR', self.show_pb(backup_dir, 'node', backup_id)['status'],
'Backup should have ERROR status')
file = os.path.join(
backup_dir, 'backups', 'node',
backup_id, 'database', path)
self.assertFalse(
os.path.isfile(file),
"\n Start LSN was not found in archive but datafiles where "
"copied to backup catalogue.\n For example: {0}\n "
"It is not optimal".format(file))
# Clean after yourself
self.del_test_dir(module_name, fname)
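Note: the test above deliberately configures archive_command = 'exit 0', which acknowledges every segment without storing it, so the backup times out waiting for its start segment. For contrast, a minimal working command copies each finished segment into an archive directory (sketch only; the wal_dir path is illustrative, and the test-for-existence guard follows the PostgreSQL documentation):

# Sketch only: a functional archive_command, unlike the 'exit 0' stub above.
wal_dir = "/tmp/wal_archive"   # illustrative archive location
node.append_conf(
    "postgresql.auto.conf",
    "archive_command = 'test ! -f {0}/%f && cp %p {0}/%f'".format(wal_dir))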

1600
tests/ptrack.py Normal file

File diff suppressed because it is too large

253
tests/ptrack_clean.py Normal file

@ -0,0 +1,253 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
import time
module_name = 'ptrack_clean'
class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_clean(self):
"""Take backups of every available types and check that PTRACK is clean"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
self.create_tblspace_in_node(node, 'somedata')
# Create table and indexes
node.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap tablespace somedata "
"as select i as id, nextval('t_seq') as t_seq, "
"md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,256) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql(
"postgres",
"create index {0} on {1} using {2}({3}) "
"tablespace somedata".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'],
idx_ptrack[i]['column']))
# Take FULL backup to clean every ptrack
self.backup_node(
backup_dir, 'node', node,
options=['-j10', '--stream'])
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get fork size and calculate it in pages
idx_ptrack[i]['size'] = self.get_fork_size(node, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
# Update everything and vacuum it
node.safe_psql(
'postgres',
"update t_heap set t_seq = nextval('t_seq'), "
"text = md5(text), "
"tsvector = md5(repeat(tsvector::text, 10))::tsvector;")
node.safe_psql('postgres', 'vacuum t_heap')
# Take PTRACK backup to clean every ptrack
backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
options=['-j10', '--log-level-file=verbose'])
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get new size of heap and indexes and calculate it in pages
idx_ptrack[i]['size'] = self.get_fork_size(node, i)
# update path to heap and index files in case they've changed
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
# check that ptrack bits are cleaned
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
# Update everything and vacuum it
node.safe_psql(
'postgres',
"update t_heap set t_seq = nextval('t_seq'), "
"text = md5(text), "
"tsvector = md5(repeat(tsvector::text, 10))::tsvector;")
node.safe_psql('postgres', 'vacuum t_heap')
# Take PAGE backup to clean every ptrack
self.backup_node(
backup_dir, 'node', node,
backup_type='page', options=['-j10'])
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get new size of heap and indexes and calculate it in pages
idx_ptrack[i]['size'] = self.get_fork_size(node, i)
# update path to heap and index files in case they've changed
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
# check that ptrack bits are cleaned
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_clean_replica(self):
"""Take backups of every available types from master and check that PTRACK on replica is clean"""
fname = self.id().split('.')[3]
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.start()
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start()
# Create table and indexes
master.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap as select i as id, "
"nextval('t_seq') as t_seq, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,256) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql(
"postgres",
"create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'],
idx_ptrack[i]['column']))
# Take FULL backup to clean every ptrack
self.backup_node(
backup_dir,
'replica',
replica,
options=[
'-j10', '--stream',
'--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(master.port)])
master.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get fork size and calculate it in pages
idx_ptrack[i]['size'] = self.get_fork_size(replica, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
# Update everything and vacuum it
master.safe_psql(
'postgres',
"update t_heap set t_seq = nextval('t_seq'), "
"text = md5(text), "
"tsvector = md5(repeat(tsvector::text, 10))::tsvector;")
master.safe_psql('postgres', 'vacuum t_heap')
# Take PTRACK backup to clean every ptrack
backup_id = self.backup_node(
backup_dir,
'replica',
replica,
backup_type='ptrack',
options=[
'-j10', '--stream',
'--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(master.port)])
master.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get new size of heap and indexes and calculate it in pages
idx_ptrack[i]['size'] = self.get_fork_size(replica, i)
# update path to heap and index files in case they've changed
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
# check that ptrack bits are cleaned
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
# Update everything and vacuum it
master.safe_psql(
'postgres',
"update t_heap set t_seq = nextval('t_seq'), text = md5(text), "
"tsvector = md5(repeat(tsvector::text, 10))::tsvector;")
master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')
# Take PAGE backup to clean every ptrack
self.backup_node(
backup_dir,
'replica',
replica,
backup_type='page',
options=[
'-j10', '--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(master.port)])
master.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get new size of heap and indexes and calculate it in pages
idx_ptrack[i]['size'] = self.get_fork_size(replica, i)
# update path to heap and index files in case they've changed
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
# check that ptrack bits are cleaned
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
# Clean after yourself
self.del_test_dir(module_name, fname)
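Note: the per-page bookkeeping above leans on get_md5_per_page_for_fork. Its behaviour can be pictured as hashing one relation fork segment page by page; the re-implementation below is a guess at the helper, not its actual code, and assumes the default 8 kB page size:

import hashlib

BLCKSZ = 8192  # default PostgreSQL block size

def md5_per_page(path, size_in_pages):
    # Hash every page of one segment file of a relation fork.
    pages = {}
    with open(path, "rb") as f:
        for pageno in range(size_in_pages):
            page = f.read(BLCKSZ)
            if not page:
                break  # fork is shorter than expected
            pages[pageno] = hashlib.md5(page).hexdigest()
    return pages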

268
tests/ptrack_cluster.py Normal file

@ -0,0 +1,268 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
from time import sleep
from sys import exit
module_name = 'ptrack_cluster'
class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_cluster_on_btree(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
self.create_tblspace_in_node(node, 'somedata')
# Create table and indexes
node.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, nextval('t_seq') as t_seq, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
node.safe_psql('postgres', 'vacuum t_heap')
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
node.safe_psql('postgres', 'cluster t_heap using t_btree')
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
# update path to heap and index files in case they've changed
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_ptrack_cluster_on_gist(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
# Create table and indexes
node.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap as select i as id, nextval('t_seq') as t_seq, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
node.safe_psql('postgres', 'vacuum t_heap')
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
node.safe_psql('postgres', 'cluster t_heap using t_gist')
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
# update path to heap and index files in case they've changed
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# Compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_ptrack_cluster_on_btree_replica(self):
fname = self.id().split('.')[3]
master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.start()
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start()
# Create table and indexes
master.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap as select i as id, nextval('t_seq') as t_seq, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.backup_node(backup_dir, 'replica', replica, options=['-j10', '--stream',
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
master.safe_psql('postgres', 'cluster t_heap using t_btree')
master.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
# update path to heap and index files in case they've changed
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
# Clean after yourself
self.del_test_dir(module_name, fname)
#@unittest.skip("skip")
def test_ptrack_cluster_on_gist_replica(self):
fname = self.id().split('.')[3]
master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.start()
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, 'replica', synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start()
# Create table and indexes
master.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap as select i as id, nextval('t_seq') as t_seq, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.backup_node(backup_dir, 'replica', replica, options=['-j10', '--stream',
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
master.safe_psql('postgres', 'cluster t_heap using t_gist')
master.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
# update path to heap and index files in case they've changed
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# Compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
# Clean after yourself
self.del_test_dir(module_name, fname)
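Note: the invariant behind check_ptrack_sanity in the cluster tests above is that every page whose contents changed between the two snapshots must have its ptrack bit set. A sketch of that core check, assuming page-number-to-md5 dicts and an indexable bitmap shaped like the idx_ptrack entries built above:

def ptrack_bits_are_sane(old_pages, new_pages, ptrack_bits):
    # A modified page that ptrack failed to mark would make an
    # incremental backup silently skip real changes.
    for pageno, new_md5 in new_pages.items():
        changed = old_pages.get(pageno) != new_md5
        if changed and not ptrack_bits[pageno]:
            return False
    return True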

57
tests/ptrack_move_to_tablespace.py Normal file

@ -0,0 +1,57 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
module_name = 'ptrack_move_to_tablespace'
class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_recovery(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
self.create_tblspace_in_node(node, 'somedata')
# Create table and indexes
node.safe_psql("postgres",
"create sequence t_seq; create table t_heap as select i as id, md5(i::text) as text,md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
# Move table and indexes and make checkpoint
for i in idx_ptrack:
if idx_ptrack[i]['type'] == 'heap':
node.safe_psql('postgres', 'alter table {0} set tablespace somedata;'.format(i))
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql('postgres', 'alter index {0} set tablespace somedata'.format(i))
node.safe_psql('postgres', 'checkpoint')
# Check ptrack files
for i in idx_ptrack:
if idx_ptrack[i]['type'] == 'seq':
continue
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['size'] = self.get_fork_size(node, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
# check that ptrack has correct bits after recovery
self.check_ptrack_recovery(idx_ptrack[i])
# Clean after yourself
self.del_test_dir(module_name, fname)

58
tests/ptrack_recovery.py Normal file

@ -0,0 +1,58 @@
import os
import unittest
from sys import exit
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
module_name = 'ptrack_recovery'
class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_recovery(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
self.create_tblspace_in_node(node, 'somedata')
# Create table
node.safe_psql("postgres",
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text,md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
# Create indexes
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['size'] = int(self.get_fork_size(node, i))
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
if self.verbose:
print('Killing postmaster. Losing Ptrack changes')
node.stop(['-m', 'immediate', '-D', node.data_dir])
if not node.status():
node.start()
else:
print("Die! Die! Why won't you die?... Why won't you die?")
exit(1)
for i in idx_ptrack:
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
# check that ptrack has correct bits after recovery
self.check_ptrack_recovery(idx_ptrack[i])
# Clean after yourself
self.del_test_dir(module_name, fname)

130
tests/ptrack_truncate.py Normal file

@ -0,0 +1,130 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
module_name = 'ptrack_truncate'
class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_truncate(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
self.create_tblspace_in_node(node, 'somedata')
# Create table and indexes
node.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
node.safe_psql('postgres', 'truncate t_heap')
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get fork size and calculate it in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums for every page of this fork
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
# Make full backup to clean every ptrack
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
for i in idx_ptrack:
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_ptrack_truncate_replica(self):
fname = self.id().split('.')[3]
master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.start()
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, 'replica', synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start()
# Create table and indexes
master.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
master.safe_psql('postgres', 'truncate t_heap')  # TRUNCATE must run on the primary; the replica is read-only
replica.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get fork size and calculate it in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate md5sums for every page of this fork
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
# Make full backup to clean every ptrack
self.backup_node(backup_dir, 'replica', replica, options=['-j10', '--stream'])
for i in idx_ptrack:
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])
# Delete some rows, vacuum it and make checkpoint
master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get new size of heap and indexes and calculate it in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
# update path to heap and index files in case they've changed
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
# Clean after yourself
self.del_test_dir(module_name, fname)

152
tests/ptrack_vacuum.py Normal file
View File

@ -0,0 +1,152 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
module_name = 'ptrack_vacuum'
class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
self.create_tblspace_in_node(node, 'somedata')
# Create table and indexes
node.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
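# Settle the heap with VACUUM + CHECKPOINT before taking the baseline
# fork sizes and per-page md5 sums.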
node.safe_psql('postgres', 'vacuum t_heap')
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get fork size and calculate it in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums for every page of this fork
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
# Make full backup to clean every ptrack
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
for i in idx_ptrack:
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])
# Delete some rows, vacuum it and make checkpoint
node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
node.safe_psql('postgres', 'vacuum t_heap')
node.safe_psql('postgres', 'checkpoint')
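# The delete + vacuum above pruned dead tuples and updated the free space and
# visibility maps; the loop below verifies ptrack recorded those page writes.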
for i in idx_ptrack:
# get new size of heap and indexes and calculate it in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
# update path to heap and index files in case they've changed
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_ptrack_vacuum_replica(self):
fname = self.id().split('.')[3]
master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.start()
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, 'replica', synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start()
# Create table and indexes
master.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get fork size and calculate it in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate md5sums for every page of this fork
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
# Make FULL backup to clean every ptrack
self.backup_node(backup_dir, 'replica', replica, options=['-j10',
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
for i in idx_ptrack:
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])
# Delete some rows, vacuum it and make checkpoint
master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')
# CHECK PTRACK SANITY
for i in idx_ptrack:
# get new size of heap and indexes and calculate it in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
# update path to heap and index files in case they've changed
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
# Clean after yourself
self.del_test_dir(module_name, fname)

136
tests/ptrack_vacuum_bits_frozen.py Normal file
View File

@ -0,0 +1,136 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
module_name = 'ptrack_vacuum_bits_frozen'
class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum_bits_frozen(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
self.create_tblspace_in_node(node, 'somedata')
# Create table and indexes
res = node.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
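# VACUUM FREEZE rewrites tuple headers (frozen xmin, hint bits), dirtying
# pages without changing user-visible data; ptrack must still flag them.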
node.safe_psql('postgres', 'vacuum freeze t_heap')
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
# update path to heap and index files in case they've changed
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_ptrack_vacuum_bits_frozen_replica(self):
fname = self.id().split('.')[3]
master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.start()
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start()
# Create table and indexes
master.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
master.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
# Take FULL backup to clean every ptrack
self.backup_node(backup_dir, 'replica', replica, options=['-j10',
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
master.safe_psql('postgres', 'vacuum freeze t_heap')
master.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
# update path to heap and index files in case they've changed
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
# Clean after yourself
self.del_test_dir(module_name, fname)

67
tests/ptrack_vacuum_bits_visibility.py Normal file
View File

@ -0,0 +1,67 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
module_name = 'ptrack_vacuum_bits_visibility'
class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum_bits_visibility(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
self.create_tblspace_in_node(node, 'somedata')
# Create table and indexes
res = node.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
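# Plain VACUUM sets visibility-map and hint bits; those page writes must
# show up in the ptrack maps checked below.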
node.safe_psql('postgres', 'vacuum t_heap')
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
# update path to heap and index files in case they've changed
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
# Clean after yourself
self.del_test_dir(module_name, fname)

140
tests/ptrack_vacuum_full.py Normal file
View File

@ -0,0 +1,140 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
module_name = 'ptrack_vacuum_full'
class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum_full(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
self.create_tblspace_in_node(node, 'somedata')
# Create table and indexes
res = node.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,127) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
node.safe_psql('postgres', 'vacuum t_heap')
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
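# VACUUM FULL rewrites the table and its indexes into new relfilenodes;
# ptrack must account for the relocated pages.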
node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
node.safe_psql('postgres', 'vacuum full t_heap')
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
# update path to heap and index files in case they've changed
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity, the most important part
self.check_ptrack_sanity(idx_ptrack[i])
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum_full_replica(self):
fname = self.id().split('.')[3]
master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.start()
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, 'replica', synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start()
# Create table and indexes
master.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,127) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
# Take FULL backup to clean every ptrack
self.backup_node(backup_dir, 'replica', replica, options=['-j10',
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
master.safe_psql('postgres', 'vacuum full t_heap')
master.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
# update path to heap and index files in case they've changed
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity, the most important part
self.check_ptrack_sanity(idx_ptrack[i])
# Clean after yourself
self.del_test_dir(module_name, fname)

142
tests/ptrack_vacuum_truncate.py Normal file
View File

@ -0,0 +1,142 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
module_name = 'ptrack_vacuum_truncate'
class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum_truncate(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
self.create_tblspace_in_node(node, 'somedata')
# Create table and indexes
res = node.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
node.safe_psql('postgres', 'vacuum t_heap')
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
node.safe_psql('postgres', 'delete from t_heap where id > 128;')
node.safe_psql('postgres', 'vacuum t_heap')
node.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
# update path to heap and index files in case they've changed
idx_ptrack[i]['path'] = self.get_fork_path(node, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum_truncate_replica(self):
fname = self.id().split('.')[3]
master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.start()
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, 'replica', synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start()
# Create table and indexes
master.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
# get path to heap and index files
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate md5sums of pages
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
# Take FULL backup to clean every ptrack
self.backup_node(backup_dir, 'replica', replica, options=['-j10',
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
master.safe_psql('postgres', 'delete from t_heap where id > 128;')
master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
# update path to heap and index files in case they've changed
idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
# calculate new md5sums for pages
idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
# Clean after yourself
self.del_test_dir(module_name, fname)

293
tests/replica.py Normal file
View File

@ -0,0 +1,293 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack
from datetime import datetime, timedelta
import subprocess
from sys import exit
import time
module_name = 'replica'
class ReplicaTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_replica_stream_ptrack_backup(self):
"""
make node, take full backup, restore it and make replica from it,
take full stream backup from replica
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica', 'max_wal_senders': '2',
'checkpoint_timeout': '30s', 'ptrack_enable': 'on'}
)
master.start()
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
# CREATE TABLE
master.psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,256) i")
before = master.safe_psql("postgres", "SELECT * FROM t_heap")
# take full backup and restore it
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
self.set_replica(master, replica)
# Check data correctness on replica
replica.slow_start(replica=True)
after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
# Change data on master, take FULL backup from replica,
# restore taken backup and check that restored data equal
# to original data
master.psql(
"postgres",
"insert into t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(256,512) i")
before = master.safe_psql("postgres", "SELECT * FROM t_heap")
self.add_instance(backup_dir, 'replica', replica)
backup_id = self.backup_node(
backup_dir, 'replica', replica,
options=[
'--stream',
'--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(master.port)])
self.validate_pb(backup_dir, 'replica')
self.assertEqual(
'OK', self.show_pb(backup_dir, 'replica', backup_id)['status'])
# RESTORE FULL BACKUP TAKEN FROM PREVIOUS STEP
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname))
node.cleanup()
self.restore_node(backup_dir, 'replica', data_dir=node.data_dir)
node.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node.port))
node.slow_start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
# Change data on master, take PTRACK backup from replica,
# restore taken backup and check that restored data equal
# to original data
master.psql(
"postgres",
"insert into t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(512,768) i")
before = master.safe_psql("postgres", "SELECT * FROM t_heap")
backup_id = self.backup_node(
backup_dir, 'replica', replica, backup_type='ptrack',
options=[
'--stream',
'--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(master.port)])
self.validate_pb(backup_dir, 'replica')
self.assertEqual(
'OK', self.show_pb(backup_dir, 'replica', backup_id)['status'])
# RESTORE PTRACK BACKUP TAKEN FROM replica
node.cleanup()
self.restore_node(
backup_dir, 'replica', data_dir=node.data_dir, backup_id=backup_id)
node.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node.port))
node.slow_start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_replica_archive_page_backup(self):
"""
make archive master, take full and page archive backups from master,
set replica, make archive backup from replica
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2',
'checkpoint_timeout': '30s'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
self.set_archiving(backup_dir, 'master', master)
# force more frequent wal switch
master.append_conf('postgresql.auto.conf', 'archive_timeout = 10')
master.slow_start()
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.backup_node(backup_dir, 'master', master)
master.psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,256) i")
before = master.safe_psql("postgres", "SELECT * FROM t_heap")
backup_id = self.backup_node(
backup_dir, 'master', master, backup_type='page')
self.restore_node(backup_dir, 'master', replica)
# Settings for Replica
self.set_replica(master, replica)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.slow_start(replica=True)
# Check data correctness on replica
after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
# Change data on master, take FULL backup from replica,
# restore taken backup and check that restored data
# equal to original data
master.psql(
"postgres",
"insert into t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(256,512) i")
before = master.safe_psql("postgres", "SELECT * FROM t_heap")
self.add_instance(backup_dir, 'replica', replica)
backup_id = self.backup_node(
backup_dir, 'replica', replica,
options=[
'--archive-timeout=300',
'--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(master.port)])
self.validate_pb(backup_dir, 'replica')
self.assertEqual(
'OK', self.show_pb(backup_dir, 'replica', backup_id)['status'])
# RESTORE FULL BACKUP TAKEN FROM replica
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname))
node.cleanup()
self.restore_node(backup_dir, 'replica', data_dir=node.data_dir)
node.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node.port))
node.slow_start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
# Change data on master, make PAGE backup from replica,
# restore taken backup and check that restored data equal
# to original data
master.psql(
"postgres",
"insert into t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(512,768) i")
before = master.safe_psql("postgres", "SELECT * FROM t_heap")
backup_id = self.backup_node(
backup_dir, 'replica', replica, backup_type='page',
options=[
'--archive-timeout=300',
'--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(master.port)])
self.validate_pb(backup_dir, 'replica')
self.assertEqual(
'OK', self.show_pb(backup_dir, 'replica', backup_id)['status'])
# RESTORE PAGE BACKUP TAKEN FROM replica
node.cleanup()
self.restore_node(
backup_dir, 'replica', data_dir=node.data_dir, backup_id=backup_id)
node.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node.port))
node.slow_start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_make_replica_via_restore(self):
"""
make archive master, take full and page archive backups from master,
set replica, make archive backup from replica
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica', 'max_wal_senders': '2',
'checkpoint_timeout': '30s'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
self.set_archiving(backup_dir, 'master', master)
# force more frequent wal switch
master.append_conf('postgresql.auto.conf', 'archive_timeout = 10')
master.slow_start()
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()
self.backup_node(backup_dir, 'master', master)
master.psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,256) i")
before = master.safe_psql("postgres", "SELECT * FROM t_heap")
backup_id = self.backup_node(
backup_dir, 'master', master, backup_type='page')
self.restore_node(
backup_dir, 'master', replica,
options=['-R', '--recovery-target-action=promote'])
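# '-R' tells pg_probackup to write recovery settings into the restored data
# dir, which is why the explicit set_replica() below stays commented out.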
# Settings for Replica
# self.set_replica(master, replica)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(replica.port))
replica.start()
# Clean after yourself
self.del_test_dir(module_name, fname)

1243
tests/restore_test.py Normal file

File diff suppressed because it is too large

178
tests/retention_test.py Normal file
View File

@ -0,0 +1,178 @@
import os
import unittest
from datetime import datetime, timedelta
from .helpers.ptrack_helpers import ProbackupTest
module_name = 'retention'
class RetentionTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_retention_redundancy_1(self):
"""purge backups using redundancy-based retention policy"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
with open(os.path.join(
backup_dir, 'backups', 'node',
"pg_probackup.conf"), "a") as conf:
conf.write("retention-redundancy = 1\n")
# Make backups to be purged
self.backup_node(backup_dir, 'node', node)
self.backup_node(backup_dir, 'node', node, backup_type="page")
# Make backups to be kept
self.backup_node(backup_dir, 'node', node)
self.backup_node(backup_dir, 'node', node, backup_type="page")
self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4)
# Purge backups
log = self.delete_expired(backup_dir, 'node')
self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2)
# Check that WAL segments were deleted
min_wal = None
max_wal = None
for line in log.splitlines():
if line.startswith("INFO: removed min WAL segment"):
min_wal = line[31:-1]
elif line.startswith("INFO: removed max WAL segment"):
max_wal = line[31:-1]
if not min_wal:
self.assertTrue(False, "min_wal is not set")
if not max_wal:
self.assertTrue(False, "max_wal is not set")
for wal_name in os.listdir(os.path.join(backup_dir, 'wal', 'node')):
if not wal_name.endswith(".backup"):
# wal_name_b = wal_name.encode('ascii')
self.assertTrue(wal_name[8:] > min_wal[8:])
self.assertTrue(wal_name[8:] > max_wal[8:])
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("123")
def test_retention_window_2(self):
"""purge backups using window-based retention policy"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
with open(
os.path.join(
backup_dir,
'backups',
'node',
"pg_probackup.conf"), "a") as conf:
conf.write("retention-redundancy = 1\n")
conf.write("retention-window = 1\n")
# Make backups to be purged
self.backup_node(backup_dir, 'node', node)
self.backup_node(backup_dir, 'node', node, backup_type="page")
# Make backup to be kept
self.backup_node(backup_dir, 'node', node)
backups = os.path.join(backup_dir, 'backups', 'node')
days_delta = 5
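# Backdate recovery_time in each backup.control so the earlier backups fall
# outside the one-day retention window.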
for backup in os.listdir(backups):
if backup == 'pg_probackup.conf':
continue
with open(
os.path.join(
backups, backup, "backup.control"), "a") as conf:
conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format(
datetime.now() - timedelta(days=days_delta)))
days_delta -= 1
# Make backup to be kept
self.backup_node(backup_dir, 'node', node, backup_type="page")
self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4)
# Purge backups
self.delete_expired(backup_dir, 'node')
self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("123")
def test_retention_wal(self):
"""purge backups using window-based retention policy"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.safe_psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,100500) i")
# Take FULL BACKUP
self.backup_node(backup_dir, 'node', node)
node.safe_psql(
"postgres",
"insert into t_heap select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,100500) i")
self.backup_node(backup_dir, 'node', node)
backups = os.path.join(backup_dir, 'backups', 'node')
days_delta = 5
for backup in os.listdir(backups):
if backup == 'pg_probackup.conf':
continue
with open(
os.path.join(
backups, backup, "backup.control"), "a") as conf:
conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format(
datetime.now() - timedelta(days=days_delta)))
days_delta -= 1
# Make backup to be kept
self.backup_node(backup_dir, 'node', node, backup_type="page")
self.assertEqual(len(self.show_pb(backup_dir, 'node')), 3)
# Purge backups
self.delete_expired(
backup_dir, 'node', options=['--retention-window=2'])
self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2)
# Clean after yourself
self.del_test_dir(module_name, fname)

203
tests/show_test.py Normal file
View File

@ -0,0 +1,203 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
module_name = 'show'
class OptionTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_show_1(self):
"""Status DONE and OK"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
self.assertEqual(
self.backup_node(
backup_dir, 'node', node,
options=["--log-level-console=panic"]),
None
)
self.assertIn("OK", self.show_pb(backup_dir, 'node', as_text=True))
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_show_json(self):
"""Status DONE and OK"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
self.assertEqual(
self.backup_node(
backup_dir, 'node', node,
options=["--log-level-console=panic"]),
None
)
self.backup_node(backup_dir, 'node', node)
self.assertIn("OK", self.show_pb(backup_dir, 'node', as_text=True))
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_corrupt_2(self):
"""Status CORRUPT"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
backup_id = self.backup_node(backup_dir, 'node', node)
# delete a file belonging to the backup
file = os.path.join(
backup_dir, "backups", "node",
backup_id, "database", "postgresql.conf")
os.remove(file)
try:
self.validate_pb(backup_dir, 'node', backup_id)
# we should die here because exception is what we expect to happen
self.assertEqual(
1, 0,
"Expecting Error because backup corrupted.\n"
" Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd
)
)
except ProbackupException as e:
self.assertIn(
'data files are corrupted\n',
e.message,
'\n Unexpected Error Message: {0}\n'
' CMD: {1}'.format(repr(e.message), self.cmd)
)
self.assertIn("CORRUPT", self.show_pb(backup_dir, as_text=True))
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_no_control_file(self):
"""backup.control doesn't exist"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
backup_id = self.backup_node(backup_dir, 'node', node)
# delete backup.control file
file = os.path.join(
backup_dir, "backups", "node",
backup_id, "backup.control")
os.remove(file)
self.assertIn('control file "{0}" doesn\'t exist'.format(file), self.show_pb(backup_dir, 'node', as_text=True))
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_empty_control_file(self):
"""backup.control is empty"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
backup_id = self.backup_node(backup_dir, 'node', node)
# truncate backup.control file
file = os.path.join(
backup_dir, "backups", "node",
backup_id, "backup.control")
fd = open(file, 'w')
fd.close()
self.assertIn('control file "{0}" is empty'.format(file), self.show_pb(backup_dir, 'node', as_text=True))
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_corrupt_control_file(self):
"""backup.control contains invalid option"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
backup_id = self.backup_node(backup_dir, 'node', node)
# corrupt backup.control file
file = os.path.join(
backup_dir, "backups", "node",
backup_id, "backup.control")
fd = open(file, 'a')
fd.write("statuss = OK")
fd.close()
self.assertIn('invalid option "statuss" in file', self.show_pb(backup_dir, 'node', as_text=True))
# Clean after yourself
self.del_test_dir(module_name, fname)

1730
tests/validate_test.py Normal file

File diff suppressed because it is too large

66
travis/backup_restore.sh Normal file
View File

@ -0,0 +1,66 @@
#!/bin/sh -ex
# vars
export PGVERSION=9.5.4
export PATH=$PATH:/usr/pgsql-9.5/bin
export PGUSER=pgbench
export PGDATABASE=pgbench
export PGDATA=/var/lib/pgsql/9.5/data
export BACKUP_PATH=/backups
export ARCLOG_PATH=$BACKUP_PATH/backup/pg_xlog
export PGDATA2=/var/lib/pgsql/9.5/data2
export PGBENCH_SCALE=100
export PGBENCH_TIME=60
# prepare directory
cp -a /tests /build
pushd /build
# download postgresql
yum install -y wget
wget -k https://ftp.postgresql.org/pub/source/v$PGVERSION/postgresql-$PGVERSION.tar.gz -O postgresql.tar.gz
tar xf postgresql.tar.gz
# install pg_probackup
yum install -y https://download.postgresql.org/pub/repos/yum/9.5/redhat/rhel-7-x86_64/pgdg-centos95-9.5-2.noarch.rpm
yum install -y postgresql95-devel make gcc readline-devel openssl-devel pam-devel libxml2-devel libxslt-devel
make top_srcdir=postgresql-$PGVERSION
make install top_srcdir=postgresql-$PGVERSION
# initialize cluster and database
yum install -y postgresql95-server
su postgres -c "/usr/pgsql-9.5/bin/initdb -D $PGDATA -k"
cat <<EOF > $PGDATA/pg_hba.conf
local all all trust
host all all 127.0.0.1/32 trust
local replication pgbench trust
host replication pgbench 127.0.0.1/32 trust
EOF
cat <<EOF > $PGDATA/postgresql.auto.conf
max_wal_senders = 2
wal_level = logical
wal_log_hints = on
EOF
su postgres -c "/usr/pgsql-9.5/bin/pg_ctl start -w -D $PGDATA"
su postgres -c "createdb -U postgres $PGUSER"
su postgres -c "createuser -U postgres -a -d -E $PGUSER"
pgbench -i -s $PGBENCH_SCALE
# Count current
COUNT=$(psql -Atc "select count(*) from pgbench_accounts")
pgbench -s $PGBENCH_SCALE -T $PGBENCH_TIME -j 2 -c 10 &
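# keep pgbench running in the background so the backup below is taken under
# concurrent write load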
# create backup
pg_probackup init
pg_probackup backup -b full --disable-ptrack-clear --stream -v
pg_probackup show
sleep $PGBENCH_TIME
# restore from backup
chown -R postgres:postgres $BACKUP_PATH
su postgres -c "pg_probackup restore -D $PGDATA2"
# start backup server
su postgres -c "/usr/pgsql-9.5/bin/pg_ctl stop -w -D $PGDATA"
su postgres -c "/usr/pgsql-9.5/bin/pg_ctl start -w -D $PGDATA2"
( psql -Atc "select count(*) from pgbench_accounts" | grep $COUNT ) || (cat $PGDATA2/pg_log/*.log ; exit 1)

240
win32build.pl Normal file
View File

@ -0,0 +1,240 @@
#!/usr/bin/perl
use JSON;
our $repack_version;
our $pgdir;
our $pgsrc;
if (@ARGV!=2) {
print STDERR "Usage $0 postgress-instalation-root pg-source-dir \n";
exit 1;
}
our $liblist="";
$pgdir = shift @ARGV;
$pgsrc = shift @ARGV if @ARGV;
our $arch = $ENV{'ARCH'} || "x64";
$arch='Win32' if ($arch eq 'x86' || $arch eq 'X86');
$arch='x64' if $arch eq 'X64';
$conffile = $pgsrc."/tools/msvc/config.pl";
die 'Could not find config.pl'
unless (-f $conffile);
our $config;
do $conffile;
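# config.pl is expected to populate $config with paths to optional PostgreSQL
# build dependencies (zlib, openssl, icu, ...).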
if (! -d "$pgdir/bin" || !-d "$pgdir/include" || !-d "$pgdir/lib") {
print STDERR "Directory $pgdir doesn't look like root of postgresql installation\n";
exit 1;
}
our $includepath="";
our $libpath="";
our $libpath32="";
AddProject();
print "\n\n";
print $libpath."\n";
print $includepath."\n";
# open F,"<","META.json" or die "Cannot open META.json: $!\n";
# {
# local $/ = undef;
# $decoded = decode_json(<F>);
# $repack_version= $decoded->{'version'};
# }
# substitute new path in the project files
preprocess_project("./msvs/template.pg_probackup.vcxproj","./msvs/pg_probackup.vcxproj");
exit 0;
sub preprocess_project {
my $in = shift;
my $out = shift;
our $pgdir;
our $adddir;
my $libs;
if (defined $adddir) {
$libs ="$adddir;";
} else{
$libs ="";
}
open IN,"<",$in or die "Cannot open $in: $!\n";
open OUT,">",$out or die "Cannot open $out: $!\n";
# $includepath .= ";";
# $libpath .= ";";
while (<IN>) {
s/\@PGROOT\@/$pgdir/g;
s/\@ADDLIBS\@/$libpath/g;
s/\@ADDLIBS32\@/$libpath32/g;
s/\@PGSRC\@/$pgsrc/g;
s/\@ADDINCLUDE\@/$includepath/g;
print OUT $_;
}
close IN;
close OUT;
}
# my sub
sub AddLibrary
{
$inc = shift;
if ($libpath ne '')
{
$libpath .= ';';
}
$libpath .= $inc;
if ($libpath32 ne '')
{
$libpath32 .= ';';
}
$libpath32 .= $inc;
}
sub AddLibrary32
{
$inc = shift;
if ($libpath32 ne '')
{
$libpath32 .= ';';
}
$libpath32 .= $inc;
}
sub AddLibrary64
{
$inc = shift;
if ($libpath ne '')
{
$libpath .= ';';
}
$libpath .= $inc;
}
sub AddIncludeDir
{
# my ($self, $inc) = @_;
$inc = shift;
if ($includepath ne '')
{
$includepath .= ';';
}
$includepath .= $inc;
}
sub AddProject
{
# my ($self, $name, $type, $folder, $initialdir) = @_;
if ($config->{zlib})
{
AddIncludeDir($config->{zlib} . '\include');
AddLibrary($config->{zlib} . '\lib\zdll.lib');
}
if ($config->{openssl})
{
AddIncludeDir($config->{openssl} . '\include');
if (-e "$config->{openssl}/lib/VC/ssleay32MD.lib")
{
AddLibrary(
$config->{openssl} . '\lib\VC\ssleay32.lib', 1);
AddLibrary(
$config->{openssl} . '\lib\VC\libeay32.lib', 1);
}
else
{
# We don't expect the config-specific library to be here,
# so don't ask for it in last parameter
AddLibrary(
$config->{openssl} . '\lib\ssleay32.lib', 0);
AddLibrary(
$config->{openssl} . '\lib\libeay32.lib', 0);
}
}
if ($config->{nls})
{
AddIncludeDir($config->{nls} . '\include');
AddLibrary($config->{nls} . '\lib\libintl.lib');
}
if ($config->{gss})
{
AddIncludeDir($config->{gss} . '\inc\krb5');
AddLibrary($config->{gss} . '\lib\i386\krb5_32.lib');
AddLibrary($config->{gss} . '\lib\i386\comerr32.lib');
AddLibrary($config->{gss} . '\lib\i386\gssapi32.lib');
}
if ($config->{iconv})
{
AddIncludeDir($config->{iconv} . '\include');
AddLibrary($config->{iconv} . '\lib\iconv.lib');
}
if ($config->{icu})
{
AddIncludeDir($config->{icu} . '\include');
AddLibrary32($config->{icu} . '\lib\icuin.lib');
AddLibrary32($config->{icu} . '\lib\icuuc.lib');
AddLibrary32($config->{icu} . '\lib\icudt.lib');
AddLibrary64($config->{icu} . '\lib64\icuin.lib');
AddLibrary64($config->{icu} . '\lib64\icuuc.lib');
AddLibrary64($config->{icu} . '\lib64\icudt.lib');
}
if ($config->{xml})
{
AddIncludeDir($config->{xml} . '\include');
AddIncludeDir($config->{xml} . '\include\libxml2');
AddLibrary($config->{xml} . '\lib\libxml2.lib');
}
if ($config->{xslt})
{
AddIncludeDir($config->{xslt} . '\include');
AddLibrary($config->{xslt} . '\lib\libxslt.lib');
}
if ($config->{libedit})
{
AddIncludeDir($config->{libedit} . '\include');
# AddLibrary($config->{libedit} . "\\" .
# ($arch eq 'x64'? 'lib64': 'lib32').'\edit.lib');
AddLibrary32($config->{libedit} . '\\lib32\edit.lib');
AddLibrary64($config->{libedit} . '\\lib64\edit.lib');
}
if ($config->{uuid})
{
AddIncludeDir($config->{uuid} . '\include');
AddLibrary($config->{uuid} . '\lib\uuid.lib');
}
if ($config->{zstd})
{
AddIncludeDir($config->{zstd});
# AddLibrary($config->{zstd}. "\\".($arch eq 'x64'? "zstdlib_x64.lib" : "zstdlib_x86.lib"));
AddLibrary32($config->{zstd}. "\\zstdlib_x86.lib");
AddLibrary64($config->{zstd}. "\\zstdlib_x64.lib") ;
}
# return $proj;
}

240
win32build96.pl Normal file
View File

@ -0,0 +1,240 @@
#!/usr/bin/perl
use JSON;
our $repack_version;
our $pgdir;
our $pgsrc;
if (@ARGV!=2) {
print STDERR "Usage $0 postgress-instalation-root pg-source-dir \n";
exit 1;
}
our $liblist="";
$pgdir = shift @ARGV;
$pgsrc = shift @ARGV if @ARGV;
our $arch = $ENV{'ARCH'} || "x64";
$arch='Win32' if ($arch eq 'x86' || $arch eq 'X86');
$arch='x64' if $arch eq 'X64';
$conffile = $pgsrc."/tools/msvc/config.pl";
die 'Could not find config.pl'
unless (-f $conffile);
our $config;
do $conffile;
if (! -d "$pgdir/bin" || !-d "$pgdir/include" || !-d "$pgdir/lib") {
print STDERR "Directory $pgdir doesn't look like root of postgresql installation\n";
exit 1;
}
our $includepath="";
our $libpath="";
our $libpath32="";
AddProject();
print "\n\n";
print $libpath."\n";
print $includepath."\n";
# open F,"<","META.json" or die "Cannot open META.json: $!\n";
# {
# local $/ = undef;
# $decoded = decode_json(<F>);
# $repack_version= $decoded->{'version'};
# }
# substitute new path in the project files
preprocess_project("./msvs/template.pg_probackup96.vcxproj","./msvs/pg_probackup.vcxproj");
exit 0;
sub preprocess_project {
my $in = shift;
my $out = shift;
our $pgdir;
our $adddir;
my $libs;
if (defined $adddir) {
$libs ="$adddir;";
} else{
$libs ="";
}
open IN,"<",$in or die "Cannot open $in: $!\n";
open OUT,">",$out or die "Cannot open $out: $!\n";
# $includepath .= ";";
# $libpath .= ";";
while (<IN>) {
s/\@PGROOT\@/$pgdir/g;
s/\@ADDLIBS\@/$libpath/g;
s/\@ADDLIBS32\@/$libpath32/g;
s/\@PGSRC\@/$pgsrc/g;
s/\@ADDINCLUDE\@/$includepath/g;
print OUT $_;
}
close IN;
close OUT;
}
# my sub
sub AddLibrary
{
$inc = shift;
if ($libpath ne '')
{
$libpath .= ';';
}
$libpath .= $inc;
if ($libpath32 ne '')
{
$libpath32 .= ';';
}
$libpath32 .= $inc;
}
sub AddLibrary32
{
$inc = shift;
if ($libpath32 ne '')
{
$libpath32 .= ';';
}
$libpath32 .= $inc;
}
sub AddLibrary64
{
$inc = shift;
if ($libpath ne '')
{
$libpath .= ';';
}
$libpath .= $inc;
}
sub AddIncludeDir
{
# my ($self, $inc) = @_;
$inc = shift;
if ($includepath ne '')
{
$includepath .= ';';
}
$includepath .= $inc;
}
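# Register the include and library paths contributed by every optional
# dependency enabled in config.pl. The structure loosely follows the
# dependency handling in PostgreSQL's src/tools/msvc build scripts.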
sub AddProject
{
# my ($self, $name, $type, $folder, $initialdir) = @_;
if ($config->{zlib})
{
AddIncludeDir($config->{zlib} . '\include');
AddLibrary($config->{zlib} . '\lib\zdll.lib');
}
if ($config->{openssl})
{
AddIncludeDir($config->{openssl} . '\include');
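# The trailing 0/1 argument in the AddLibrary calls below is ignored by
# this script's one-argument AddLibrary; it appears to be carried over
# from the PostgreSQL build scripts.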
if (-e "$config->{openssl}/lib/VC/ssleay32MD.lib")
{
AddLibrary(
$config->{openssl} . '\lib\VC\ssleay32.lib', 1);
AddLibrary(
$config->{openssl} . '\lib\VC\libeay32.lib', 1);
}
else
{
# We don't expect the config-specific library to be here,
# so don't ask for it in last parameter
AddLibrary(
$config->{openssl} . '\lib\ssleay32.lib', 0);
AddLibrary(
$config->{openssl} . '\lib\libeay32.lib', 0);
}
}
if ($config->{nls})
{
AddIncludeDir($config->{nls} . '\include');
AddLibrary($config->{nls} . '\lib\libintl.lib');
}
if ($config->{gss})
{
AddIncludeDir($config->{gss} . '\inc\krb5');
AddLibrary($config->{gss} . '\lib\i386\krb5_32.lib');
AddLibrary($config->{gss} . '\lib\i386\comerr32.lib');
AddLibrary($config->{gss} . '\lib\i386\gssapi32.lib');
}
if ($config->{iconv})
{
AddIncludeDir($config->{iconv} . '\include');
AddLibrary($config->{iconv} . '\lib\iconv.lib');
}
if ($config->{icu})
{
AddIncludeDir($config->{icu} . '\include');
AddLibrary32($config->{icu} . '\lib\icuin.lib');
AddLibrary32($config->{icu} . '\lib\icuuc.lib');
AddLibrary32($config->{icu} . '\lib\icudt.lib');
AddLibrary64($config->{icu} . '\lib64\icuin.lib');
AddLibrary64($config->{icu} . '\lib64\icuuc.lib');
AddLibrary64($config->{icu} . '\lib64\icudt.lib');
}
if ($config->{xml})
{
AddIncludeDir($config->{xml} . '\include');
AddIncludeDir($config->{xml} . '\include\libxml2');
AddLibrary($config->{xml} . '\lib\libxml2.lib');
}
if ($config->{xslt})
{
AddIncludeDir($config->{xslt} . '\include');
AddLibrary($config->{xslt} . '\lib\libxslt.lib');
}
if ($config->{libedit})
{
AddIncludeDir($config->{libedit} . '\include');
# AddLibrary($config->{libedit} . "\\" .
# ($arch eq 'x64'? 'lib64': 'lib32').'\edit.lib');
AddLibrary32($config->{libedit} . '\\lib32\edit.lib');
AddLibrary64($config->{libedit} . '\\lib64\edit.lib');
}
if ($config->{uuid})
{
AddIncludeDir($config->{uuid} . '\include');
AddLibrary($config->{uuid} . '\lib\uuid.lib');
}
if ($config->{zstd})
{
AddIncludeDir($config->{zstd});
# AddLibrary($config->{zstd}. "\\".($arch eq 'x64'? "zstdlib_x64.lib" : "zstdlib_x86.lib"));
AddLibrary32($config->{zstd}. "\\zstdlib_x86.lib");
AddLibrary64($config->{zstd}. "\\zstdlib_x64.lib") ;
}
# return $proj;
}

219
win32build_2.pl Normal file

@ -0,0 +1,219 @@
#!/usr/bin/perl
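#
# win32build_2.pl: generate the MSVC project file for building pg_probackup
# on Windows from the template msvs/template.pg_probackup_2.vcxproj. Unlike
# win32build96.pl, a single library path list is kept, and the 32- or 64-bit
# variant of each dependency is chosen from the ARCH environment variable.
#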
use JSON;
our $repack_version;
our $pgdir;
our $pgsrc;
if (@ARGV!=2) {
print STDERR "Usage $0 postgress-instalation-root pg-source-dir \n";
exit 1;
}
our $liblist="";
$pgdir = shift @ARGV;
$pgsrc = shift @ARGV if @ARGV;
our $arch = $ENV{'ARCH'} || "x64";
$arch='Win32' if ($arch eq 'x86' || $arch eq 'X86');
$arch='x64' if $arch eq 'X64';
$conffile = $pgsrc."/tools/msvc/config.pl";
die 'Could not find config.pl'
unless (-f $conffile);
our $config;
do $conffile;
if (! -d "$pgdir/bin" || !-d "$pgdir/include" || !-d "$pgdir/lib") {
print STDERR "Directory $pgdir doesn't look like root of postgresql installation\n";
exit 1;
}
our $includepath="";
our $libpath="";
AddProject();
print "\n\n";
print $libpath."\n";
print $includepath."\n";
# open F,"<","META.json" or die "Cannot open META.json: $!\n";
# {
# local $/ = undef;
# $decoded = decode_json(<F>);
# $repack_version= $decoded->{'version'};
# }
# substitute new path in the project files
preprocess_project("./msvs/template.pg_probackup_2.vcxproj","./msvs/pg_probackup.vcxproj");
exit 0;
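# Same template substitution as in win32build96.pl, except that no separate
# 32-bit library list (@ADDLIBS32@) is produced.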
sub preprocess_project {
my $in = shift;
my $out = shift;
our $pgdir;
our $adddir;
my $libs;
if (defined $adddir) {
$libs ="$adddir;";
} else{
$libs ="";
}
open IN,"<",$in or die "Cannot open $in: $!\n";
open OUT,">",$out or die "Cannot open $out: $!\n";
# $includepath .= ";";
# $libpath .= ";";
while (<IN>) {
s/\@PGROOT\@/$pgdir/g;
s/\@ADDLIBS\@/$libpath/g;
s/\@PGSRC\@/$pgsrc/g;
s/\@ADDINCLUDE\@/$includepath/g;
print OUT $_;
}
close IN;
close OUT;
}
# Path-accumulation helper: append a path to the semicolon-separated library
# path list (a single list is kept here, unlike win32build96.pl).
sub AddLibrary
{
$inc = shift;
if ($libpath ne '')
{
$libpath .= ';';
}
$libpath .= $inc;
}
sub AddIncludeDir
{
# my ($self, $inc) = @_;
$inc = shift;
if ($includepath ne '')
{
$includepath .= ';';
}
$includepath .= $inc;
}
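# Register include and library paths for each optional dependency enabled in
# config.pl; where a dependency ships separate 32- and 64-bit libraries, the
# variant is selected from $arch.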
sub AddProject
{
# my ($self, $name, $type, $folder, $initialdir) = @_;
if ($config->{zlib})
{
AddIncludeDir($config->{zlib} . '\include');
AddLibrary($config->{zlib} . '\lib\zdll.lib');
}
if ($config->{openssl})
{
AddIncludeDir($config->{openssl} . '\include');
if (-e "$config->{openssl}/lib/VC/ssleay32MD.lib")
{
AddLibrary(
$config->{openssl} . '\lib\VC\ssleay32.lib', 1);
AddLibrary(
$config->{openssl} . '\lib\VC\libeay32.lib', 1);
}
else
{
# We don't expect the config-specific library to be here,
# so don't ask for it in last parameter
AddLibrary(
$config->{openssl} . '\lib\ssleay32.lib', 0);
AddLibrary(
$config->{openssl} . '\lib\libeay32.lib', 0);
}
}
if ($config->{nls})
{
AddIncludeDir($config->{nls} . '\include');
AddLibrary($config->{nls} . '\lib\libintl.lib');
}
if ($config->{gss})
{
AddIncludeDir($config->{gss} . '\inc\krb5');
AddLibrary($config->{gss} . '\lib\i386\krb5_32.lib');
AddLibrary($config->{gss} . '\lib\i386\comerr32.lib');
AddLibrary($config->{gss} . '\lib\i386\gssapi32.lib');
}
if ($config->{iconv})
{
AddIncludeDir($config->{iconv} . '\include');
AddLibrary($config->{iconv} . '\lib\iconv.lib');
}
if ($config->{icu})
{
AddIncludeDir($config->{icu} . '\include');
if ($arch eq 'Win32')
{
AddLibrary($config->{icu} . '\lib\icuin.lib');
AddLibrary($config->{icu} . '\lib\icuuc.lib');
AddLibrary($config->{icu} . '\lib\icudt.lib');
}
else
{
AddLibrary($config->{icu} . '\lib64\icuin.lib');
AddLibrary($config->{icu} . '\lib64\icuuc.lib');
AddLibrary($config->{icu} . '\lib64\icudt.lib');
}
}
if ($config->{xml})
{
AddIncludeDir($config->{xml} . '\include');
AddIncludeDir($config->{xml} . '\include\libxml2');
AddLibrary($config->{xml} . '\lib\libxml2.lib');
}
if ($config->{xslt})
{
AddIncludeDir($config->{xslt} . '\include');
AddLibrary($config->{xslt} . '\lib\libxslt.lib');
}
if ($config->{libedit})
{
AddIncludeDir($config->{libedit} . '\include');
AddLibrary($config->{libedit} . "\\" .
($arch eq 'x64'? 'lib64': 'lib32').'\edit.lib');
}
if ($config->{uuid})
{
AddIncludeDir($config->{uuid} . '\include');
AddLibrary($config->{uuid} . '\lib\uuid.lib');
}
if ($config->{zstd})
{
AddIncludeDir($config->{zstd});
AddLibrary($config->{zstd}. "\\".
($arch eq 'x64'? "zstdlib_x64.lib" : "zstdlib_x86.lib")
);
}
# return $proj;
}