mirror of https://github.com/postgrespro/pg_probackup.git synced 2025-03-19 21:38:02 +02:00

Merge branch 'master' into stable

Commit a00d4d1f1a by Arthur Zakirov, 2018-08-06 12:48:06 +03:00
63 changed files with 6610 additions and 2092 deletions

Makefile

@ -4,7 +4,8 @@ OBJS = src/backup.o src/catalog.o src/configure.o src/data.o \
src/pg_probackup.o src/restore.o src/show.o src/status.o \
src/util.o src/validate.o src/datapagemap.o src/parsexlog.o \
src/xlogreader.o src/streamutil.o src/receivelog.o \
src/archive.o src/utils/parray.o src/utils/pgut.o src/utils/logger.o
src/archive.o src/utils/parray.o src/utils/pgut.o src/utils/logger.o \
src/utils/json.o src/utils/thread.o src/merge.o
EXTRA_CLEAN = src/datapagemap.c src/datapagemap.h src/xlogreader.c \
src/receivelog.c src/receivelog.h src/streamutil.c src/streamutil.h src/logging.h
@ -62,6 +63,7 @@ src/streamutil.c: $(top_srcdir)/src/bin/pg_basebackup/streamutil.c
src/streamutil.h: $(top_srcdir)/src/bin/pg_basebackup/streamutil.h
rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_basebackup/streamutil.h $@
ifeq ($(MAJORVERSION),10)
src/walmethods.c: $(top_srcdir)/src/bin/pg_basebackup/walmethods.c
rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_basebackup/walmethods.c $@

doit.cmd Normal file

@ -0,0 +1 @@
perl win32build.pl "C:\PgProject\pgwininstall-ee\builddir\distr_X64_10.4.1\postgresql" "C:\PgProject\pgwininstall-ee\builddir\postgresql\postgrespro-enterprise-10.4.1\src"

doit96.cmd Normal file

@ -0,0 +1 @@
perl win32build96.pl "C:\PgPro96" "C:\PgProject\pg96ee\postgrespro\src"

gen_probackup_project.pl Normal file

@ -0,0 +1,190 @@
# -*-perl-*- hey - emacs - this is a perl file
BEGIN{
use Cwd;
use File::Basename;
my $pgsrc="";
if (@ARGV==1)
{
$pgsrc = shift @ARGV;
if ($pgsrc eq "--help") {
print STDERR "Usage: $0 pg-source-dir\n";
print STDERR "Like this:\n";
print STDERR "$0 C:/PgProject/postgresql.10dev/postgrespro\n";
print STDERR "You may need to run this first:\n";
print STDERR "CALL \"C:\\Program Files (x86)\\Microsoft Visual Studio 12.0\\VC\\vcvarsall\" amd64\n";
exit 1;
}
}
else
{
use Cwd qw(abs_path);
my $path = dirname(abs_path($0));
chdir($path);
chdir("../..");
$pgsrc = cwd();
}
chdir("$pgsrc/src/tools/msvc");
push(@INC, "$pgsrc/src/tools/msvc");
chdir("../../..") if (-d "../msvc" && -d "../../../src");
}
use Win32;
use Carp;
use strict;
use warnings;
use Project;
use Solution;
use File::Copy;
use Config;
use VSObjectFactory;
use List::Util qw(first);
use Exporter;
our (@ISA, @EXPORT_OK);
@ISA = qw(Exporter);
@EXPORT_OK = qw(Mkvcbuild);
my $solution;
my $libpgport;
my $libpgcommon;
my $libpgfeutils;
my $postgres;
my $libpq;
my @unlink_on_exit;
use lib "src/tools/msvc";
use Mkvcbuild;
# if (-e "src/tools/msvc/buildenv.pl")
# {
# do "src/tools/msvc/buildenv.pl";
# }
# elsif (-e "./buildenv.pl")
# {
# do "./buildenv.pl";
# }
# set up the project
our $config;
do "config_default.pl";
do "config.pl" if (-f "src/tools/msvc/config.pl");
# my $vcver = Mkvcbuild::mkvcbuild($config);
my $vcver = build_pgprobackup($config);
# check what sort of build we are doing
my $bconf = $ENV{CONFIG} || "Release";
my $msbflags = $ENV{MSBFLAGS} || "";
my $buildwhat = $ARGV[1] || "";
if (uc($ARGV[0]) eq 'DEBUG')
{
$bconf = "Debug";
}
elsif (uc($ARGV[0]) ne "RELEASE")
{
$buildwhat = $ARGV[0] || "";
}
# ... and do it
system("msbuild pg_probackup.vcxproj /verbosity:normal $msbflags /p:Configuration=$bconf" );
# report status
my $status = $? >> 8;
exit $status;
sub build_pgprobackup
{
our $config = shift;
chdir('../../..') if (-d '../msvc' && -d '../../../src');
die 'Must run from root or msvc directory'
unless (-d 'src/tools/msvc' && -d 'src');
# my $vsVersion = DetermineVisualStudioVersion();
my $vsVersion = '12.00';
$solution = CreateSolution($vsVersion, $config);
$libpq = $solution->AddProject('libpq', 'dll', 'interfaces',
'src/interfaces/libpq');
$libpgfeutils = $solution->AddProject('libpgfeutils', 'lib', 'misc');
$libpgcommon = $solution->AddProject('libpgcommon', 'lib', 'misc');
$libpgport = $solution->AddProject('libpgport', 'lib', 'misc');
#vvs test
my $probackup =
$solution->AddProject('pg_probackup', 'exe', 'pg_probackup'); #, 'contrib/pg_probackup'
$probackup->AddFiles(
'contrib/pg_probackup/src',
'archive.c',
'backup.c',
'catalog.c',
'configure.c',
'data.c',
'delete.c',
'dir.c',
'fetch.c',
'help.c',
'init.c',
'parsexlog.c',
'pg_probackup.c',
'restore.c',
'show.c',
'status.c',
'util.c',
'validate.c'
);
$probackup->AddFiles(
'contrib/pg_probackup/src/utils',
'json.c',
'logger.c',
'parray.c',
'pgut.c',
'thread.c'
);
$probackup->AddFile('src/backend/access/transam/xlogreader.c');
$probackup->AddFiles(
'src/bin/pg_basebackup',
'receivelog.c',
'streamutil.c'
);
if (-e 'src/bin/pg_basebackup/walmethods.c')
{
$probackup->AddFile('src/bin/pg_basebackup/walmethods.c');
}
$probackup->AddFile('src/bin/pg_rewind/datapagemap.c');
$probackup->AddFile('src/interfaces/libpq/pthread-win32.c');
$probackup->AddIncludeDir('src/bin/pg_basebackup');
$probackup->AddIncludeDir('src/bin/pg_rewind');
$probackup->AddIncludeDir('src/interfaces/libpq');
$probackup->AddIncludeDir('src');
$probackup->AddIncludeDir('src/port');
$probackup->AddIncludeDir('contrib/pg_probackup');
$probackup->AddIncludeDir('contrib/pg_probackup/src');
$probackup->AddIncludeDir('contrib/pg_probackup/src/utils');
$probackup->AddReference($libpq, $libpgfeutils, $libpgcommon, $libpgport);
$probackup->AddLibrary('ws2_32.lib');
$probackup->Save();
return $solution->{vcver};
}

msvs/pg_probackup.sln Normal file

@ -0,0 +1,28 @@

Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio Express 2013 for Windows Desktop
VisualStudioVersion = 12.0.31101.0
MinimumVisualStudioVersion = 10.0.40219.1
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "pg_probackup", "pg_probackup.vcxproj", "{4886B21A-D8CA-4A03-BADF-743B24C88327}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Win32 = Debug|Win32
Debug|x64 = Debug|x64
Release|Win32 = Release|Win32
Release|x64 = Release|x64
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{4886B21A-D8CA-4A03-BADF-743B24C88327}.Debug|Win32.ActiveCfg = Debug|Win32
{4886B21A-D8CA-4A03-BADF-743B24C88327}.Debug|Win32.Build.0 = Debug|Win32
{4886B21A-D8CA-4A03-BADF-743B24C88327}.Debug|x64.ActiveCfg = Debug|x64
{4886B21A-D8CA-4A03-BADF-743B24C88327}.Debug|x64.Build.0 = Debug|x64
{4886B21A-D8CA-4A03-BADF-743B24C88327}.Release|Win32.ActiveCfg = Release|Win32
{4886B21A-D8CA-4A03-BADF-743B24C88327}.Release|Win32.Build.0 = Release|Win32
{4886B21A-D8CA-4A03-BADF-743B24C88327}.Release|x64.ActiveCfg = Release|x64
{4886B21A-D8CA-4A03-BADF-743B24C88327}.Release|x64.Build.0 = Release|x64
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
EndGlobal


@ -0,0 +1,212 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{4886B21A-D8CA-4A03-BADF-743B24C88327}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>pg_probackup</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;@PGSRC@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
<ExcludePath />
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;@PGSRC@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
<ExcludePath />
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;@PGSRC@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
<ExcludePath />
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;@PGSRC@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
<ExcludePath />
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_CRT_NONSTDC_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>@ADDLIBS32@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_CRT_NONSTDC_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>@ADDLIBS@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;_CRT_NONSTDC_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>@ADDLIBS32@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
<IgnoreSpecificDefaultLibraries>libc;%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;_CRT_NONSTDC_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>@ADDLIBS@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
<IgnoreSpecificDefaultLibraries>libc;%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="@PGSRC@\backend\access\transam\xlogreader.c" />
<ClCompile Include="@PGSRC@\bin\pg_basebackup\receivelog.c" />
<ClCompile Include="@PGSRC@\bin\pg_basebackup\streamutil.c" />
<ClCompile Include="@PGSRC@\bin\pg_basebackup\walmethods.c" />
<ClCompile Include="@PGSRC@\bin\pg_rewind\datapagemap.c" />
<ClCompile Include="@PGSRC@\interfaces\libpq\pthread-win32.c" />
<ClCompile Include="..\src\archive.c" />
<ClCompile Include="..\src\backup.c" />
<ClCompile Include="..\src\catalog.c" />
<ClCompile Include="..\src\configure.c" />
<ClCompile Include="..\src\data.c" />
<ClCompile Include="..\src\delete.c" />
<ClCompile Include="..\src\dir.c" />
<ClCompile Include="..\src\fetch.c" />
<ClCompile Include="..\src\help.c" />
<ClCompile Include="..\src\init.c" />
<ClCompile Include="..\src\parsexlog.c" />
<ClCompile Include="..\src\pg_probackup.c" />
<ClCompile Include="..\src\restore.c" />
<ClCompile Include="..\src\show.c" />
<ClCompile Include="..\src\status.c" />
<ClCompile Include="..\src\util.c" />
<ClCompile Include="..\src\utils\logger.c" />
<ClCompile Include="..\src\utils\parray.c" />
<ClCompile Include="..\src\utils\pgut.c" />
<ClCompile Include="..\src\validate.c" />
<ClCompile Include="..\src\utils\json.c" />
<ClCompile Include="..\src\utils\thread.c" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="@PGSRC@\bin\pg_basebackup\receivelog.h" />
<ClInclude Include="@PGSRC@\bin\pg_basebackup\streamutil.h" />
<ClInclude Include="@PGSRC@\bin\pg_basebackup\walmethods.h" />
<ClInclude Include="@PGSRC@\bin\pg_rewind\datapagemap.h" />
<ClInclude Include="@PGSRC@\bin\pg_rewind\logging.h" />
<ClInclude Include="..\src\pg_probackup.h" />
<ClInclude Include="..\src\utils\logger.h" />
<ClInclude Include="..\src\utils\parray.h" />
<ClInclude Include="..\src\utils\pgut.h" />
<ClInclude Include="..\src\utils\json.h" />
<ClInclude Include="..\src\utils\thread.h" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>


@ -0,0 +1,210 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{4886B21A-D8CA-4A03-BADF-743B24C88327}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>pg_probackup</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;@PGSRC@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
<ExcludePath />
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;@PGSRC@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
<ExcludePath />
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;@PGSRC@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
<ExcludePath />
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;@PGSRC@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
<ExcludePath />
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_CRT_NONSTDC_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>@ADDLIBS32@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_CRT_NONSTDC_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>@ADDLIBS@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;_CRT_NONSTDC_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>@ADDLIBS32@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
<IgnoreSpecificDefaultLibraries>libc;%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;_CRT_NONSTDC_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>@ADDLIBS@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
<IgnoreSpecificDefaultLibraries>libc;%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="@PGSRC@\backend\access\transam\xlogreader.c" />
<ClCompile Include="@PGSRC@\bin\pg_basebackup\receivelog.c" />
<ClCompile Include="@PGSRC@\bin\pg_basebackup\streamutil.c" />
<ClCompile Include="@PGSRC@\bin\pg_rewind\datapagemap.c" />
<ClCompile Include="@PGSRC@\interfaces\libpq\pthread-win32.c" />
<ClCompile Include="..\src\archive.c" />
<ClCompile Include="..\src\backup.c" />
<ClCompile Include="..\src\catalog.c" />
<ClCompile Include="..\src\configure.c" />
<ClCompile Include="..\src\data.c" />
<ClCompile Include="..\src\delete.c" />
<ClCompile Include="..\src\dir.c" />
<ClCompile Include="..\src\fetch.c" />
<ClCompile Include="..\src\help.c" />
<ClCompile Include="..\src\init.c" />
<ClCompile Include="..\src\parsexlog.c" />
<ClCompile Include="..\src\pg_probackup.c" />
<ClCompile Include="..\src\restore.c" />
<ClCompile Include="..\src\show.c" />
<ClCompile Include="..\src\status.c" />
<ClCompile Include="..\src\util.c" />
<ClCompile Include="..\src\utils\logger.c" />
<ClCompile Include="..\src\utils\parray.c" />
<ClCompile Include="..\src\utils\pgut.c" />
<ClCompile Include="..\src\validate.c" />
<ClCompile Include="..\src\utils\json.c" />
<ClCompile Include="..\src\utils\thread.c" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="@PGSRC@\bin\pg_basebackup\receivelog.h" />
<ClInclude Include="@PGSRC@\bin\pg_basebackup\streamutil.h" />
<ClInclude Include="@PGSRC@\bin\pg_rewind\datapagemap.h" />
<ClInclude Include="@PGSRC@\bin\pg_rewind\logging.h" />
<ClInclude Include="..\src\pg_probackup.h" />
<ClInclude Include="..\src\utils\logger.h" />
<ClInclude Include="..\src\utils\parray.h" />
<ClInclude Include="..\src\utils\pgut.h" />
<ClInclude Include="..\src\utils\json.h" />
<ClInclude Include="..\src\utils\thread.h" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>


@ -0,0 +1,203 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{4886B21A-D8CA-4A03-BADF-743B24C88327}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>pg_probackup</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_CRT_NONSTDC_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>@ADDLIBS@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_CRT_NONSTDC_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>@ADDLIBS@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;_CRT_NONSTDC_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>@ADDLIBS@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<IgnoreSpecificDefaultLibraries>libc;%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;_CRT_NONSTDC_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>@ADDLIBS@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<IgnoreSpecificDefaultLibraries>libc;%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
</Link>
</ItemDefinitionGroup>
<!-- @PGROOT@\lib;@ADDLIBS@ @PGSRC@ @ADDINCLUDE@ -->
<ItemGroup>
<ClCompile Include="@PGSRC@\backend\access\transam\xlogreader.c" />
<ClCompile Include="@PGSRC@\bin\pg_basebackup\receivelog.c" />
<ClCompile Include="@PGSRC@\bin\pg_basebackup\streamutil.c" />
<ClCompile Include="@PGSRC@\bin\pg_basebackup\walmethods.c" />
<ClCompile Include="@PGSRC@\bin\pg_rewind\datapagemap.c" />
<ClCompile Include="@PGSRC@\interfaces\libpq\pthread-win32.c" />
<ClCompile Include="..\src\archive.c" />
<ClCompile Include="..\src\backup.c" />
<ClCompile Include="..\src\catalog.c" />
<ClCompile Include="..\src\configure.c" />
<ClCompile Include="..\src\data.c" />
<ClCompile Include="..\src\delete.c" />
<ClCompile Include="..\src\dir.c" />
<ClCompile Include="..\src\fetch.c" />
<ClCompile Include="..\src\help.c" />
<ClCompile Include="..\src\init.c" />
<ClCompile Include="..\src\parsexlog.c" />
<ClCompile Include="..\src\pg_probackup.c" />
<ClCompile Include="..\src\restore.c" />
<ClCompile Include="..\src\show.c" />
<ClCompile Include="..\src\status.c" />
<ClCompile Include="..\src\util.c" />
<ClCompile Include="..\src\utils\logger.c" />
<ClCompile Include="..\src\utils\parray.c" />
<ClCompile Include="..\src\utils\pgut.c" />
<ClCompile Include="..\src\validate.c" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="@PGSRC@\bin\pg_basebackup\receivelog.h" />
<ClInclude Include="@PGSRC@\bin\pg_basebackup\streamutil.h" />
<ClInclude Include="@PGSRC@\bin\pg_basebackup\walmethods.h" />
<ClInclude Include="@PGSRC@\bin\pg_rewind\datapagemap.h" />
<ClInclude Include="@PGSRC@\bin\pg_rewind\logging.h" />
<ClInclude Include="..\src\pg_probackup.h" />
<ClInclude Include="..\src\utils\logger.h" />
<ClInclude Include="..\src\utils\parray.h" />
<ClInclude Include="..\src\utils\pgut.h" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>

src/backup.c

@ -18,16 +18,16 @@
#include <unistd.h>
#include <dirent.h>
#include <time.h>
#include <pthread.h>
#include "libpq/pqsignal.h"
#include "storage/bufpage.h"
#include "catalog/catalog.h"
#include "catalog/pg_tablespace.h"
#include "datapagemap.h"
#include "receivelog.h"
#include "streamutil.h"
#include "libpq/pqsignal.h"
#include "pgtar.h"
#include "receivelog.h"
#include "storage/bufpage.h"
#include "streamutil.h"
#include "utils/thread.h"
static int standby_message_timeout = 10 * 1000; /* 10 sec = default */
static XLogRecPtr stop_backup_lsn = InvalidXLogRecPtr;
@ -46,6 +46,9 @@ const char *progname = "pg_probackup";
/* list of files contained in backup */
static parray *backup_files_list = NULL;
/* We need a critical section for datapagemap_add() when using threads */
static pthread_mutex_t backup_pagemap_mutex = PTHREAD_MUTEX_INITIALIZER;
/*
* We need to wait for the end of WAL streaming before executing pg_stop_backup().
*/
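Worker threads can hit the same file's page map concurrently, which is why this hunk adds backup_pagemap_mutex: datapagemap_add() grows and mutates a shared bitmap with no locking of its own. A minimal sketch of the pattern, with the wrapper name invented here (datapagemap_t and datapagemap_add() come from pg_rewind's datapagemap.h):

#include <pthread.h>
#include "datapagemap.h"

static pthread_mutex_t backup_pagemap_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical wrapper: serialize bitmap updates across worker threads. */
static void
pagemap_add_locked(datapagemap_t *map, BlockNumber blkno)
{
	pthread_mutex_lock(&backup_pagemap_mutex);
	datapagemap_add(map, blkno);
	pthread_mutex_unlock(&backup_pagemap_mutex);
}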
@ -89,8 +92,8 @@ static bool pg_stop_backup_is_sent = false;
static void backup_cleanup(bool fatal, void *userdata);
static void backup_disconnect(bool fatal, void *userdata);
static void backup_files(void *arg);
static void remote_backup_files(void *arg);
static void *backup_files(void *arg);
static void *remote_backup_files(void *arg);
static void do_backup_instance(void);
@ -101,11 +104,10 @@ static int checkpoint_timeout(void);
//static void backup_list_file(parray *files, const char *root, )
static void parse_backup_filelist_filenames(parray *files, const char *root);
static void write_backup_file_list(parray *files, const char *root);
static void wait_wal_lsn(XLogRecPtr lsn, bool wait_prev_segment);
static void wait_replica_wal_lsn(XLogRecPtr lsn, bool is_start_backup);
static void make_pagemap_from_ptrack(parray *files);
static void StreamLog(void *arg);
static void *StreamLog(void *arg);
static void get_remote_pgdata_filelist(parray *files);
static void ReceiveFileList(parray* files, PGconn *conn, PGresult *res, int rownum);
@ -130,7 +132,6 @@ static void check_system_identifiers(void);
static void confirm_block_size(const char *name, int blcksz);
static void set_cfs_datafiles(parray *files, const char *root, char *relative, size_t i);
#define disconnect_and_exit(code) \
{ \
if (conn != NULL) PQfinish(conn); \
@ -253,7 +254,11 @@ ReceiveFileList(parray* files, PGconn *conn, PGresult *res, int rownum)
else if (copybuf[156] == '2')
{
/* Symlink */
#ifndef WIN32
pgfile->mode |= S_IFLNK;
#else
pgfile->mode |= S_IFDIR;
#endif
}
else
elog(ERROR, "Unrecognized link indicator \"%c\"\n",
@ -289,7 +294,7 @@ remote_copy_file(PGconn *conn, pgFile* file)
DATABASE_DIR);
join_path_components(to_path, database_path, file->path);
out = fopen(to_path, "w");
out = fopen(to_path, PG_BINARY_W);
if (out == NULL)
{
int errno_tmp = errno;
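The fopen() mode changes from "w" to PG_BINARY_W so that copied data is not run through newline translation on Windows, which would corrupt data pages. Roughly what the macro expands to, simplified from PostgreSQL's c.h:

#ifdef WIN32
#define PG_BINARY_W "wb"	/* binary mode: fwrite() must not rewrite \n as \r\n */
#else
#define PG_BINARY_W "w"
#endif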
@ -323,7 +328,7 @@ remote_copy_file(PGconn *conn, pgFile* file)
{
write_buffer_size = Min(row_length, sizeof(buf));
memcpy(buf, copybuf, write_buffer_size);
COMP_CRC32C(file->crc, &buf, write_buffer_size);
COMP_CRC32C(file->crc, buf, write_buffer_size);
/* TODO calc checksum*/
if (fwrite(buf, 1, write_buffer_size, out) != write_buffer_size)
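Dropping the & in COMP_CRC32C(file->crc, buf, ...) is a type fix, not a behavior fix: for an array, &buf and buf denote the same address, and the CRC routine takes the data as a void pointer, so both forms checksum the same bytes. A sketch, with the buffer size illustrative:

char	buf[8192];	/* size illustrative */

/* &buf has type char (*)[8192]; buf decays to char *. Both point at the
 * first byte, so the computed CRC is identical; passing buf is simply
 * the type-correct spelling. */
COMP_CRC32C(file->crc, buf, write_buffer_size);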
@ -353,7 +358,7 @@ remote_copy_file(PGconn *conn, pgFile* file)
elog(ERROR, "final receive failed: status %d ; %s",PQresultStatus(res), PQerrorMessage(conn));
}
file->write_size = file->read_size;
file->write_size = (int64) file->read_size;
FIN_CRC32C(file->crc);
fclose(out);
@ -363,13 +368,13 @@ remote_copy_file(PGconn *conn, pgFile* file)
* Take a remote backup of the PGDATA at a file level.
* Copy all directories and files listed in backup_files_list.
*/
static void
static void *
remote_backup_files(void *arg)
{
int i;
backup_files_args *arguments = (backup_files_args *) arg;
int n_backup_files_list = parray_num(arguments->backup_files_list);
PGconn *file_backup_conn = NULL;
int i;
backup_files_arg *arguments = (backup_files_arg *) arg;
int n_backup_files_list = parray_num(arguments->files_list);
PGconn *file_backup_conn = NULL;
for (i = 0; i < n_backup_files_list; i++)
{
@ -379,13 +384,13 @@ remote_backup_files(void *arg)
pgFile *file;
int row_length;
file = (pgFile *) parray_get(arguments->backup_files_list, i);
file = (pgFile *) parray_get(arguments->files_list, i);
/* We have already copied all directories */
if (S_ISDIR(file->mode))
continue;
if (__sync_lock_test_and_set(&file->lock, 1) != 0)
if (!pg_atomic_test_set_flag(&file->lock))
continue;
file_backup_conn = pgut_connect_replication(pgut_dbname);
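The GCC-specific __sync_lock_test_and_set() gives way to PostgreSQL's portable atomics. pg_atomic_test_set_flag() returns true only for the one thread that actually sets the flag, which is what lets several workers walk the same shared list without copying a file twice. A minimal sketch of the claiming loop, with names taken from the diff:

#include "port/atomics.h"

for (i = 0; i < n_backup_files_list; i++)
{
	pgFile	   *file = (pgFile *) parray_get(arguments->files_list, i);

	/* First thread to set the flag owns the file; everyone else skips it. */
	if (!pg_atomic_test_set_flag(&file->lock))
		continue;

	/* ... copy the file ... */
}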
@ -434,13 +439,15 @@ remote_backup_files(void *arg)
/* receive the data from stream and write to backup file */
remote_copy_file(file_backup_conn, file);
elog(VERBOSE, "File \"%s\". Copied %lu bytes",
file->path, (unsigned long) file->write_size);
elog(VERBOSE, "File \"%s\". Copied " INT64_FORMAT " bytes",
file->path, file->write_size);
PQfinish(file_backup_conn);
}
/* Data file transfer completed successfully */
arguments->ret = 0;
return NULL;
}
/*
@ -456,12 +463,12 @@ do_backup_instance(void)
char label[1024];
XLogRecPtr prev_backup_start_lsn = InvalidXLogRecPtr;
pthread_t backup_threads[num_threads];
backup_files_args *backup_threads_args[num_threads];
/* arrays with meta info for multi threaded backup */
pthread_t *threads;
backup_files_arg *threads_args;
bool backup_isok = true;
pgBackup *prev_backup = NULL;
char prev_backup_filelist_path[MAXPGPATH];
parray *prev_backup_filelist = NULL;
elog(LOG, "Database backup start");
@ -493,6 +500,7 @@ do_backup_instance(void)
}
else
current.tli = get_current_timeline(false);
/*
* In incremental backup mode ensure that already-validated
* backup on current timeline exists and get its filelist.
@ -502,10 +510,10 @@ do_backup_instance(void)
current.backup_mode == BACKUP_MODE_DIFF_DELTA)
{
parray *backup_list;
char prev_backup_filelist_path[MAXPGPATH];
/* get list of backups already taken */
backup_list = catalog_get_backup_list(INVALID_BACKUP_ID);
if (backup_list == NULL)
elog(ERROR, "Failed to get backup list.");
prev_backup = catalog_get_last_data_backup(backup_list, current.tli);
if (prev_backup == NULL)
@ -513,8 +521,8 @@ do_backup_instance(void)
"Create new FULL backup before an incremental one.");
parray_free(backup_list);
pgBackupGetPath(prev_backup, prev_backup_filelist_path, lengthof(prev_backup_filelist_path),
DATABASE_FILE_LIST);
pgBackupGetPath(prev_backup, prev_backup_filelist_path,
lengthof(prev_backup_filelist_path), DATABASE_FILE_LIST);
/* Files of previous backup needed by DELTA backup */
prev_backup_filelist = dir_read_file_list(NULL, prev_backup_filelist_path);
@ -592,8 +600,7 @@ do_backup_instance(void)
/* By default there are some error */
stream_thread_arg.ret = 1;
pthread_create(&stream_thread, NULL, (void *(*)(void *)) StreamLog,
&stream_thread_arg);
pthread_create(&stream_thread, NULL, StreamLog, &stream_thread_arg);
}
/* initialize backup list */
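The worker functions change from void f(void *) to void *f(void *) so the casts at the pthread_create() call sites can go away: POSIX requires the start routine to have exactly the type void *(*)(void *), and calling a function through a cast to an incompatible pointer type is undefined behavior. The corrected shape, sketched:

static void *
StreamLog(void *arg)
{
	/* cast arg back to the worker's argument struct and stream WAL ... */

	return NULL;	/* value unused here, but the type must match */
}

pthread_create(&stream_thread, NULL, StreamLog, &stream_thread_arg);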
@ -605,6 +612,19 @@ do_backup_instance(void)
else
dir_list_file(backup_files_list, pgdata, true, true, false);
/*
* Sort pathname ascending. It is necessary to create intermediate
* directories sequentially.
*
* For example:
* 1 - create 'base'
* 2 - create 'base/1'
*
* Sorted array is used at least in parse_backup_filelist_filenames(),
* extractPageMap(), make_pagemap_from_ptrack().
*/
parray_qsort(backup_files_list, pgFileComparePath);
/* Extract information about files in backup_list parsing their names:*/
parse_backup_filelist_filenames(backup_files_list, pgdata);
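Sorting by path is what makes single-pass directory creation safe: under pgFileComparePath ordering, 'base' sorts before 'base/1', so every parent directory is created before its children. A sketch of the pass that relies on this (dir_create_dir() is pg_probackup's helper; DIR_PERMISSION is assumed from pg_probackup.h):

parray_qsort(backup_files_list, pgFileComparePath);

for (i = 0; i < parray_num(backup_files_list); i++)
{
	pgFile	   *file = (pgFile *) parray_get(backup_files_list, i);

	/* Parents always precede children in the sorted array. */
	if (S_ISDIR(file->mode))
		dir_create_dir(file->path, DIR_PERMISSION);
}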
@ -633,28 +653,18 @@ do_backup_instance(void)
* For backup from master wait for previous segment.
* For backup from replica wait for current segment.
*/
!from_replica, backup_files_list);
!current.from_replica, backup_files_list);
}
if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
else if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
{
parray_qsort(backup_files_list, pgFileComparePath);
/*
* Build the page map from ptrack information.
*/
make_pagemap_from_ptrack(backup_files_list);
}
/*
* Sort pathname ascending. It is necessary to create intermediate
* directories sequentially.
*
* For example:
* 1 - create 'base'
* 2 - create 'base/1'
*/
parray_qsort(backup_files_list, pgFileComparePath);
/*
* Make directories before backup
* and setup threads at the same time
* Make directories before backup and setup threads at the same time
*/
for (i = 0; i < parray_num(backup_files_list); i++)
{
@ -681,53 +691,54 @@ do_backup_instance(void)
}
/* setup threads */
__sync_lock_release(&file->lock);
pg_atomic_clear_flag(&file->lock);
}
/* sort by size for load balancing */
/* Sort by size for load balancing */
parray_qsort(backup_files_list, pgFileCompareSize);
/* Sort the array for binary search */
if (prev_backup_filelist)
parray_qsort(prev_backup_filelist, pgFileComparePath);
/* init thread args with own file lists */
threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads);
threads_args = (backup_files_arg *) palloc(sizeof(backup_files_arg)*num_threads);
for (i = 0; i < num_threads; i++)
{
backup_files_args *arg = pg_malloc(sizeof(backup_files_args));
backup_files_arg *arg = &(threads_args[i]);
arg->from_root = pgdata;
arg->to_root = database_path;
arg->backup_files_list = backup_files_list;
arg->prev_backup_filelist = prev_backup_filelist;
arg->prev_backup_start_lsn = prev_backup_start_lsn;
arg->thread_backup_conn = NULL;
arg->thread_cancel_conn = NULL;
arg->files_list = backup_files_list;
arg->prev_filelist = prev_backup_filelist;
arg->prev_start_lsn = prev_backup_start_lsn;
arg->backup_conn = NULL;
arg->cancel_conn = NULL;
/* By default there are some error */
arg->ret = 1;
backup_threads_args[i] = arg;
}
/* Run threads */
elog(LOG, "Start transfering data files");
for (i = 0; i < num_threads; i++)
{
backup_files_arg *arg = &(threads_args[i]);
elog(VERBOSE, "Start thread num: %i", i);
if (!is_remote_backup)
pthread_create(&backup_threads[i], NULL,
(void *(*)(void *)) backup_files,
backup_threads_args[i]);
pthread_create(&threads[i], NULL, backup_files, arg);
else
pthread_create(&backup_threads[i], NULL,
(void *(*)(void *)) remote_backup_files,
backup_threads_args[i]);
pthread_create(&threads[i], NULL, remote_backup_files, arg);
}
/* Wait threads */
for (i = 0; i < num_threads; i++)
{
pthread_join(backup_threads[i], NULL);
if (backup_threads_args[i]->ret == 1)
pthread_join(threads[i], NULL);
if (threads_args[i].ret == 1)
backup_isok = false;
pg_free(backup_threads_args[i]);
}
if (backup_isok)
elog(LOG, "Data files are transfered");
@ -758,12 +769,14 @@ do_backup_instance(void)
for (i = 0; i < parray_num(xlog_files_list); i++)
{
pgFile *file = (pgFile *) parray_get(xlog_files_list, i);
if (S_ISREG(file->mode))
calc_file_checksum(file);
/* Remove file path root prefix*/
if (strstr(file->path, database_path) == file->path)
{
char *ptr = file->path;
file->path = pstrdup(GetRelativePath(ptr, database_path));
free(ptr);
}
@ -775,7 +788,7 @@ do_backup_instance(void)
}
/* Print the list of files to backup catalog */
write_backup_file_list(backup_files_list, pgdata);
pgBackupWriteFileList(&current, backup_files_list, pgdata);
/* Compute summary of size of regular files in the backup */
for (i = 0; i < parray_num(backup_files_list); i++)
@ -815,11 +828,15 @@ do_backup(time_t start_time)
pgut_atexit_push(backup_disconnect, NULL);
current.primary_conninfo = pgut_get_conninfo_string(backup_conn);
current.compress_alg = compress_alg;
current.compress_level = compress_level;
/* Confirm data block size and xlog block size are compatible */
confirm_block_size("block_size", BLCKSZ);
confirm_block_size("wal_block_size", XLOG_BLCKSZ);
from_replica = pg_is_in_recovery();
current.from_replica = pg_is_in_recovery();
/* Confirm that this server version is supported */
check_server_version();
@ -837,7 +854,7 @@ do_backup(time_t start_time)
elog(WARNING, "This PostgreSQL instance was initialized without data block checksums. "
"pg_probackup have no way to detect data block corruption without them. "
"Reinitialize PGDATA with option '--data-checksums'.");
StrNCpy(current.server_version, server_version_str,
sizeof(current.server_version));
current.stream = stream_wal;
@ -859,7 +876,7 @@ do_backup(time_t start_time)
}
}
if (from_replica)
if (current.from_replica)
{
/* Check master connection options */
if (master_host == NULL)
@ -956,7 +973,7 @@ check_server_version(void)
"server version is %s, must be %s or higher",
server_version_str, "9.5");
if (from_replica && server_version < 90600)
if (current.from_replica && server_version < 90600)
elog(ERROR,
"server version is %s, must be %s or higher for backup from replica",
server_version_str, "9.6");
@ -972,23 +989,23 @@ check_server_version(void)
if (PQresultStatus(res) == PGRES_FATAL_ERROR)
/* It seems we connected to PostgreSQL (not Postgres Pro) */
elog(ERROR, "%s was built with Postgres Pro %s %s, "
"but connection made with PostgreSQL %s",
"but connection is made with PostgreSQL %s",
PROGRAM_NAME, PG_MAJORVERSION, PGPRO_EDITION, server_version_str);
else if (strcmp(server_version_str, PG_MAJORVERSION) != 0 &&
strcmp(PQgetvalue(res, 0, 0), PGPRO_EDITION) != 0)
elog(ERROR, "%s was built with Postgres Pro %s %s, "
"but connection made with Postgres Pro %s %s",
"but connection is made with Postgres Pro %s %s",
PROGRAM_NAME, PG_MAJORVERSION, PGPRO_EDITION,
server_version_str, PQgetvalue(res, 0, 0));
#else
if (PQresultStatus(res) != PGRES_FATAL_ERROR)
/* It seems we connected to Postgres Pro (not PostgreSQL) */
elog(ERROR, "%s was built with PostgreSQL %s, "
"but connection made with Postgres Pro %s %s",
"but connection is made with Postgres Pro %s %s",
PROGRAM_NAME, PG_MAJORVERSION,
server_version_str, PQgetvalue(res, 0, 0));
else if (strcmp(server_version_str, PG_MAJORVERSION) != 0)
elog(ERROR, "%s was built with PostgreSQL %s, but connection made with %s",
elog(ERROR, "%s was built with PostgreSQL %s, but connection is made with %s",
PROGRAM_NAME, PG_MAJORVERSION, server_version_str);
#endif
@ -1038,12 +1055,12 @@ confirm_block_size(const char *name, int blcksz)
elog(ERROR, "cannot get %s: %s", name, PQerrorMessage(backup_conn));
block_size = strtol(PQgetvalue(res, 0, 0), &endp, 10);
PQclear(res);
if ((endp && *endp) || block_size != blcksz)
elog(ERROR,
"%s(%d) is not compatible(%d expected)",
name, block_size, blcksz);
PQclear(res);
}
/*
@ -1061,7 +1078,7 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup)
params[0] = label;
/* For replica we call pg_start_backup() on master */
conn = (from_replica) ? master_conn : backup_conn;
conn = (backup->from_replica) ? master_conn : backup_conn;
/* 2nd argument is 'fast'*/
params[1] = smooth ? "false" : "true";
@ -1076,6 +1093,12 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup)
2,
params);
/*
* Set flag that pg_start_backup() was called. If an error will happen it
* is necessary to call pg_stop_backup() in backup_cleanup().
*/
backup_in_progress = true;
/* Extract timeline and LSN from results of pg_start_backup() */
XLogDataFromLSN(PQgetvalue(res, 0, 0), &xlogid, &xrecoff);
/* Calculate LSN */
@ -1106,14 +1129,8 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup)
}
/* Wait for start_lsn to be replayed by replica */
if (from_replica)
if (backup->from_replica)
wait_replica_wal_lsn(backup->start_lsn, true);
/*
* Set flag that pg_start_backup() was called. If an error will happen it
* is necessary to call pg_stop_backup() in backup_cleanup().
*/
backup_in_progress = true;
}
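Moving backup_in_progress = true up to right after pg_start_backup() is sent closes an error window: if anything between the server call and the old flag position raised elog(ERROR), backup_cleanup() saw the flag unset and never issued pg_stop_backup(), leaving the server stuck in backup mode. The cleanup-hook pattern, sketched (the SQL text and the pg_stop_backup(NULL) call are illustrative):

static void
backup_cleanup(bool fatal, void *userdata)
{
	if (backup_in_progress)
		pg_stop_backup(NULL);	/* don't leave the server in backup mode */
}

/* registered early via pgut_atexit_push(backup_cleanup, NULL); then: */
res = pgut_execute(conn, "SELECT pg_start_backup($1, $2, false)", 2, params);
backup_in_progress = true;	/* set before anything below can elog(ERROR) */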
/*
@ -1555,8 +1572,6 @@ wait_replica_wal_lsn(XLogRecPtr lsn, bool is_start_backup)
{
uint32 try_count = 0;
Assert(from_replica);
while (true)
{
PGresult *res;
@ -1651,7 +1666,7 @@ pg_stop_backup(pgBackup *backup)
elog(FATAL, "backup is not in progress");
/* For replica we call pg_stop_backup() on master */
conn = (from_replica) ? master_conn : backup_conn;
conn = (current.from_replica) ? master_conn : backup_conn;
/* Remove annoying NOTICE messages generated by backend */
res = pgut_execute(conn, "SET client_min_messages = warning;",
@ -1664,7 +1679,7 @@ pg_stop_backup(pgBackup *backup)
const char *params[1];
char name[1024];
if (!from_replica)
if (!current.from_replica)
snprintf(name, lengthof(name), "pg_probackup, backup_id %s",
base36enc(backup->start_time));
else
@ -1800,7 +1815,7 @@ pg_stop_backup(pgBackup *backup)
/* Write backup_label */
join_path_components(backup_label, path, PG_BACKUP_LABEL_FILE);
fp = fopen(backup_label, "w");
fp = fopen(backup_label, PG_BINARY_W);
if (fp == NULL)
elog(ERROR, "can't open backup label file \"%s\": %s",
backup_label, strerror(errno));
@ -1831,7 +1846,7 @@ pg_stop_backup(pgBackup *backup)
elog(ERROR,
"result of txid_snapshot_xmax() is invalid: %s",
PQgetvalue(res, 0, 0));
if (!parse_time(PQgetvalue(res, 0, 1), &recovery_time))
if (!parse_time(PQgetvalue(res, 0, 1), &recovery_time, true))
elog(ERROR,
"result of current_timestamp is invalid: %s",
PQgetvalue(res, 0, 1));
@ -1848,7 +1863,7 @@ pg_stop_backup(pgBackup *backup)
char tablespace_map[MAXPGPATH];
join_path_components(tablespace_map, path, PG_TABLESPACE_MAP_FILE);
fp = fopen(tablespace_map, "w");
fp = fopen(tablespace_map, PG_BINARY_W);
if (fp == NULL)
elog(ERROR, "can't open tablespace map file \"%s\": %s",
tablespace_map, strerror(errno));
@ -1892,7 +1907,7 @@ pg_stop_backup(pgBackup *backup)
stream_xlog_path[MAXPGPATH];
/* Wait for stop_lsn to be received by replica */
if (from_replica)
if (backup->from_replica)
wait_replica_wal_lsn(stop_backup_lsn, false);
/*
* Wait for stop_lsn to be archived or streamed.
@ -2004,22 +2019,22 @@ backup_disconnect(bool fatal, void *userdata)
* In incremental backup mode, copy only files or datafiles' pages changed after
* previous backup.
*/
static void
static void *
backup_files(void *arg)
{
int i;
backup_files_args *arguments = (backup_files_args *) arg;
int n_backup_files_list = parray_num(arguments->backup_files_list);
int i;
backup_files_arg *arguments = (backup_files_arg *) arg;
int n_backup_files_list = parray_num(arguments->files_list);
/* backup a file */
for (i = 0; i < n_backup_files_list; i++)
{
int ret;
struct stat buf;
pgFile *file = (pgFile *) parray_get(arguments->files_list, i);
pgFile *file = (pgFile *) parray_get(arguments->backup_files_list, i);
elog(VERBOSE, "Copying file: \"%s\" ", file->path);
if (__sync_lock_test_and_set(&file->lock, 1) != 0)
if (!pg_atomic_test_set_flag(&file->lock))
continue;
/* check for interrupt */
@ -2061,69 +2076,69 @@ backup_files(void *arg)
/* Check that file exist in previous backup */
if (current.backup_mode != BACKUP_MODE_FULL)
{
int p;
char *relative;
int n_prev_backup_files_list = parray_num(arguments->prev_backup_filelist);
pgFile key;
pgFile **prev_file;
relative = GetRelativePath(file->path, arguments->from_root);
for (p = 0; p < n_prev_backup_files_list; p++)
{
pgFile *prev_file = (pgFile *) parray_get(arguments->prev_backup_filelist, p);
if (strcmp(relative, prev_file->path) == 0)
{
/* File exists in previous backup */
file->exists_in_prev = true;
// elog(VERBOSE, "File exists at the time of previous backup %s", relative);
break;
}
}
key.path = relative;
prev_file = (pgFile **) parray_bsearch(arguments->prev_filelist,
&key, pgFileComparePath);
if (prev_file)
/* File exists in previous backup */
file->exists_in_prev = true;
}
/* copy the file into backup */
if (file->is_datafile && !file->is_cfs)
{
char to_path[MAXPGPATH];
join_path_components(to_path, arguments->to_root,
file->path + strlen(arguments->from_root) + 1);
/* backup block by block if datafile AND not compressed by cfs*/
if (!backup_data_file(arguments,
arguments->from_root,
arguments->to_root, file,
arguments->prev_backup_start_lsn,
current.backup_mode))
if (!backup_data_file(arguments, to_path, file,
arguments->prev_start_lsn,
current.backup_mode,
compress_alg, compress_level))
{
file->write_size = BYTES_INVALID;
elog(VERBOSE, "File \"%s\" was not copied to backup", file->path);
continue;
}
}
else
/* TODO:
* Check if file exists in previous backup
* If exists:
* if mtime > start_backup_time of parent backup,
* copy file to backup
* if mtime < start_backup_time
* calculate crc, compare crc to old file
* if crc is the same -> skip file
*/
if (!copy_file(arguments->from_root,
arguments->to_root,
file))
/* TODO:
* Check if file exists in previous backup
* If exists:
* if mtime > start_backup_time of parent backup,
* copy file to backup
* if mtime < start_backup_time
* calculate crc, compare crc to old file
* if crc is the same -> skip file
*/
else if (!copy_file(arguments->from_root, arguments->to_root, file))
{
file->write_size = BYTES_INVALID;
elog(VERBOSE, "File \"%s\" was not copied to backup", file->path);
continue;
}
elog(VERBOSE, "File \"%s\". Copied %lu bytes",
file->path, (unsigned long) file->write_size);
elog(VERBOSE, "File \"%s\". Copied "INT64_FORMAT " bytes",
file->path, file->write_size);
}
else
elog(LOG, "unexpected file type %d", buf.st_mode);
}
/* Close connection */
if (arguments->thread_backup_conn)
pgut_disconnect(arguments->thread_backup_conn);
if (arguments->backup_conn)
pgut_disconnect(arguments->backup_conn);
/* Data files transferring is successful */
arguments->ret = 0;
return NULL;
}
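/*
* A sketch of how this worker is driven now that it has the pthread
* signature (names illustrative, error handling elided):
*
*     pthread_t        threads[NUM];
*     backup_files_arg args[NUM];
*
*     for (i = 0; i < NUM; i++)
*         pthread_create(&threads[i], NULL, backup_files, &args[i]);
*     for (i = 0; i < NUM; i++)
*     {
*         pthread_join(threads[i], NULL);
*         if (args[i].ret != 0)
*             elog(ERROR, "Data files transferring failed");
*     }
*/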
/*
@ -2261,52 +2276,6 @@ set_cfs_datafiles(parray *files, const char *root, char *relative, size_t i)
free(cfs_tblspc_path);
}
/*
* Output the list of files to backup catalog DATABASE_FILE_LIST
*/
static void
write_backup_file_list(parray *files, const char *root)
{
FILE *fp;
char path[MAXPGPATH];
pgBackupGetPath(&current, path, lengthof(path), DATABASE_FILE_LIST);
fp = fopen(path, "wt");
if (fp == NULL)
elog(ERROR, "cannot open file list \"%s\": %s", path,
strerror(errno));
print_file_list(fp, files, root);
if (fflush(fp) != 0 ||
fsync(fileno(fp)) != 0 ||
fclose(fp))
elog(ERROR, "cannot write file list \"%s\": %s", path, strerror(errno));
}
/*
* A helper function to create the path of a relation file and segment.
* The returned path is palloc'd
*/
static char *
datasegpath(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
{
char *path;
char *segpath;
path = relpathperm(rnode, forknum);
if (segno > 0)
{
segpath = psprintf("%s.%u", path, segno);
pfree(path);
return segpath;
}
else
return path;
}
/*
* Find pgfile by given rnode in the backup_files_list
* and add given blkno to its pagemap.
@ -2314,30 +2283,28 @@ datasegpath(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
void
process_block_change(ForkNumber forknum, RelFileNode rnode, BlockNumber blkno)
{
char *path;
char *rel_path;
char *path;
char *rel_path;
BlockNumber blkno_inseg;
int segno;
pgFile *file_item = NULL;
int j;
pgFile **file_item;
pgFile f;
segno = blkno / RELSEG_SIZE;
blkno_inseg = blkno % RELSEG_SIZE;
rel_path = datasegpath(rnode, forknum, segno);
path = pg_malloc(strlen(rel_path) + strlen(pgdata) + 2);
sprintf(path, "%s/%s", pgdata, rel_path);
rel_path = relpathperm(rnode, forknum);
if (segno > 0)
path = psprintf("%s/%s.%u", pgdata, rel_path, segno);
else
path = psprintf("%s/%s", pgdata, rel_path);
for (j = 0; j < parray_num(backup_files_list); j++)
{
pgFile *p = (pgFile *) parray_get(backup_files_list, j);
pg_free(rel_path);
if (strcmp(p->path, path) == 0)
{
file_item = p;
break;
}
}
f.path = path;
/* backup_files_list should be sorted before */
file_item = (pgFile **) parray_bsearch(backup_files_list, &f,
pgFileComparePath);
/*
* If we don't have any record of this file in the file map, it means
@ -2346,10 +2313,18 @@ process_block_change(ForkNumber forknum, RelFileNode rnode, BlockNumber blkno)
* backup would simply copy it as-is.
*/
if (file_item)
datapagemap_add(&file_item->pagemap, blkno_inseg);
{
/* We need a critical section only if we use more than one thread */
if (num_threads > 1)
pthread_lock(&backup_pagemap_mutex);
datapagemap_add(&(*file_item)->pagemap, blkno_inseg);
if (num_threads > 1)
pthread_mutex_unlock(&backup_pagemap_mutex);
}
pg_free(path);
pg_free(rel_path);
}
/*
@ -2399,12 +2374,12 @@ make_pagemap_from_ptrack(parray *files)
if (file->is_datafile)
{
if (file->tblspcOid == tblspcOid_with_ptrack_init
&& file->dbOid == dbOid_with_ptrack_init)
if (file->tblspcOid == tblspcOid_with_ptrack_init &&
file->dbOid == dbOid_with_ptrack_init)
{
/* ignore ptrack if ptrack_init exists */
elog(VERBOSE, "Ignoring ptrack because of ptrack_init for file: %s", file->path);
file->pagemap.bitmapsize = PageBitmapIsAbsent;
file->pagemap_isabsent = true;
continue;
}
@ -2432,19 +2407,32 @@ make_pagemap_from_ptrack(parray *files)
*/
start_addr = (RELSEG_SIZE/HEAPBLOCKS_PER_BYTE)*file->segno;
if (start_addr + RELSEG_SIZE/HEAPBLOCKS_PER_BYTE > ptrack_nonparsed_size)
/*
* If file segment was created after we have read ptrack,
* we won't have a bitmap for this segment.
*/
if (start_addr > ptrack_nonparsed_size)
{
file->pagemap.bitmapsize = ptrack_nonparsed_size - start_addr;
elog(VERBOSE, "pagemap size: %i", file->pagemap.bitmapsize);
elog(VERBOSE, "Ptrack is missing for file: %s", file->path);
file->pagemap_isabsent = true;
}
else
{
file->pagemap.bitmapsize = RELSEG_SIZE/HEAPBLOCKS_PER_BYTE;
elog(VERBOSE, "pagemap size: %i", file->pagemap.bitmapsize);
}
file->pagemap.bitmap = pg_malloc(file->pagemap.bitmapsize);
memcpy(file->pagemap.bitmap, ptrack_nonparsed+start_addr, file->pagemap.bitmapsize);
if (start_addr + RELSEG_SIZE/HEAPBLOCKS_PER_BYTE > ptrack_nonparsed_size)
{
file->pagemap.bitmapsize = ptrack_nonparsed_size - start_addr;
elog(VERBOSE, "pagemap size: %i", file->pagemap.bitmapsize);
}
else
{
file->pagemap.bitmapsize = RELSEG_SIZE/HEAPBLOCKS_PER_BYTE;
elog(VERBOSE, "pagemap size: %i", file->pagemap.bitmapsize);
}
file->pagemap.bitmap = pg_malloc(file->pagemap.bitmapsize);
memcpy(file->pagemap.bitmap, ptrack_nonparsed+start_addr, file->pagemap.bitmapsize);
}
}
else
{
@ -2457,7 +2445,7 @@ make_pagemap_from_ptrack(parray *files)
* - target relation was deleted.
*/
elog(VERBOSE, "Ptrack is missing for file: %s", file->path);
file->pagemap.bitmapsize = PageBitmapIsAbsent;
file->pagemap_isabsent = true;
}
}
}
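/*
* Worked example for the start_addr arithmetic above, assuming the
* default 1GB segments of 8KB blocks (RELSEG_SIZE = 131072) and one
* ptrack bit per heap block (HEAPBLOCKS_PER_BYTE = 8): each segment
* owns 131072 / 8 = 16384 bytes of the ptrack map, so segment 2 of a
* relation starts at byte offset 32768 in ptrack_nonparsed.
*/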
@ -2533,7 +2521,7 @@ stop_streaming(XLogRecPtr xlogpos, uint32 timeline, bool segment_finished)
/*
* Start the log streaming
*/
static void
static void *
StreamLog(void *arg)
{
XLogRecPtr startpos;
@ -2607,6 +2595,8 @@ StreamLog(void *arg)
PQfinish(stream_arg->conn);
stream_arg->conn = NULL;
return NULL;
}
/*
@ -2633,7 +2623,7 @@ get_last_ptrack_lsn(void)
}
char *
pg_ptrack_get_block(backup_files_args *arguments,
pg_ptrack_get_block(backup_files_arg *arguments,
Oid dbOid,
Oid tblsOid,
Oid relOid,
@ -2658,17 +2648,17 @@ pg_ptrack_get_block(backup_files_args *arguments,
sprintf(params[2], "%i", relOid);
sprintf(params[3], "%u", blknum);
if (arguments->thread_backup_conn == NULL)
if (arguments->backup_conn == NULL)
{
arguments->thread_backup_conn = pgut_connect(pgut_dbname);
arguments->backup_conn = pgut_connect(pgut_dbname);
}
if (arguments->thread_cancel_conn == NULL)
arguments->thread_cancel_conn = PQgetCancel(arguments->thread_backup_conn);
if (arguments->cancel_conn == NULL)
arguments->cancel_conn = PQgetCancel(arguments->backup_conn);
//elog(LOG, "db %i pg_ptrack_get_block(%i, %i, %u)",dbOid, tblsOid, relOid, blknum);
res = pgut_execute_parallel(arguments->thread_backup_conn,
arguments->thread_cancel_conn,
res = pgut_execute_parallel(arguments->backup_conn,
arguments->cancel_conn,
"SELECT pg_catalog.pg_ptrack_get_block_2($1, $2, $3, $4)",
4, (const char **)params, true);

View File

@ -12,7 +12,6 @@
#include <dirent.h>
#include <fcntl.h>
#include <libgen.h>
#include <signal.h>
#include <sys/file.h>
#include <sys/stat.h>
@ -251,14 +250,15 @@ IsDir(const char *dirpath, const char *entry)
parray *
catalog_get_backup_list(time_t requested_backup_id)
{
DIR *date_dir = NULL;
struct dirent *date_ent = NULL;
DIR *data_dir = NULL;
struct dirent *data_ent = NULL;
parray *backups = NULL;
pgBackup *backup = NULL;
int i;
/* open backup instance backups directory */
date_dir = opendir(backup_instance_path);
if (date_dir == NULL)
data_dir = opendir(backup_instance_path);
if (data_dir == NULL)
{
elog(WARNING, "cannot open directory \"%s\": %s", backup_instance_path,
strerror(errno));
@ -267,22 +267,23 @@ catalog_get_backup_list(time_t requested_backup_id)
/* scan the directory and list backups */
backups = parray_new();
for (; (date_ent = readdir(date_dir)) != NULL; errno = 0)
for (; (data_ent = readdir(data_dir)) != NULL; errno = 0)
{
char backup_conf_path[MAXPGPATH];
char date_path[MAXPGPATH];
char data_path[MAXPGPATH];
/* skip not-directory entries and hidden entries */
if (!IsDir(backup_instance_path, date_ent->d_name)
|| date_ent->d_name[0] == '.')
if (!IsDir(backup_instance_path, data_ent->d_name)
|| data_ent->d_name[0] == '.')
continue;
/* open subdirectory of specific backup */
join_path_components(date_path, backup_instance_path, date_ent->d_name);
join_path_components(data_path, backup_instance_path, data_ent->d_name);
/* read backup information from BACKUP_CONTROL_FILE */
snprintf(backup_conf_path, MAXPGPATH, "%s/%s", date_path, BACKUP_CONTROL_FILE);
snprintf(backup_conf_path, MAXPGPATH, "%s/%s", data_path, BACKUP_CONTROL_FILE);
backup = readBackupControlFile(backup_conf_path);
backup->backup_id = backup->start_time;
/* ignore corrupted backups */
if (backup)
@ -299,8 +300,8 @@ catalog_get_backup_list(time_t requested_backup_id)
if (errno && errno != ENOENT)
{
elog(WARNING, "cannot read date directory \"%s\": %s",
date_ent->d_name, strerror(errno));
elog(WARNING, "cannot read data directory \"%s\": %s",
data_ent->d_name, strerror(errno));
goto err_proc;
}
}
@ -311,21 +312,48 @@ catalog_get_backup_list(time_t requested_backup_id)
goto err_proc;
}
closedir(date_dir);
date_dir = NULL;
closedir(data_dir);
data_dir = NULL;
parray_qsort(backups, pgBackupCompareIdDesc);
/* Link incremental backups with their ancestors. */
for (i = 0; i < parray_num(backups); i++)
{
pgBackup *curr = parray_get(backups, i);
int j;
if (curr->backup_mode == BACKUP_MODE_FULL)
continue;
for (j = i+1; j < parray_num(backups); j++)
{
pgBackup *ancestor = parray_get(backups, j);
if (ancestor->start_time == curr->parent_backup)
{
curr->parent_backup_link = ancestor;
/* elog(INFO, "curr %s, ancestor %s j=%d", base36enc_dup(curr->start_time),
base36enc_dup(ancestor->start_time), j); */
break;
}
}
}
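/*
* Example of the ordering this linking relies on (ids illustrative):
* with backups sorted newest-first as [PAGE p2, PAGE p1, FULL f0],
* the parent of p2 (p1) and the parent of p1 (f0) always sit at a
* larger index, so a single forward scan per backup suffices.
*/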
return backups;
err_proc:
if (date_dir)
closedir(date_dir);
if (data_dir)
closedir(data_dir);
if (backup)
pgBackupFree(backup);
if (backups)
parray_walk(backups, pgBackupFree);
parray_free(backups);
elog(ERROR, "Failed to get backup list");
return NULL;
}
@ -385,15 +413,17 @@ pgBackupWriteControl(FILE *out, pgBackup *backup)
fprintf(out, "#Configuration\n");
fprintf(out, "backup-mode = %s\n", pgBackupGetBackupMode(backup));
fprintf(out, "stream = %s\n", backup->stream?"true":"false");
fprintf(out, "compress-alg = %s\n", deparse_compress_alg(compress_alg));
fprintf(out, "compress-level = %d\n", compress_level);
fprintf(out, "from-replica = %s\n", from_replica?"true":"false");
fprintf(out, "stream = %s\n", backup->stream ? "true" : "false");
fprintf(out, "compress-alg = %s\n",
deparse_compress_alg(backup->compress_alg));
fprintf(out, "compress-level = %d\n", backup->compress_level);
fprintf(out, "from-replica = %s\n", backup->from_replica ? "true" : "false");
fprintf(out, "\n#Compatibility\n");
fprintf(out, "block-size = %u\n", backup->block_size);
fprintf(out, "xlog-block-size = %u\n", backup->wal_block_size);
fprintf(out, "checksum-version = %u\n", backup->checksum_version);
fprintf(out, "program-version = %s\n", PROGRAM_VERSION);
if (backup->server_version[0] != '\0')
fprintf(out, "server-version = %s\n", backup->server_version);
@ -429,7 +459,7 @@ pgBackupWriteControl(FILE *out, pgBackup *backup)
if (backup->data_bytes != BYTES_INVALID)
fprintf(out, "data-bytes = " INT64_FORMAT "\n", backup->data_bytes);
if (backup->data_bytes != BYTES_INVALID)
if (backup->wal_bytes != BYTES_INVALID)
fprintf(out, "wal-bytes = " INT64_FORMAT "\n", backup->wal_bytes);
fprintf(out, "status = %s\n", status2str(backup->status));
@ -461,6 +491,30 @@ pgBackupWriteBackupControlFile(pgBackup *backup)
fclose(fp);
}
/*
* Output the list of files to backup catalog DATABASE_FILE_LIST
*/
void
pgBackupWriteFileList(pgBackup *backup, parray *files, const char *root)
{
FILE *fp;
char path[MAXPGPATH];
pgBackupGetPath(backup, path, lengthof(path), DATABASE_FILE_LIST);
fp = fopen(path, "wt");
if (fp == NULL)
elog(ERROR, "cannot open file list \"%s\": %s", path,
strerror(errno));
print_file_list(fp, files, root);
if (fflush(fp) != 0 ||
fsync(fileno(fp)) != 0 ||
fclose(fp))
elog(ERROR, "cannot write file list \"%s\": %s", path, strerror(errno));
}
/*
* Read BACKUP_CONTROL_FILE and create pgBackup.
* - Comment starts with ';'.
@ -475,10 +529,10 @@ readBackupControlFile(const char *path)
char *stop_lsn = NULL;
char *status = NULL;
char *parent_backup = NULL;
char *compress_alg = NULL;
char *program_version = NULL;
char *server_version = NULL;
int *compress_level;
bool *from_replica;
char *compress_alg = NULL;
int parsed_options;
pgut_option options[] =
{
@ -495,22 +549,41 @@ readBackupControlFile(const char *path)
{'u', 0, "block-size", &backup->block_size, SOURCE_FILE_STRICT},
{'u', 0, "xlog-block-size", &backup->wal_block_size, SOURCE_FILE_STRICT},
{'u', 0, "checksum-version", &backup->checksum_version, SOURCE_FILE_STRICT},
{'s', 0, "program-version", &program_version, SOURCE_FILE_STRICT},
{'s', 0, "server-version", &server_version, SOURCE_FILE_STRICT},
{'b', 0, "stream", &backup->stream, SOURCE_FILE_STRICT},
{'s', 0, "status", &status, SOURCE_FILE_STRICT},
{'s', 0, "parent-backup-id", &parent_backup, SOURCE_FILE_STRICT},
{'s', 0, "compress-alg", &compress_alg, SOURCE_FILE_STRICT},
{'u', 0, "compress-level", &compress_level, SOURCE_FILE_STRICT},
{'b', 0, "from-replica", &from_replica, SOURCE_FILE_STRICT},
{'u', 0, "compress-level", &backup->compress_level, SOURCE_FILE_STRICT},
{'b', 0, "from-replica", &backup->from_replica, SOURCE_FILE_STRICT},
{'s', 0, "primary-conninfo", &backup->primary_conninfo, SOURCE_FILE_STRICT},
{0}
};
if (access(path, F_OK) != 0)
{
elog(WARNING, "control file \"%s\" doesn't exist", path);
pgBackupFree(backup);
return NULL;
}
pgBackup_init(backup);
pgut_readopt(path, options, ERROR);
pgBackupInit(backup);
parsed_options = pgut_readopt(path, options, WARNING);
if (parsed_options == 0)
{
elog(WARNING, "control file \"%s\" is empty", path);
pgBackupFree(backup);
return NULL;
}
if (backup->start_time == 0)
{
elog(WARNING, "invalid ID/start-time, control file \"%s\" is corrupted", path);
pgBackupFree(backup);
return NULL;
}
if (backup_mode)
{
@ -546,10 +619,12 @@ readBackupControlFile(const char *path)
{
if (strcmp(status, "OK") == 0)
backup->status = BACKUP_STATUS_OK;
else if (strcmp(status, "RUNNING") == 0)
backup->status = BACKUP_STATUS_RUNNING;
else if (strcmp(status, "ERROR") == 0)
backup->status = BACKUP_STATUS_ERROR;
else if (strcmp(status, "RUNNING") == 0)
backup->status = BACKUP_STATUS_RUNNING;
else if (strcmp(status, "MERGING") == 0)
backup->status = BACKUP_STATUS_MERGING;
else if (strcmp(status, "DELETING") == 0)
backup->status = BACKUP_STATUS_DELETING;
else if (strcmp(status, "DELETED") == 0)
@ -571,6 +646,13 @@ readBackupControlFile(const char *path)
free(parent_backup);
}
if (program_version)
{
StrNCpy(backup->program_version, program_version,
sizeof(backup->program_version));
pfree(program_version);
}
if (server_version)
{
StrNCpy(backup->server_version, server_version,
@ -578,6 +660,9 @@ readBackupControlFile(const char *path)
pfree(server_version);
}
if (compress_alg)
backup->compress_alg = parse_compress_alg(compress_alg);
return backup;
}
@ -626,11 +711,106 @@ deparse_backup_mode(BackupMode mode)
return NULL;
}
CompressAlg
parse_compress_alg(const char *arg)
{
size_t len;
/* Skip leading whitespace */
while (isspace((unsigned char)*arg))
arg++;
len = strlen(arg);
if (len == 0)
elog(ERROR, "compress algrorithm is empty");
if (pg_strncasecmp("zlib", arg, len) == 0)
return ZLIB_COMPRESS;
else if (pg_strncasecmp("pglz", arg, len) == 0)
return PGLZ_COMPRESS;
else if (pg_strncasecmp("none", arg, len) == 0)
return NONE_COMPRESS;
else
elog(ERROR, "invalid compress algorithm value \"%s\"", arg);
return NOT_DEFINED_COMPRESS;
}
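/*
* Because the comparison length above is taken from the user's string,
* unambiguous prefixes are accepted as well: "z" and "zl" parse as
* ZLIB_COMPRESS, "p" as PGLZ_COMPRESS, "n" as NONE_COMPRESS.
*/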
const char*
deparse_compress_alg(int alg)
{
switch (alg)
{
case NONE_COMPRESS:
case NOT_DEFINED_COMPRESS:
return "none";
case ZLIB_COMPRESS:
return "zlib";
case PGLZ_COMPRESS:
return "pglz";
}
return NULL;
}
/*
* Fill pgBackup struct with default values.
*/
void
pgBackupInit(pgBackup *backup)
{
backup->backup_id = INVALID_BACKUP_ID;
backup->backup_mode = BACKUP_MODE_INVALID;
backup->status = BACKUP_STATUS_INVALID;
backup->tli = 0;
backup->start_lsn = 0;
backup->stop_lsn = 0;
backup->start_time = (time_t) 0;
backup->end_time = (time_t) 0;
backup->recovery_xid = 0;
backup->recovery_time = (time_t) 0;
backup->data_bytes = BYTES_INVALID;
backup->wal_bytes = BYTES_INVALID;
backup->compress_alg = COMPRESS_ALG_DEFAULT;
backup->compress_level = COMPRESS_LEVEL_DEFAULT;
backup->block_size = BLCKSZ;
backup->wal_block_size = XLOG_BLCKSZ;
backup->checksum_version = 0;
backup->stream = false;
backup->from_replica = false;
backup->parent_backup = INVALID_BACKUP_ID;
backup->parent_backup_link = NULL;
backup->primary_conninfo = NULL;
backup->program_version[0] = '\0';
backup->server_version[0] = '\0';
}
/*
* Copy backup metadata from **src** into **dst**.
*/
void
pgBackupCopy(pgBackup *dst, pgBackup *src)
{
pfree(dst->primary_conninfo);
memcpy(dst, src, sizeof(pgBackup));
if (src->primary_conninfo)
dst->primary_conninfo = pstrdup(src->primary_conninfo);
}
/* free pgBackup object */
void
pgBackupFree(void *backup)
{
free(backup);
pgBackup *b = (pgBackup *) backup;
pfree(b->primary_conninfo);
pfree(backup);
}
/* Compare two pgBackup with their IDs (start time) in ascending order */
@ -687,3 +867,48 @@ pgBackupGetPath2(const pgBackup *backup, char *path, size_t len,
make_native_path(path);
}
/* Find the parent base FULL backup for the current backup using
* parent_backup_link; return NULL if not found
*/
pgBackup*
find_parent_backup(pgBackup *current_backup)
{
pgBackup *base_full_backup = NULL;
base_full_backup = current_backup;
while (base_full_backup->backup_mode != BACKUP_MODE_FULL)
{
/*
* If we cannot find a parent for an incremental backup,
* mark it and all dependent backups as orphaned
*/
if (base_full_backup->parent_backup_link == NULL
|| (base_full_backup->status != BACKUP_STATUS_OK
&& base_full_backup->status != BACKUP_STATUS_DONE))
{
pgBackup *orphaned_backup = current_backup;
while (orphaned_backup != NULL)
{
orphaned_backup->status = BACKUP_STATUS_ORPHAN;
pgBackupWriteBackupControlFile(orphaned_backup);
if (base_full_backup->parent_backup_link == NULL)
elog(WARNING, "Backup %s is orphaned because its parent backup is not found",
base36enc(orphaned_backup->start_time));
else
elog(WARNING, "Backup %s is orphaned because its parent backup is corrupted",
base36enc(orphaned_backup->start_time));
orphaned_backup = orphaned_backup->parent_backup_link;
}
base_full_backup = NULL;
break;
}
base_full_backup = base_full_backup->parent_backup_link;
}
return base_full_backup;
}
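/*
* Illustrative walk: for a chain PTRACK p2 -> PAGE p1 -> FULL f0 this
* follows parent_backup_link twice and returns f0. If p1 were missing
* or not OK/DONE, both p2 and p1 would be marked ORPHAN in their
* control files and NULL would be returned.
*/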

View File

@ -2,19 +2,38 @@
*
* configure.c: - manage backup instance configuration.
*
* Copyright (c) 2017-2017, Postgres Professional
* Copyright (c) 2017-2018, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#include "pg_probackup.h"
#include "utils/logger.h"
#include "pqexpbuffer.h"
#include "utils/json.h"
static void opt_log_level_console(pgut_option *opt, const char *arg);
static void opt_log_level_file(pgut_option *opt, const char *arg);
static void opt_compress_alg(pgut_option *opt, const char *arg);
static void show_configure_start(void);
static void show_configure_end(void);
static void show_configure(pgBackupConfig *config);
static void show_configure_json(pgBackupConfig *config);
static pgBackupConfig *cur_config = NULL;
static PQExpBufferData show_buf;
static int32 json_level = 0;
/*
* All this code needs refactoring.
*/
/* Set configure options */
int
do_configure(bool show_only)
@ -39,13 +58,17 @@ do_configure(bool show_only)
config->master_db = master_db;
if (master_user)
config->master_user = master_user;
if (replica_timeout != 300) /* 300 is default value */
if (replica_timeout)
config->replica_timeout = replica_timeout;
if (log_level_console != LOG_NONE)
config->log_level_console = LOG_LEVEL_CONSOLE;
if (log_level_file != LOG_NONE)
config->log_level_file = LOG_LEVEL_FILE;
if (archive_timeout)
config->archive_timeout = archive_timeout;
if (log_level_console)
config->log_level_console = log_level_console;
if (log_level_file)
config->log_level_file = log_level_file;
if (log_filename)
config->log_filename = log_filename;
if (error_log_filename)
@ -62,13 +85,13 @@ do_configure(bool show_only)
if (retention_window)
config->retention_window = retention_window;
if (compress_alg != NOT_DEFINED_COMPRESS)
if (compress_alg)
config->compress_alg = compress_alg;
if (compress_level != DEFAULT_COMPRESS_LEVEL)
if (compress_level)
config->compress_level = compress_level;
if (show_only)
writeBackupCatalogConfig(stderr, config);
show_configure(config);
else
writeBackupCatalogConfigFile(config);
@ -89,21 +112,23 @@ pgBackupConfigInit(pgBackupConfig *config)
config->master_port = NULL;
config->master_db = NULL;
config->master_user = NULL;
config->replica_timeout = INT_MIN; /* INT_MIN means "undefined" */
config->replica_timeout = REPLICA_TIMEOUT_DEFAULT;
config->log_level_console = INT_MIN; /* INT_MIN means "undefined" */
config->log_level_file = INT_MIN; /* INT_MIN means "undefined" */
config->log_filename = NULL;
config->archive_timeout = ARCHIVE_TIMEOUT_DEFAULT;
config->log_level_console = LOG_LEVEL_CONSOLE_DEFAULT;
config->log_level_file = LOG_LEVEL_FILE_DEFAULT;
config->log_filename = LOG_FILENAME_DEFAULT;
config->error_log_filename = NULL;
config->log_directory = NULL;
config->log_rotation_size = 0;
config->log_rotation_age = 0;
config->log_directory = LOG_DIRECTORY_DEFAULT;
config->log_rotation_size = LOG_ROTATION_SIZE_DEFAULT;
config->log_rotation_age = LOG_ROTATION_AGE_DEFAULT;
config->retention_redundancy = 0;
config->retention_window = 0;
config->retention_redundancy = RETENTION_REDUNDANCY_DEFAULT;
config->retention_window = RETENTION_WINDOW_DEFAULT;
config->compress_alg = NOT_DEFINED_COMPRESS;
config->compress_level = DEFAULT_COMPRESS_LEVEL;
config->compress_alg = COMPRESS_ALG_DEFAULT;
config->compress_level = COMPRESS_LEVEL_DEFAULT;
}
void
@ -114,7 +139,7 @@ writeBackupCatalogConfig(FILE *out, pgBackupConfig *config)
fprintf(out, "#Backup instance info\n");
fprintf(out, "PGDATA = %s\n", config->pgdata);
fprintf(out, "system-identifier = %li\n", config->system_identifier);
fprintf(out, "system-identifier = " UINT64_FORMAT "\n", config->system_identifier);
fprintf(out, "#Connection parameters:\n");
if (config->pgdatabase)
@ -136,55 +161,43 @@ writeBackupCatalogConfig(FILE *out, pgBackupConfig *config)
if (config->master_user)
fprintf(out, "master-user = %s\n", config->master_user);
if (config->replica_timeout != INT_MIN)
{
convert_from_base_unit_u(config->replica_timeout, OPTION_UNIT_S,
&res, &unit);
fprintf(out, "replica-timeout = " UINT64_FORMAT "%s\n", res, unit);
}
convert_from_base_unit_u(config->replica_timeout, OPTION_UNIT_S,
&res, &unit);
fprintf(out, "replica-timeout = " UINT64_FORMAT "%s\n", res, unit);
fprintf(out, "#Archive parameters:\n");
convert_from_base_unit_u(config->archive_timeout, OPTION_UNIT_S,
&res, &unit);
fprintf(out, "archive-timeout = " UINT64_FORMAT "%s\n", res, unit);
fprintf(out, "#Logging parameters:\n");
if (config->log_level_console != INT_MIN)
fprintf(out, "log-level-console = %s\n", deparse_log_level(config->log_level_console));
if (config->log_level_file != INT_MIN)
fprintf(out, "log-level-file = %s\n", deparse_log_level(config->log_level_file));
if (config->log_filename)
fprintf(out, "log-filename = %s\n", config->log_filename);
fprintf(out, "log-level-console = %s\n", deparse_log_level(config->log_level_console));
fprintf(out, "log-level-file = %s\n", deparse_log_level(config->log_level_file));
fprintf(out, "log-filename = %s\n", config->log_filename);
if (config->error_log_filename)
fprintf(out, "error-log-filename = %s\n", config->error_log_filename);
if (config->log_directory)
fprintf(out, "log-directory = %s\n", config->log_directory);
/*
* Convert values from base unit
*/
if (config->log_rotation_size)
{
convert_from_base_unit_u(config->log_rotation_size, OPTION_UNIT_KB,
&res, &unit);
fprintf(out, "log-rotation-size = " UINT64_FORMAT "%s\n", res, unit);
}
if (config->log_rotation_age)
{
convert_from_base_unit_u(config->log_rotation_age, OPTION_UNIT_S,
&res, &unit);
fprintf(out, "log-rotation-age = " UINT64_FORMAT "%s\n", res, unit);
}
if (strcmp(config->log_directory, LOG_DIRECTORY_DEFAULT) == 0)
fprintf(out, "log-directory = %s/%s\n", backup_path, config->log_directory);
else
fprintf(out, "log-directory = %s\n", config->log_directory);
/* Convert values from base unit */
convert_from_base_unit_u(config->log_rotation_size, OPTION_UNIT_KB,
&res, &unit);
fprintf(out, "log-rotation-size = " UINT64_FORMAT "%s\n", res, (res)?unit:"KB");
convert_from_base_unit_u(config->log_rotation_age, OPTION_UNIT_S,
&res, &unit);
fprintf(out, "log-rotation-age = " UINT64_FORMAT "%s\n", res, (res)?unit:"min");
fprintf(out, "#Retention parameters:\n");
if (config->retention_redundancy)
fprintf(out, "retention-redundancy = %u\n", config->retention_redundancy);
if (config->retention_window)
fprintf(out, "retention-window = %u\n", config->retention_window);
fprintf(out, "retention-redundancy = %u\n", config->retention_redundancy);
fprintf(out, "retention-window = %u\n", config->retention_window);
fprintf(out, "#Compression parameters:\n");
fprintf(out, "compress-algorithm = %s\n", deparse_compress_alg(config->compress_alg));
if (compress_level != config->compress_level)
fprintf(out, "compress-level = %d\n", compress_level);
else
fprintf(out, "compress-level = %d\n", config->compress_level);
fprintf(out, "compress-level = %d\n", config->compress_level);
}
void
@ -240,6 +253,8 @@ readBackupCatalogConfigFile(void)
{ 'u', 0, "replica-timeout", &(config->replica_timeout), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_S },
/* other options */
{ 'U', 0, "system-identifier", &(config->system_identifier), SOURCE_FILE_STRICT },
/* archive options */
{ 'u', 0, "archive-timeout", &(config->archive_timeout), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_S },
{0}
};
@ -251,7 +266,6 @@ readBackupCatalogConfigFile(void)
pgut_readopt(path, options, ERROR);
return config;
}
static void
@ -271,3 +285,155 @@ opt_compress_alg(pgut_option *opt, const char *arg)
{
cur_config->compress_alg = parse_compress_alg(arg);
}
/*
* Initialize configure visualization.
*/
static void
show_configure_start(void)
{
if (show_format == SHOW_PLAIN)
return;
/* For now we need buffer only for JSON format */
json_level = 0;
initPQExpBuffer(&show_buf);
}
/*
* Finalize configure visualization.
*/
static void
show_configure_end(void)
{
if (show_format == SHOW_PLAIN)
return;
else
appendPQExpBufferChar(&show_buf, '\n');
fputs(show_buf.data, stdout);
termPQExpBuffer(&show_buf);
}
/*
* Show the configuration of pg_probackup.
*/
static void
show_configure(pgBackupConfig *config)
{
show_configure_start();
if (show_format == SHOW_PLAIN)
writeBackupCatalogConfig(stdout, config);
else
show_configure_json(config);
show_configure_end();
}
/*
* Json output.
*/
static void
show_configure_json(pgBackupConfig *config)
{
PQExpBuffer buf = &show_buf;
uint64 res;
const char *unit;
json_add(buf, JT_BEGIN_OBJECT, &json_level);
json_add_value(buf, "pgdata", config->pgdata, json_level, false);
json_add_key(buf, "system-identifier", json_level, true);
appendPQExpBuffer(buf, UINT64_FORMAT, config->system_identifier);
/* Connection parameters */
if (config->pgdatabase)
json_add_value(buf, "pgdatabase", config->pgdatabase, json_level, true);
if (config->pghost)
json_add_value(buf, "pghost", config->pghost, json_level, true);
if (config->pgport)
json_add_value(buf, "pgport", config->pgport, json_level, true);
if (config->pguser)
json_add_value(buf, "pguser", config->pguser, json_level, true);
/* Replica parameters */
if (config->master_host)
json_add_value(buf, "master-host", config->master_host, json_level,
true);
if (config->master_port)
json_add_value(buf, "master-port", config->master_port, json_level,
true);
if (config->master_db)
json_add_value(buf, "master-db", config->master_db, json_level, true);
if (config->master_user)
json_add_value(buf, "master-user", config->master_user, json_level,
true);
json_add_key(buf, "replica-timeout", json_level, true);
convert_from_base_unit_u(config->replica_timeout, OPTION_UNIT_S,
&res, &unit);
appendPQExpBuffer(buf, UINT64_FORMAT "%s", res, unit);
/* Archive parameters */
json_add_key(buf, "archive-timeout", json_level, true);
convert_from_base_unit_u(config->archive_timeout, OPTION_UNIT_S,
&res, &unit);
appendPQExpBuffer(buf, UINT64_FORMAT "%s", res, unit);
/* Logging parameters */
json_add_value(buf, "log-level-console",
deparse_log_level(config->log_level_console), json_level,
true);
json_add_value(buf, "log-level-file",
deparse_log_level(config->log_level_file), json_level,
true);
json_add_value(buf, "log-filename", config->log_filename, json_level,
true);
if (config->error_log_filename)
json_add_value(buf, "error-log-filename", config->error_log_filename,
json_level, true);
if (strcmp(config->log_directory, LOG_DIRECTORY_DEFAULT) == 0)
{
char log_directory_fullpath[MAXPGPATH];
sprintf(log_directory_fullpath, "%s/%s",
backup_path, config->log_directory);
json_add_value(buf, "log-directory", log_directory_fullpath,
json_level, true);
}
else
json_add_value(buf, "log-directory", config->log_directory,
json_level, true);
json_add_key(buf, "log-rotation-size", json_level, true);
convert_from_base_unit_u(config->log_rotation_size, OPTION_UNIT_KB,
&res, &unit);
appendPQExpBuffer(buf, UINT64_FORMAT "%s", res, (res)?unit:"KB");
json_add_key(buf, "log-rotation-age", json_level, true);
convert_from_base_unit_u(config->log_rotation_age, OPTION_UNIT_S,
&res, &unit);
appendPQExpBuffer(buf, UINT64_FORMAT "%s", res, (res)?unit:"min");
/* Retention parameters */
json_add_key(buf, "retention-redundancy", json_level, true);
appendPQExpBuffer(buf, "%u", config->retention_redundancy);
json_add_key(buf, "retention-window", json_level, true);
appendPQExpBuffer(buf, "%u", config->retention_window);
/* Compression parameters */
json_add_value(buf, "compress-algorithm",
deparse_compress_alg(config->compress_alg), json_level,
true);
json_add_key(buf, "compress-level", json_level, true);
appendPQExpBuffer(buf, "%d", config->compress_level);
json_add(buf, JT_END_OBJECT, &json_level);
}
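/*
* Example of the JSON object this emits (values illustrative):
*
*     {
*         "pgdata": "/var/lib/postgresql/data",
*         "system-identifier": 6365361581339912631,
*         "log-level-console": "info",
*         "retention-redundancy": 0,
*         "compress-algorithm": "none",
*         "compress-level": 1
*     }
*/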

View File

@ -27,18 +27,24 @@
#ifdef HAVE_LIBZ
/* Implementation of zlib compression method */
static size_t zlib_compress(void* dst, size_t dst_size, void const* src, size_t src_size)
static int32
zlib_compress(void *dst, size_t dst_size, void const *src, size_t src_size,
int level)
{
uLongf compressed_size = dst_size;
int rc = compress2(dst, &compressed_size, src, src_size, compress_level);
uLongf compressed_size = dst_size;
int rc = compress2(dst, &compressed_size, src, src_size,
level);
return rc == Z_OK ? compressed_size : rc;
}
/* Implementation of zlib compression method */
static size_t zlib_decompress(void* dst, size_t dst_size, void const* src, size_t src_size)
static int32
zlib_decompress(void *dst, size_t dst_size, void const *src, size_t src_size)
{
uLongf dest_len = dst_size;
int rc = uncompress(dst, &dest_len, src, src_size);
uLongf dest_len = dst_size;
int rc = uncompress(dst, &dest_len, src, src_size);
return rc == Z_OK ? dest_len : rc;
}
#endif
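/*
* The switch to a signed int32 return type above matters: zlib reports
* failures with negative codes (e.g. Z_BUF_ERROR is -5), which the old
* size_t return silently turned into huge lengths. Callers can now do:
*
*     if (zlib_compress(dst, dst_size, src, src_size, level) <= 0)
*         ... fall back to storing the page uncompressed ...
*/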
@ -47,8 +53,9 @@ static size_t zlib_decompress(void* dst, size_t dst_size, void const* src, size_
* Compresses source into dest using algorithm. Returns the number of bytes
* written in the destination buffer, or a negative value if compression fails.
*/
static size_t
do_compress(void* dst, size_t dst_size, void const* src, size_t src_size, CompressAlg alg)
static int32
do_compress(void* dst, size_t dst_size, void const* src, size_t src_size,
CompressAlg alg, int level)
{
switch (alg)
{
@ -57,7 +64,7 @@ do_compress(void* dst, size_t dst_size, void const* src, size_t src_size, Compre
return -1;
#ifdef HAVE_LIBZ
case ZLIB_COMPRESS:
return zlib_compress(dst, dst_size, src, src_size);
return zlib_compress(dst, dst_size, src, src_size, level);
#endif
case PGLZ_COMPRESS:
return pglz_compress(src, src_size, dst, PGLZ_strategy_always);
@ -70,8 +77,9 @@ do_compress(void* dst, size_t dst_size, void const* src, size_t src_size, Compre
* Decompresses source into dest using algorithm. Returns the number of bytes
* decompressed in the destination buffer, or a negative value if decompression fails.
*/
static size_t
do_decompress(void* dst, size_t dst_size, void const* src, size_t src_size, CompressAlg alg)
static int32
do_decompress(void* dst, size_t dst_size, void const* src, size_t src_size,
CompressAlg alg)
{
switch (alg)
{
@ -101,6 +109,7 @@ typedef struct BackupPageHeader
/* Special value for compressed_size field */
#define PageIsTruncated -2
#define SkipCurrentPage -3
/* Verify page's header */
static bool
@ -134,8 +143,8 @@ static int
read_page_from_file(pgFile *file, BlockNumber blknum,
FILE *in, Page page, XLogRecPtr *page_lsn)
{
off_t offset = blknum*BLCKSZ;
size_t read_len = 0;
off_t offset = blknum * BLCKSZ;
size_t read_len = 0;
/* read the block */
if (fseek(in, offset, SEEK_SET) != 0)
@ -216,31 +225,32 @@ read_page_from_file(pgFile *file, BlockNumber blknum,
}
/*
* Backup the specified block from a file of a relation.
* Verify page header and checksum of the page and write it
* to the backup file.
* Retrieves a page, taking the backup mode into account,
* and writes it into argument "page". Argument "page"
* should point to an allocated buffer of BLCKSZ bytes.
*
* Prints appropriate warnings/errors/etc into the log.
* Returns: 0                   if the page was successfully retrieved,
*          SkipCurrentPage(-3) if we need to skip this page,
*          PageIsTruncated(-2) if the page was truncated.
*/
static void
backup_data_page(backup_files_args *arguments,
pgFile *file, XLogRecPtr prev_backup_start_lsn,
BlockNumber blknum, BlockNumber nblocks,
FILE *in, FILE *out,
pg_crc32 *crc, int *n_skipped,
BackupMode backup_mode)
static int32
prepare_page(backup_files_arg *arguments,
pgFile *file, XLogRecPtr prev_backup_start_lsn,
BlockNumber blknum, BlockNumber nblocks,
FILE *in, int *n_skipped,
BackupMode backup_mode,
Page page)
{
BackupPageHeader header;
Page page = malloc(BLCKSZ);
Page compressed_page = NULL;
XLogRecPtr page_lsn = 0;
size_t write_buffer_size;
char write_buffer[BLCKSZ+sizeof(header)];
int try_again = 100;
bool page_is_valid = false;
XLogRecPtr page_lsn = 0;
int try_again = 100;
bool page_is_valid = false;
bool page_is_truncated = false;
BlockNumber absolute_blknum = file->segno * RELSEG_SIZE + blknum;
header.block = blknum;
header.compressed_size = 0;
/* check for interrupt */
if (interrupted)
elog(ERROR, "Interrupted during backup");
/*
* Read the page and verify its header and checksum.
@ -258,7 +268,7 @@ backup_data_page(backup_files_args *arguments,
if (result == 0)
{
/* This block was truncated.*/
header.compressed_size = PageIsTruncated;
page_is_truncated = true;
/* The page is not actually valid, but it is absent
* and we're not going to reread or validate it */
page_is_valid = true;
@ -291,35 +301,38 @@ backup_data_page(backup_files_args *arguments,
if (backup_mode == BACKUP_MODE_DIFF_PTRACK || (!page_is_valid && is_ptrack_support))
{
size_t page_size = 0;
free(page);
page = NULL;
page = (Page) pg_ptrack_get_block(arguments, file->dbOid, file->tblspcOid,
Page ptrack_page = NULL;
ptrack_page = (Page) pg_ptrack_get_block(arguments, file->dbOid, file->tblspcOid,
file->relOid, absolute_blknum, &page_size);
if (page == NULL)
if (ptrack_page == NULL)
{
/* This block was truncated.*/
header.compressed_size = PageIsTruncated;
page_is_truncated = true;
}
else if (page_size != BLCKSZ)
{
free(ptrack_page);
elog(ERROR, "File: %s, block %u, expected block size %d, but read %lu",
file->path, absolute_blknum, BLCKSZ, page_size);
}
else
{
/*
* We need to copy the page that was successfully
* retrieved from ptrack into our output "page" parameter.
* We must set the checksum here, because it is outdated
* in the block received from shared buffers.
*/
memcpy(page, ptrack_page, BLCKSZ);
free(ptrack_page);
if (is_checksum_enabled)
((PageHeader) page)->pd_checksum = pg_checksum_page(page, absolute_blknum);
}
/* get lsn from page, provided by pg_ptrack_get_block() */
if (backup_mode == BACKUP_MODE_DIFF_DELTA &&
file->exists_in_prev &&
header.compressed_size != PageIsTruncated &&
!page_is_truncated &&
!parse_page(page, &page_lsn))
elog(ERROR, "Cannot parse page after pg_ptrack_get_block. "
"Possible risk of a memory corruption");
@ -328,76 +341,91 @@ backup_data_page(backup_files_args *arguments,
if (backup_mode == BACKUP_MODE_DIFF_DELTA &&
file->exists_in_prev &&
header.compressed_size != PageIsTruncated &&
!page_is_truncated &&
page_lsn < prev_backup_start_lsn)
{
elog(VERBOSE, "Skipping blknum: %u in file: %s", blknum, file->path);
(*n_skipped)++;
free(page);
return SkipCurrentPage;
}
if (page_is_truncated)
return PageIsTruncated;
return 0;
}
static void
compress_and_backup_page(pgFile *file, BlockNumber blknum,
FILE *in, FILE *out, pg_crc32 *crc,
int page_state, Page page,
CompressAlg calg, int clevel)
{
BackupPageHeader header;
size_t write_buffer_size = sizeof(header);
char write_buffer[BLCKSZ+sizeof(header)];
char compressed_page[BLCKSZ];
if(page_state == SkipCurrentPage)
return;
}
if (header.compressed_size != PageIsTruncated)
header.block = blknum;
header.compressed_size = page_state;
if(page_state == PageIsTruncated)
{
file->read_size += BLCKSZ;
compressed_page = malloc(BLCKSZ);
/*
* The page was truncated. Write only the header
* so that on restore we know the file must be truncated
*/
memcpy(write_buffer, &header, sizeof(header));
}
else
{
/* The page was not truncated, so we need to compress it */
header.compressed_size = do_compress(compressed_page, BLCKSZ,
page, BLCKSZ, compress_alg);
file->compress_alg = compress_alg;
page, BLCKSZ, calg, clevel);
file->compress_alg = calg;
file->read_size += BLCKSZ;
Assert (header.compressed_size <= BLCKSZ);
}
write_buffer_size = sizeof(header);
/*
* The page was truncated. Write only header
* to know that we must truncate restored file
*/
if (header.compressed_size == PageIsTruncated)
{
memcpy(write_buffer, &header, sizeof(header));
}
/* The page compression failed. Write it as is. */
else if (header.compressed_size == -1)
{
header.compressed_size = BLCKSZ;
memcpy(write_buffer, &header, sizeof(header));
memcpy(write_buffer + sizeof(header), page, BLCKSZ);
write_buffer_size += header.compressed_size;
}
/* The page was successfully compressed */
else if (header.compressed_size > 0)
{
memcpy(write_buffer, &header, sizeof(header));
memcpy(write_buffer + sizeof(header), compressed_page, header.compressed_size);
write_buffer_size += MAXALIGN(header.compressed_size);
/* The page was successfully compressed. */
if (header.compressed_size > 0)
{
memcpy(write_buffer, &header, sizeof(header));
memcpy(write_buffer + sizeof(header),
compressed_page, header.compressed_size);
write_buffer_size += MAXALIGN(header.compressed_size);
}
/* Nonpositive value means that compression failed. Write it as is. */
else
{
header.compressed_size = BLCKSZ;
memcpy(write_buffer, &header, sizeof(header));
memcpy(write_buffer + sizeof(header), page, BLCKSZ);
write_buffer_size += header.compressed_size;
}
}
/* elog(VERBOSE, "backup blkno %u, compressed_size %d write_buffer_size %ld",
blknum, header.compressed_size, write_buffer_size); */
/* Update CRC */
COMP_CRC32C(*crc, &write_buffer, write_buffer_size);
COMP_CRC32C(*crc, write_buffer, write_buffer_size);
/* write data page */
if(fwrite(write_buffer, 1, write_buffer_size, out) != write_buffer_size)
{
int errno_tmp = errno;
int errno_tmp = errno;
fclose(in);
fclose(out);
elog(ERROR, "File: %s, cannot write backup at block %u : %s",
file->path, blknum, strerror(errno_tmp));
file->path, blknum, strerror(errno_tmp));
}
file->write_size += write_buffer_size;
if (page != NULL)
free(page);
if (compressed_page != NULL)
free(compressed_page);
}
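/*
* Resulting per-page layout in the backup file, assuming an 8-byte
* MAXALIGN (purely for illustration):
*
*     | BackupPageHeader { block, compressed_size } | payload |
*
* A page compressed to 5001 bytes is stored as sizeof(header) +
* MAXALIGN(5001) = sizeof(header) + 5008 bytes; an incompressible
* page is stored as sizeof(header) + BLCKSZ raw bytes.
*/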
/*
@ -409,18 +437,19 @@ backup_data_page(backup_files_args *arguments,
* backup with special header.
*/
bool
backup_data_file(backup_files_args* arguments,
const char *from_root, const char *to_root,
pgFile *file, XLogRecPtr prev_backup_start_lsn,
BackupMode backup_mode)
backup_data_file(backup_files_arg* arguments,
const char *to_path, pgFile *file,
XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode,
CompressAlg calg, int clevel)
{
char to_path[MAXPGPATH];
FILE *in;
FILE *out;
BlockNumber blknum = 0;
BlockNumber nblocks = 0;
int n_blocks_skipped = 0;
int n_blocks_read = 0;
FILE *in;
FILE *out;
BlockNumber blknum = 0;
BlockNumber nblocks = 0;
int n_blocks_skipped = 0;
int n_blocks_read = 0;
int page_state;
char curr_page[BLCKSZ];
/*
* Skip unchanged file only if it exists in previous backup.
@ -430,7 +459,7 @@ backup_data_file(backup_files_args* arguments,
if ((backup_mode == BACKUP_MODE_DIFF_PAGE ||
backup_mode == BACKUP_MODE_DIFF_PTRACK) &&
file->pagemap.bitmapsize == PageBitmapIsEmpty &&
file->exists_in_prev)
file->exists_in_prev && !file->pagemap_isabsent)
{
/*
* There are no changed blocks since the last backup. We want to make
* an incremental backup, so we should exit.
INIT_CRC32C(file->crc);
/* open backup mode file for read */
in = fopen(file->path, "r");
in = fopen(file->path, PG_BINARY_R);
if (in == NULL)
{
FIN_CRC32C(file->crc);
@ -479,8 +508,7 @@ backup_data_file(backup_files_args* arguments,
nblocks = file->size/BLCKSZ;
/* open backup file for write */
join_path_components(to_path, to_root, file->path + strlen(from_root) + 1);
out = fopen(to_path, "w");
out = fopen(to_path, PG_BINARY_W);
if (out == NULL)
{
int errno_tmp = errno;
@ -493,32 +521,45 @@ backup_data_file(backup_files_args* arguments,
* Read each page, verify checksum and write it to backup.
* If the page map is empty or the file is not present in the previous backup,
* back up all pages of the relation.
*
* We will enter here if backup_mode is FULL or DELTA.
*/
if (file->pagemap.bitmapsize == PageBitmapIsEmpty
|| file->pagemap.bitmapsize == PageBitmapIsAbsent
|| !file->exists_in_prev)
if (file->pagemap.bitmapsize == PageBitmapIsEmpty ||
file->pagemap_isabsent || !file->exists_in_prev)
{
for (blknum = 0; blknum < nblocks; blknum++)
{
backup_data_page(arguments, file, prev_backup_start_lsn, blknum,
nblocks, in, out, &(file->crc),
&n_blocks_skipped, backup_mode);
page_state = prepare_page(arguments, file, prev_backup_start_lsn,
blknum, nblocks, in, &n_blocks_skipped,
backup_mode, curr_page);
compress_and_backup_page(file, blknum, in, out, &(file->crc),
page_state, curr_page, calg, clevel);
n_blocks_read++;
if (page_state == PageIsTruncated)
break;
}
if (backup_mode == BACKUP_MODE_DIFF_DELTA)
file->n_blocks = n_blocks_read;
}
/* If page map is not empty we scan only changed blocks, */
/*
* If page map is not empty we scan only changed blocks.
*
* We will enter here if backup_mode is PAGE or PTRACK.
*/
else
{
datapagemap_iterator_t *iter;
iter = datapagemap_iterate(&file->pagemap);
while (datapagemap_next(iter, &blknum))
{
backup_data_page(arguments, file, prev_backup_start_lsn, blknum,
nblocks, in, out, &(file->crc),
&n_blocks_skipped, backup_mode);
page_state = prepare_page(arguments, file, prev_backup_start_lsn,
blknum, nblocks, in, &n_blocks_skipped,
backup_mode, curr_page);
compress_and_backup_page(file, blknum, in, out, &(file->crc),
page_state, curr_page, calg, clevel);
n_blocks_read++;
if (page_state == PageIsTruncated)
break;
}
pg_free(file->pagemap.bitmap);
@ -562,25 +603,26 @@ backup_data_file(backup_files_args* arguments,
/*
* Restore files in the from_root directory to the to_root directory with
* same relative path.
*
* If write_header is true then we add a header to each restored block; currently
* this is used by the MERGE command.
*/
void
restore_data_file(const char *from_root,
const char *to_root,
pgFile *file,
pgBackup *backup)
restore_data_file(const char *to_path, pgFile *file, bool allow_truncate,
bool write_header)
{
char to_path[MAXPGPATH];
FILE *in = NULL;
FILE *out = NULL;
BackupPageHeader header;
BlockNumber blknum;
size_t file_size;
FILE *in = NULL;
FILE *out = NULL;
BackupPageHeader header;
BlockNumber blknum = 0,
truncate_from = 0;
bool need_truncate = false;
/* BYTES_INVALID allowed only in case of restoring file from DELTA backup */
if (file->write_size != BYTES_INVALID)
{
/* open backup mode file for read */
in = fopen(file->path, "r");
in = fopen(file->path, PG_BINARY_R);
if (in == NULL)
{
elog(ERROR, "cannot open backup file \"%s\": %s", file->path,
@ -593,10 +635,9 @@ restore_data_file(const char *from_root,
* modified pages for differential restore. If the file does not exist,
* re-open it with "w" to create an empty file.
*/
join_path_components(to_path, to_root, file->path + strlen(from_root) + 1);
out = fopen(to_path, "r+");
out = fopen(to_path, PG_BINARY_R "+");
if (out == NULL && errno == ENOENT)
out = fopen(to_path, "w");
out = fopen(to_path, PG_BINARY_W);
if (out == NULL)
{
int errno_tmp = errno;
@ -605,8 +646,9 @@ restore_data_file(const char *from_root,
to_path, strerror(errno_tmp));
}
for (blknum = 0; ; blknum++)
while (true)
{
off_t write_pos;
size_t read_len;
DataPage compressed_page; /* used as read buffer */
DataPage page;
@ -615,6 +657,21 @@ restore_data_file(const char *from_root,
if (file->write_size == BYTES_INVALID)
break;
/*
* We need to truncate the result file if the data file in an incremental
* backup is smaller than the data file in a full backup. We know this
* thanks to n_blocks.
*
* It may be equal to -1, in which case we don't want to truncate the
* result file.
*/
if (file->n_blocks != BLOCKNUM_INVALID &&
(blknum + 1) > file->n_blocks)
{
truncate_from = blknum;
need_truncate = true;
break;
}
/* read BackupPageHeader */
read_len = fread(&header, 1, sizeof(header), in);
if (read_len != sizeof(header))
@ -632,18 +689,19 @@ restore_data_file(const char *from_root,
}
if (header.block < blknum)
elog(ERROR, "backup is broken at file->path %s block %u",file->path, blknum);
elog(ERROR, "backup is broken at file->path %s block %u",
file->path, blknum);
blknum = header.block;
if (header.compressed_size == PageIsTruncated)
{
/*
* Backup contains information that this block was truncated.
* Truncate file to this length.
* We need to truncate file to this length.
*/
if (ftruncate(fileno(out), header.block * BLCKSZ) != 0)
elog(ERROR, "cannot truncate \"%s\": %s",
file->path, strerror(errno));
elog(VERBOSE, "truncate file %s to block %u", file->path, header.block);
truncate_from = blknum;
need_truncate = true;
break;
}
@ -657,64 +715,89 @@ restore_data_file(const char *from_root,
if (header.compressed_size != BLCKSZ)
{
size_t uncompressed_size = 0;
int32 uncompressed_size = 0;
uncompressed_size = do_decompress(page.data, BLCKSZ,
compressed_page.data,
header.compressed_size, file->compress_alg);
compressed_page.data,
MAXALIGN(header.compressed_size),
file->compress_alg);
if (uncompressed_size != BLCKSZ)
elog(ERROR, "page uncompressed to %ld bytes. != BLCKSZ", uncompressed_size);
elog(ERROR, "page of file \"%s\" uncompressed to %d bytes. != BLCKSZ",
file->path, uncompressed_size);
}
write_pos = (write_header) ? blknum * (BLCKSZ + sizeof(header)) :
blknum * BLCKSZ;
/*
* Seek and write the restored page.
*/
blknum = header.block;
if (fseek(out, blknum * BLCKSZ, SEEK_SET) < 0)
if (fseek(out, write_pos, SEEK_SET) < 0)
elog(ERROR, "cannot seek block %u of \"%s\": %s",
blknum, to_path, strerror(errno));
if (write_header)
{
if (fwrite(&header, 1, sizeof(header), out) != sizeof(header))
elog(ERROR, "cannot write header of block %u of \"%s\": %s",
blknum, file->path, strerror(errno));
}
if (header.compressed_size < BLCKSZ)
{
if (fwrite(page.data, 1, BLCKSZ, out) != BLCKSZ)
elog(ERROR, "cannot write block %u of \"%s\": %s",
blknum, file->path, strerror(errno));
blknum, file->path, strerror(errno));
}
else
{
/* if page wasn't compressed, we've read full block */
if (fwrite(compressed_page.data, 1, BLCKSZ, out) != BLCKSZ)
elog(ERROR, "cannot write block %u of \"%s\": %s",
blknum, file->path, strerror(errno));
blknum, file->path, strerror(errno));
}
}
/*
* DELTA backup have no knowledge about truncated blocks as PAGE or PTRACK do
* But during DELTA backup we read every file in PGDATA and thus DELTA backup
* knows exact size of every file at the time of backup.
* So when restoring file from DELTA backup we, knowning it`s size at
* a time of a backup, can truncate file to this size.
*/
if (backup->backup_mode == BACKUP_MODE_DIFF_DELTA)
/*
* A DELTA backup has no knowledge about truncated blocks as PAGE or PTRACK do,
* but during a DELTA backup we read every file in PGDATA and thus a DELTA backup
* knows the exact size of every file at the time of backup.
* So when restoring a file from a DELTA backup, knowing its size at
* the time of the backup, we can truncate the file to this size.
*/
if (allow_truncate && file->n_blocks != BLOCKNUM_INVALID && !need_truncate)
{
size_t file_size = 0;
/* get file current size */
fseek(out, 0, SEEK_END);
file_size = ftell(out);
if (file_size > file->n_blocks * BLCKSZ)
{
/*
* Truncate file to this length.
*/
if (ftruncate(fileno(out), file->n_blocks * BLCKSZ) != 0)
elog(ERROR, "cannot truncate \"%s\": %s",
file->path, strerror(errno));
elog(INFO, "Delta truncate file %s to block %u", file->path, file->n_blocks);
truncate_from = file->n_blocks;
need_truncate = true;
}
}
if (need_truncate)
{
off_t write_pos;
write_pos = (write_header) ? truncate_from * (BLCKSZ + sizeof(header)) :
truncate_from * BLCKSZ;
/*
* Truncate file to this length.
*/
if (ftruncate(fileno(out), write_pos) != 0)
elog(ERROR, "cannot truncate \"%s\": %s",
file->path, strerror(errno));
elog(INFO, "Delta truncate file %s to block %u",
file->path, truncate_from);
}
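/*
* With write_header the restored file is a sequence of (header, page)
* pairs, so byte offsets scale by BLCKSZ + sizeof(BackupPageHeader)
* rather than BLCKSZ; e.g. with 8KB pages and an 8-byte header,
* truncating from block 3 cuts the file at 3 * 8200 = 24600 bytes.
*/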
/* update file permission */
if (chmod(to_path, file->mode) == -1)
{
@ -759,7 +842,7 @@ copy_file(const char *from_root, const char *to_root, pgFile *file)
file->write_size = 0;
/* open backup mode file for read */
in = fopen(file->path, "r");
in = fopen(file->path, PG_BINARY_R);
if (in == NULL)
{
FIN_CRC32C(crc);
@ -775,7 +858,7 @@ copy_file(const char *from_root, const char *to_root, pgFile *file)
/* open backup file for write */
join_path_components(to_path, to_root, file->path + strlen(from_root) + 1);
out = fopen(to_path, "w");
out = fopen(to_path, PG_BINARY_W);
if (out == NULL)
{
int errno_tmp = errno;
@ -843,7 +926,7 @@ copy_file(const char *from_root, const char *to_root, pgFile *file)
file->read_size += read_len;
}
file->write_size = file->read_size;
file->write_size = (int64) file->read_size;
/* finish CRC calculation and store into pgFile */
FIN_CRC32C(crc);
file->crc = crc;
@ -867,6 +950,22 @@ copy_file(const char *from_root, const char *to_root, pgFile *file)
return true;
}
/*
* Move a file from one backup to another.
* We do not apply compression to these files, because
* each is either a small control file or an already compressed cfs file.
*/
void
move_file(const char *from_root, const char *to_root, pgFile *file)
{
char to_path[MAXPGPATH];
join_path_components(to_path, to_root, file->path + strlen(from_root) + 1);
if (rename(file->path, to_path) == -1)
elog(ERROR, "Cannot move file \"%s\" to path \"%s\": %s",
file->path, to_path, strerror(errno));
}
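/*
* Note that rename(2) cannot cross filesystem boundaries (it fails
* with EXDEV), so this helper assumes both backups live under the
* same backup catalog mount point, which holds for the merge case.
*/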
#ifdef HAVE_LIBZ
/*
* Report an error encountered while working with a compressed file
@ -918,7 +1017,7 @@ push_wal_file(const char *from_path, const char *to_path, bool is_compress,
bool overwrite)
{
FILE *in = NULL;
FILE *out;
FILE *out = NULL;
char buf[XLOG_BLCKSZ];
const char *to_path_p = to_path;
char to_path_temp[MAXPGPATH];
@ -930,7 +1029,7 @@ push_wal_file(const char *from_path, const char *to_path, bool is_compress,
#endif
/* open file for read */
in = fopen(from_path, "r");
in = fopen(from_path, PG_BINARY_R);
if (in == NULL)
elog(ERROR, "Cannot open source WAL file \"%s\": %s", from_path,
strerror(errno));
@ -946,7 +1045,7 @@ push_wal_file(const char *from_path, const char *to_path, bool is_compress,
snprintf(to_path_temp, sizeof(to_path_temp), "%s.partial", gz_to_path);
gz_out = gzopen(to_path_temp, "wb");
gz_out = gzopen(to_path_temp, PG_BINARY_W);
if (gzsetparams(gz_out, compress_level, Z_DEFAULT_STRATEGY) != Z_OK)
elog(ERROR, "Cannot set compression level %d to file \"%s\": %s",
compress_level, to_path_temp, get_gz_error(gz_out, errno));
@ -961,7 +1060,7 @@ push_wal_file(const char *from_path, const char *to_path, bool is_compress,
snprintf(to_path_temp, sizeof(to_path_temp), "%s.partial", to_path);
out = fopen(to_path_temp, "w");
out = fopen(to_path_temp, PG_BINARY_W);
if (out == NULL)
elog(ERROR, "Cannot open destination WAL file \"%s\": %s",
to_path_temp, strerror(errno));
@ -1083,7 +1182,7 @@ get_wal_file(const char *from_path, const char *to_path)
#endif
/* open file for read */
in = fopen(from_path, "r");
in = fopen(from_path, PG_BINARY_R);
if (in == NULL)
{
#ifdef HAVE_LIBZ
@ -1092,7 +1191,7 @@ get_wal_file(const char *from_path, const char *to_path)
* extension.
*/
snprintf(gz_from_path, sizeof(gz_from_path), "%s.gz", from_path);
gz_in = gzopen(gz_from_path, "rb");
gz_in = gzopen(gz_from_path, PG_BINARY_R);
if (gz_in == NULL)
{
if (errno == ENOENT)
@ -1120,7 +1219,7 @@ get_wal_file(const char *from_path, const char *to_path)
/* open backup file for write */
snprintf(to_path_temp, sizeof(to_path_temp), "%s.partial", to_path);
out = fopen(to_path_temp, "w");
out = fopen(to_path_temp, PG_BINARY_W);
if (out == NULL)
elog(ERROR, "Cannot open destination WAL file \"%s\": %s",
to_path_temp, strerror(errno));
@ -1254,7 +1353,7 @@ calc_file_checksum(pgFile *file)
file->write_size = 0;
/* open backup mode file for read */
in = fopen(file->path, "r");
in = fopen(file->path, PG_BINARY_R);
if (in == NULL)
{
FIN_CRC32C(crc);

View File

@ -33,8 +33,6 @@ do_delete(time_t backup_id)
/* Get complete list of backups */
backup_list = catalog_get_backup_list(INVALID_BACKUP_ID);
if (backup_list == NULL)
elog(ERROR, "Failed to get backup list.");
if (backup_id != 0)
{
@ -141,7 +139,8 @@ do_retention_purge(void)
if (retention_window > 0)
elog(LOG, "WINDOW=%u", retention_window);
if (retention_redundancy == 0 && retention_window == 0)
if (retention_redundancy == 0
&& retention_window == 0)
{
elog(WARNING, "Retention policy is not set");
if (!delete_wal)
@ -161,7 +160,8 @@ do_retention_purge(void)
}
/* Find target backups to be deleted */
if (delete_expired && (retention_redundancy > 0 || retention_window > 0))
if (delete_expired &&
(retention_redundancy > 0 || retention_window > 0))
{
backup_num = 0;
for (i = 0; i < parray_num(backup_list); i++)
@ -173,13 +173,13 @@ do_retention_purge(void)
if (backup->status != BACKUP_STATUS_OK)
continue;
/*
* When a validate full backup was found, we can delete the
* When a valid full backup was found, we can delete the
* backup that is older than it using the number of generations.
*/
if (backup->backup_mode == BACKUP_MODE_FULL)
backup_num++;
/* Evaluateretention_redundancy if this backup is eligible for removal */
/* Evaluate retention_redundancy if this backup is eligible for removal */
if (keep_next_backup ||
retention_redundancy >= backup_num_evaluate + 1 ||
(retention_window > 0 && backup->recovery_time >= days_threshold))
@ -200,6 +200,7 @@ do_retention_purge(void)
continue;
}
/* Delete backup and update status to DELETED */
pgBackupDeleteFiles(backup);
backup_deleted = true;
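The policy evaluated in this loop keeps a backup if it lies within the newest retention_redundancy FULL generations or if its recovery time falls inside the retention window. A simplified model of that decision, ignoring keep_next_backup and incremental chains (the function and its parameters are illustrative only):

#include <stdbool.h>
#include <time.h>

/*
 * Sketch: full_generations counts FULL backups newer than or equal to
 * this one; recovery_time is when the backup becomes consistent.
 */
static bool
keep_backup(unsigned retention_redundancy, unsigned retention_window,
            unsigned full_generations, time_t recovery_time)
{
    time_t  days_threshold = time(NULL) -
        (time_t) retention_window * 60 * 60 * 24;

    if (retention_redundancy > 0 &&
        full_generations <= retention_redundancy)
        return true;
    if (retention_window > 0 && recovery_time >= days_threshold)
        return true;
    return false;
}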
@ -259,7 +260,8 @@ pgBackupDeleteFiles(pgBackup *backup)
time2iso(timestamp, lengthof(timestamp), backup->recovery_time);
elog(INFO, "delete: %s %s", base36enc(backup->start_time), timestamp);
elog(INFO, "delete: %s %s",
base36enc(backup->start_time), timestamp);
/*
* Update STATUS to BACKUP_STATUS_DELETING in preparation for the case which
@ -281,7 +283,7 @@ pgBackupDeleteFiles(pgBackup *backup)
/* print progress */
elog(VERBOSE, "delete file(%zd/%lu) \"%s\"", i + 1,
(unsigned long) parray_num(files), file->path);
(unsigned long) parray_num(files), file->path);
if (remove(file->path))
{

src/dir.c

@ -10,7 +10,6 @@
#include "pg_probackup.h"
#include <libgen.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>
@ -88,6 +87,34 @@ static char *pgdata_exclude_files_non_exclusive[] =
NULL
};
/* Tablespace mapping structures */
typedef struct TablespaceListCell
{
struct TablespaceListCell *next;
char old_dir[MAXPGPATH];
char new_dir[MAXPGPATH];
} TablespaceListCell;
typedef struct TablespaceList
{
TablespaceListCell *head;
TablespaceListCell *tail;
} TablespaceList;
typedef struct TablespaceCreatedListCell
{
struct TablespaceCreatedListCell *next;
char link_name[MAXPGPATH];
char linked_dir[MAXPGPATH];
} TablespaceCreatedListCell;
typedef struct TablespaceCreatedList
{
TablespaceCreatedListCell *head;
TablespaceCreatedListCell *tail;
} TablespaceCreatedList;
static int BlackListCompare(const void *str1, const void *str2);
static bool dir_check_file(const char *root, pgFile *file);
@ -95,17 +122,23 @@ static void dir_list_file_internal(parray *files, const char *root,
pgFile *parent, bool exclude,
bool omit_symlink, parray *black_list);
static void list_data_directories(parray *files, const char *path, bool is_root,
bool exclude);
/* Tablespace mapping */
static TablespaceList tablespace_dirs = {NULL, NULL};
static TablespaceCreatedList tablespace_created_dirs = {NULL, NULL};
/*
* Create directory, also create parent directories if necessary.
*/
int
dir_create_dir(const char *dir, mode_t mode)
{
char copy[MAXPGPATH];
char parent[MAXPGPATH];
strncpy(copy, dir, MAXPGPATH);
strncpy(parent, dirname(copy), MAXPGPATH);
strncpy(parent, dir, MAXPGPATH);
get_parent_directory(parent);
/* Create parent first */
if (access(parent, F_OK) == -1)
@ -153,6 +186,8 @@ pgFileInit(const char *path)
file = (pgFile *) pgut_malloc(sizeof(pgFile));
file->name = NULL;
file->size = 0;
file->mode = 0;
file->read_size = 0;
@ -161,7 +196,8 @@ pgFileInit(const char *path)
file->is_datafile = false;
file->linked = NULL;
file->pagemap.bitmap = NULL;
file->pagemap.bitmapsize = PageBitmapIsAbsent;
file->pagemap.bitmapsize = PageBitmapIsEmpty;
file->pagemap_isabsent = false;
file->tblspcOid = 0;
file->dbOid = 0;
file->relOid = 0;
@ -185,7 +221,8 @@ pgFileInit(const char *path)
file->is_cfs = false;
file->exists_in_prev = false; /* can change only in Incremental backup. */
file->n_blocks = -1; /* can change only in DELTA backup. Number of blocks readed during backup */
/* Number of blocks read during backup */
file->n_blocks = BLOCKNUM_INVALID;
file->compress_alg = NOT_DEFINED_COMPRESS;
return file;
}
@ -223,7 +260,7 @@ delete_file:
}
pg_crc32
pgFileGetCRC(pgFile *file)
pgFileGetCRC(const char *file_path)
{
FILE *fp;
pg_crc32 crc = 0;
@ -232,10 +269,10 @@ pgFileGetCRC(pgFile *file)
int errno_tmp;
/* open file in binary read mode */
fp = fopen(file->path, "r");
fp = fopen(file_path, PG_BINARY_R);
if (fp == NULL)
elog(ERROR, "cannot open file \"%s\": %s",
file->path, strerror(errno));
file_path, strerror(errno));
/* calc CRC of backup file */
INIT_CRC32C(crc);
@ -247,7 +284,7 @@ pgFileGetCRC(pgFile *file)
}
errno_tmp = errno;
if (!feof(fp))
elog(WARNING, "cannot read \"%s\": %s", file->path,
elog(WARNING, "cannot read \"%s\": %s", file_path,
strerror(errno_tmp));
if (len > 0)
COMP_CRC32C(crc, buf, len);
@ -350,7 +387,7 @@ dir_list_file(parray *files, const char *root, bool exclude, bool omit_symlink,
char black_item[MAXPGPATH * 2];
black_list = parray_new();
black_list_file = fopen(path, "r");
black_list_file = fopen(path, PG_BINARY_R);
if (black_list_file == NULL)
elog(ERROR, "cannot open black_list: %s", strerror(errno));
@ -385,7 +422,6 @@ dir_list_file(parray *files, const char *root, bool exclude, bool omit_symlink,
parray_append(files, file);
dir_list_file_internal(files, root, file, exclude, omit_symlink, black_list);
parray_qsort(files, pgFileComparePath);
}
/*
@ -676,7 +712,7 @@ dir_list_file_internal(parray *files, const char *root, pgFile *parent,
* **is_root** is a bit of a hack. We exclude only the first level of directories,
* and on the first level we check all files and directories.
*/
void
static void
list_data_directories(parray *files, const char *path, bool is_root,
bool exclude)
{
@ -748,6 +784,277 @@ list_data_directories(parray *files, const char *path, bool is_root,
path, strerror(prev_errno));
}
/*
* Save the created directory path into memory. We can use it during the next
* PAGE restore to avoid raising the error "restore tablespace destination is not empty" in
* create_data_directories().
*/
static void
set_tablespace_created(const char *link, const char *dir)
{
TablespaceCreatedListCell *cell = pgut_new(TablespaceCreatedListCell);
strcpy(cell->link_name, link);
strcpy(cell->linked_dir, dir);
cell->next = NULL;
if (tablespace_created_dirs.tail)
tablespace_created_dirs.tail->next = cell;
else
tablespace_created_dirs.head = cell;
tablespace_created_dirs.tail = cell;
}
/*
* Retrieve tablespace path, either relocated or original depending on whether
* -T was passed or not.
*
* Copy of function get_tablespace_mapping() from pg_basebackup.c.
*/
static const char *
get_tablespace_mapping(const char *dir)
{
TablespaceListCell *cell;
for (cell = tablespace_dirs.head; cell; cell = cell->next)
if (strcmp(dir, cell->old_dir) == 0)
return cell->new_dir;
return dir;
}
/*
* Check whether the directory was created when the symlink was created in restore_directories().
*/
static const char *
get_tablespace_created(const char *link)
{
TablespaceCreatedListCell *cell;
for (cell = tablespace_created_dirs.head; cell; cell = cell->next)
if (strcmp(link, cell->link_name) == 0)
return cell->linked_dir;
return NULL;
}
/*
* Split argument into old_dir and new_dir and append to tablespace mapping
* list.
*
* Copy of function tablespace_list_append() from pg_basebackup.c.
*/
void
opt_tablespace_map(pgut_option *opt, const char *arg)
{
TablespaceListCell *cell = pgut_new(TablespaceListCell);
char *dst;
char *dst_ptr;
const char *arg_ptr;
dst_ptr = dst = cell->old_dir;
for (arg_ptr = arg; *arg_ptr; arg_ptr++)
{
if (dst_ptr - dst >= MAXPGPATH)
elog(ERROR, "directory name too long");
if (*arg_ptr == '\\' && *(arg_ptr + 1) == '=')
; /* skip backslash escaping = */
else if (*arg_ptr == '=' && (arg_ptr == arg || *(arg_ptr - 1) != '\\'))
{
if (*cell->new_dir)
elog(ERROR, "multiple \"=\" signs in tablespace mapping\n");
else
dst = dst_ptr = cell->new_dir;
}
else
*dst_ptr++ = *arg_ptr;
}
if (!*cell->old_dir || !*cell->new_dir)
elog(ERROR, "invalid tablespace mapping format \"%s\", "
"must be \"OLDDIR=NEWDIR\"", arg);
/*
* This check isn't absolutely necessary. But all tablespaces are created
* with absolute directories, so specifying a non-absolute path here would
* just never match, possibly confusing users. It's also good to be
* consistent with the new_dir check.
*/
if (!is_absolute_path(cell->old_dir))
elog(ERROR, "old directory is not an absolute path in tablespace mapping: %s\n",
cell->old_dir);
if (!is_absolute_path(cell->new_dir))
elog(ERROR, "new directory is not an absolute path in tablespace mapping: %s\n",
cell->new_dir);
if (tablespace_dirs.tail)
tablespace_dirs.tail->next = cell;
else
tablespace_dirs.head = cell;
tablespace_dirs.tail = cell;
}
/*
* Create backup directories from **backup_dir** to **data_dir**. Doesn't raise
* an error if target directories exist.
*
* If **extract_tablespaces** is true then try to extract tablespace data
* directories into their initial path using tablespace_map file.
*/
void
create_data_directories(const char *data_dir, const char *backup_dir,
bool extract_tablespaces)
{
parray *dirs,
*links = NULL;
size_t i;
char backup_database_dir[MAXPGPATH],
to_path[MAXPGPATH];
dirs = parray_new();
if (extract_tablespaces)
{
links = parray_new();
read_tablespace_map(links, backup_dir);
}
join_path_components(backup_database_dir, backup_dir, DATABASE_DIR);
list_data_directories(dirs, backup_database_dir, true, false);
elog(LOG, "restore directories and symlinks...");
for (i = 0; i < parray_num(dirs); i++)
{
pgFile *dir = (pgFile *) parray_get(dirs, i);
char *relative_ptr = GetRelativePath(dir->path, backup_database_dir);
Assert(S_ISDIR(dir->mode));
/* Try to create symlink and linked directory if necessary */
if (extract_tablespaces &&
path_is_prefix_of_path(PG_TBLSPC_DIR, relative_ptr))
{
char *link_ptr = GetRelativePath(relative_ptr, PG_TBLSPC_DIR),
*link_sep,
*tmp_ptr;
char link_name[MAXPGPATH];
pgFile **link;
/* Extract link name from relative path */
link_sep = first_dir_separator(link_ptr);
if (link_sep != NULL)
{
int len = link_sep - link_ptr;
strncpy(link_name, link_ptr, len);
link_name[len] = '\0';
}
else
goto create_directory;
tmp_ptr = dir->path;
dir->path = link_name;
/* Search only by symlink name without path */
link = (pgFile **) parray_bsearch(links, dir, pgFileComparePath);
dir->path = tmp_ptr;
if (link)
{
const char *linked_path = get_tablespace_mapping((*link)->linked);
const char *dir_created;
if (!is_absolute_path(linked_path))
elog(ERROR, "tablespace directory is not an absolute path: %s\n",
linked_path);
/* Check if linked directory was created earlier */
dir_created = get_tablespace_created(link_name);
if (dir_created)
{
/*
* If the symlink and linked directory were already created, do not
* create them a second time.
*/
if (strcmp(dir_created, linked_path) == 0)
{
/*
* Create the rest of the directories.
* First check whether there is any directory name after the
* separator.
*/
if (link_sep != NULL && *(link_sep + 1) != '\0')
goto create_directory;
else
continue;
}
else
elog(ERROR, "tablespace directory \"%s\" of page backup does not "
"match with previous created tablespace directory \"%s\" of symlink \"%s\"",
linked_path, dir_created, link_name);
}
/*
* This check was done in check_tablespace_mapping(). But do
* it again.
*/
if (!dir_is_empty(linked_path))
elog(ERROR, "restore tablespace destination is not empty: \"%s\"",
linked_path);
if (link_sep)
elog(LOG, "create directory \"%s\" and symbolic link \"%.*s\"",
linked_path,
(int) (link_sep - relative_ptr), relative_ptr);
else
elog(LOG, "create directory \"%s\" and symbolic link \"%s\"",
linked_path, relative_ptr);
/* Firstly, create linked directory */
dir_create_dir(linked_path, DIR_PERMISSION);
join_path_components(to_path, data_dir, PG_TBLSPC_DIR);
/* Create pg_tblspc directory just in case */
dir_create_dir(to_path, DIR_PERMISSION);
/* Secondly, create link */
join_path_components(to_path, to_path, link_name);
if (symlink(linked_path, to_path) < 0)
elog(ERROR, "could not create symbolic link \"%s\": %s",
to_path, strerror(errno));
/* Save linked directory */
set_tablespace_created(link_name, linked_path);
/*
* Create the rest of the directories.
* First check whether there is any directory name after the separator.
*/
if (link_sep != NULL && *(link_sep + 1) != '\0')
goto create_directory;
continue;
}
}
create_directory:
elog(LOG, "create directory \"%s\"", relative_ptr);
/* This is not symlink, create directory */
join_path_components(to_path, data_dir, relative_ptr);
dir_create_dir(to_path, DIR_PERMISSION);
}
if (extract_tablespaces)
{
parray_walk(links, pgFileFree);
parray_free(links);
}
parray_walk(dirs, pgFileFree);
parray_free(dirs);
}
/*
* Read names of symbolic links of tablespaces with links to directories from
* tablespace_map or tablespace_map.txt.
@ -799,6 +1106,70 @@ read_tablespace_map(parray *files, const char *backup_dir)
fclose(fp);
}
/*
* Check that all tablespace mapping entries have correct linked directory
* paths. Linked directories must be empty or do not exist.
*
* If tablespace-mapping option is supplied, all OLDDIR entries must have
* entries in tablespace_map file.
*/
void
check_tablespace_mapping(pgBackup *backup)
{
char this_backup_path[MAXPGPATH];
parray *links;
size_t i;
TablespaceListCell *cell;
pgFile *tmp_file = pgut_new(pgFile);
links = parray_new();
pgBackupGetPath(backup, this_backup_path, lengthof(this_backup_path), NULL);
read_tablespace_map(links, this_backup_path);
if (log_level_console <= LOG || log_level_file <= LOG)
elog(LOG, "check tablespace directories of backup %s",
base36enc(backup->start_time));
/* 1 - each OLDDIR must have an entry in tablespace_map file (links) */
for (cell = tablespace_dirs.head; cell; cell = cell->next)
{
tmp_file->linked = cell->old_dir;
if (parray_bsearch(links, tmp_file, pgFileCompareLinked) == NULL)
elog(ERROR, "--tablespace-mapping option's old directory "
"doesn't have an entry in tablespace_map file: \"%s\"",
cell->old_dir);
}
/* 2 - all linked directories must be empty */
for (i = 0; i < parray_num(links); i++)
{
pgFile *link = (pgFile *) parray_get(links, i);
const char *linked_path = link->linked;
TablespaceListCell *cell;
for (cell = tablespace_dirs.head; cell; cell = cell->next)
if (strcmp(link->linked, cell->old_dir) == 0)
{
linked_path = cell->new_dir;
break;
}
if (!is_absolute_path(linked_path))
elog(ERROR, "tablespace directory is not an absolute path: %s\n",
linked_path);
if (!dir_is_empty(linked_path))
elog(ERROR, "restore tablespace destination is not empty: \"%s\"",
linked_path);
}
free(tmp_file);
parray_walk(links, pgFileFree);
parray_free(links);
}
/*
* Print backup content list.
*/
@ -817,20 +1188,25 @@ print_file_list(FILE *out, const parray *files, const char *root)
if (root && strstr(path, root) == path)
path = GetRelativePath(path, root);
fprintf(out, "{\"path\":\"%s\", \"size\":\"%lu\",\"mode\":\"%u\","
"\"is_datafile\":\"%u\", \"is_cfs\":\"%u\", \"crc\":\"%u\","
fprintf(out, "{\"path\":\"%s\", \"size\":\"" INT64_FORMAT "\", "
"\"mode\":\"%u\", \"is_datafile\":\"%u\", "
"\"is_cfs\":\"%u\", \"crc\":\"%u\", "
"\"compress_alg\":\"%s\"",
path, (unsigned long) file->write_size, file->mode,
file->is_datafile?1:0, file->is_cfs?1:0, file->crc,
path, file->write_size, file->mode,
file->is_datafile ? 1 : 0, file->is_cfs ? 1 : 0, file->crc,
deparse_compress_alg(file->compress_alg));
if (file->is_datafile)
fprintf(out, ",\"segno\":\"%d\"", file->segno);
#ifndef WIN32
if (S_ISLNK(file->mode))
#else
if (pgwin32_is_junction(file->path))
#endif
fprintf(out, ",\"linked\":\"%s\"", file->linked);
if (file->n_blocks != -1)
if (file->n_blocks != BLOCKNUM_INVALID)
fprintf(out, ",\"n_blocks\":\"%i\"", file->n_blocks);
fprintf(out, "}\n");
@ -852,23 +1228,25 @@ print_file_list(FILE *out, const parray *files, const char *root)
* {"name1":"value1", "name2":"value2"}
*
* The value will be returned to "value_str" as string if it is not NULL. If it
* is NULL the value will be returned to "value_ulong" as unsigned long.
* is NULL the value will be returned to "value_int64" as int64.
*
* Returns true if the value was found in the line.
*/
static void
static bool
get_control_value(const char *str, const char *name,
char *value_str, uint64 *value_uint64, bool is_mandatory)
char *value_str, int64 *value_int64, bool is_mandatory)
{
int state = CONTROL_WAIT_NAME;
char *name_ptr = (char *) name;
char *buf = (char *) str;
char buf_uint64[32], /* Buffer for "value_uint64" */
*buf_uint64_ptr = buf_uint64;
char buf_int64[32], /* Buffer for "value_int64" */
*buf_int64_ptr = buf_int64;
/* Set default values */
if (value_str)
*value_str = '\0';
else if (value_uint64)
*value_uint64 = 0;
else if (value_int64)
*value_int64 = 0;
while (*buf)
{
@ -903,7 +1281,7 @@ get_control_value(const char *str, const char *name,
if (*buf == '"')
{
state = CONTROL_INVALUE;
buf_uint64_ptr = buf_uint64;
buf_int64_ptr = buf_int64;
}
else if (IsAlpha(*buf))
goto bad_format;
@ -916,19 +1294,19 @@ get_control_value(const char *str, const char *name,
{
*value_str = '\0';
}
else if (value_uint64)
else if (value_int64)
{
/* Length of buf_uint64 should not be greater than 31 */
if (buf_uint64_ptr - buf_uint64 >= 32)
if (buf_int64_ptr - buf_int64 >= 32)
elog(ERROR, "field \"%s\" is out of range in the line %s of the file %s",
name, str, DATABASE_FILE_LIST);
*buf_uint64_ptr = '\0';
if (!parse_uint64(buf_uint64, value_uint64, 0))
*buf_int64_ptr = '\0';
if (!parse_int64(buf_int64, value_int64, 0))
goto bad_format;
}
return;
return true;
}
else
{
@ -939,8 +1317,8 @@ get_control_value(const char *str, const char *name,
}
else
{
*buf_uint64_ptr = *buf;
buf_uint64_ptr++;
*buf_int64_ptr = *buf;
buf_int64_ptr++;
}
}
break;
@ -964,11 +1342,12 @@ get_control_value(const char *str, const char *name,
if (is_mandatory)
elog(ERROR, "field \"%s\" is not found in the line %s of the file %s",
name, str, DATABASE_FILE_LIST);
return;
return false;
bad_format:
elog(ERROR, "%s file has invalid format in line %s",
DATABASE_FILE_LIST, str);
return false; /* Make compiler happy */
}
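For context, each line of DATABASE_FILE_LIST is one flat JSON-like object, and get_control_value() scans it with a small state machine rather than a full JSON parser. The sketch below pulls a single quoted value out of such a line purely to illustrate the format; get_value() is a naive simplification, not the real parser:

#include <stdio.h>
#include <string.h>

/* Sketch: extract "name":"value" from a one-line record. A plain
 * substring search is good enough for a demo, unlike the real code. */
static int
get_value(const char *line, const char *name, char *out, size_t outlen)
{
    char        key[64];
    const char *p;
    size_t      i = 0;

    snprintf(key, sizeof(key), "\"%s\":\"", name);
    p = strstr(line, key);
    if (p == NULL)
        return 0;                   /* optional field is absent */
    p += strlen(key);
    while (*p && *p != '"' && i < outlen - 1)
        out[i++] = *p++;
    out[i] = '\0';
    return 1;
}

int
main(void)
{
    const char *line =
        "{\"path\":\"base/1/1234\", \"size\":\"8192\", \"mode\":\"33184\", "
        "\"is_datafile\":\"1\", \"is_cfs\":\"0\", \"crc\":\"123456\", "
        "\"compress_alg\":\"none\"}";
    char        value[256];

    if (get_value(line, "size", value, sizeof(value)))
        printf("size = %s\n", value);   /* prints: size = 8192 */
    if (!get_value(line, "linked", value, sizeof(value)))
        printf("no \"linked\" field, as for regular files\n");
    return 0;
}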
/*
@ -995,7 +1374,7 @@ dir_read_file_list(const char *root, const char *file_txt)
char filepath[MAXPGPATH];
char linked[MAXPGPATH];
char compress_alg_string[MAXPGPATH];
uint64 write_size,
int64 write_size,
mode, /* bit length of mode_t depends on platforms */
is_datafile,
is_cfs,
@ -1010,12 +1389,7 @@ dir_read_file_list(const char *root, const char *file_txt)
get_control_value(buf, "is_datafile", NULL, &is_datafile, true);
get_control_value(buf, "is_cfs", NULL, &is_cfs, false);
get_control_value(buf, "crc", NULL, &crc, true);
/* optional fields */
get_control_value(buf, "linked", linked, NULL, false);
get_control_value(buf, "segno", NULL, &segno, false);
get_control_value(buf, "compress_alg", compress_alg_string, NULL, false);
get_control_value(buf, "n_blocks", NULL, &n_blocks, false);
if (root)
join_path_components(filepath, root, path);
@ -1024,16 +1398,25 @@ dir_read_file_list(const char *root, const char *file_txt)
file = pgFileInit(filepath);
file->write_size = (size_t) write_size;
file->write_size = (int64) write_size;
file->mode = (mode_t) mode;
file->is_datafile = is_datafile ? true : false;
file->is_cfs = is_cfs ? true : false;
file->crc = (pg_crc32) crc;
file->compress_alg = parse_compress_alg(compress_alg_string);
if (linked[0])
/*
* Optional fields
*/
if (get_control_value(buf, "linked", linked, NULL, false) && linked[0])
file->linked = pgut_strdup(linked);
file->segno = (int) segno;
file->n_blocks = (int) n_blocks;
if (get_control_value(buf, "segno", NULL, &segno, false))
file->segno = (int) segno;
if (get_control_value(buf, "n_blocks", NULL, &n_blocks, false))
file->n_blocks = (int) n_blocks;
parray_append(files, file);
}
@ -1095,3 +1478,14 @@ fileExists(const char *path)
else
return true;
}
size_t
pgFileSize(const char *path)
{
struct stat buf;
if (stat(path, &buf) == -1)
elog(ERROR, "Cannot stat file \"%s\": %s", path, strerror(errno));
return buf.st_size;
}

src/help.c

@ -14,6 +14,7 @@ static void help_restore(void);
static void help_validate(void);
static void help_show(void);
static void help_delete(void);
static void help_merge(void);
static void help_set_config(void);
static void help_show_config(void);
static void help_add_instance(void);
@ -36,6 +37,8 @@ help_command(char *command)
help_show();
else if (strcmp(command, "delete") == 0)
help_delete();
else if (strcmp(command, "merge") == 0)
help_merge();
else if (strcmp(command, "set-config") == 0)
help_set_config();
else if (strcmp(command, "show-config") == 0)
@ -56,7 +59,7 @@ help_command(char *command)
|| strcmp(command, "-V") == 0)
printf(_("No help page for \"%s\" command. Try pg_probackup help\n"), command);
else
printf(_("Unknown command. Try pg_probackup help\n"));
printf(_("Unknown command \"%s\". Try pg_probackup help\n"), command);
exit(0);
}
@ -87,8 +90,10 @@ help_pg_probackup(void)
printf(_(" [--master-db=db_name] [--master-host=host_name]\n"));
printf(_(" [--master-port=port] [--master-user=user_name]\n"));
printf(_(" [--replica-timeout=timeout]\n"));
printf(_(" [--archive-timeout=timeout]\n"));
printf(_("\n %s show-config -B backup-dir --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [--format=format]\n"));
printf(_("\n %s backup -B backup-path -b backup-mode --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [-C] [--stream [-S slot-name]] [--backup-pg-log]\n"));
@ -111,26 +116,31 @@ help_pg_probackup(void)
printf(_(" [-w --no-password] [-W --password]\n"));
printf(_(" [--master-db=db_name] [--master-host=host_name]\n"));
printf(_(" [--master-port=port] [--master-user=user_name]\n"));
printf(_(" [--replica-timeout=timeout]\n\n"));
printf(_(" [--replica-timeout=timeout]\n"));
printf(_("\n %s restore -B backup-dir --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [-D pgdata-dir] [-i backup-id] [--progress]\n"));
printf(_(" [--time=time|--xid=xid [--inclusive=boolean]]\n"));
printf(_(" [--time=time|--xid=xid|--lsn=lsn [--inclusive=boolean]]\n"));
printf(_(" [--timeline=timeline] [-T OLDDIR=NEWDIR]\n"));
printf(_(" [--immediate] [--recovery-target-name=target-name]\n"));
printf(_(" [--recovery-target-action=pause|promote|shutdown]\n"));
printf(_(" [--restore-as-replica]\n"));
printf(_(" [--no-validate]\n"));
printf(_("\n %s validate -B backup-dir [--instance=instance_name]\n"), PROGRAM_NAME);
printf(_(" [-i backup-id] [--progress]\n"));
printf(_(" [--time=time|--xid=xid [--inclusive=boolean]]\n"));
printf(_(" [--time=time|--xid=xid|--lsn=lsn [--inclusive=boolean]]\n"));
printf(_(" [--recovery-target-name=target-name]\n"));
printf(_(" [--timeline=timeline]\n"));
printf(_("\n %s show -B backup-dir\n"), PROGRAM_NAME);
printf(_(" [--instance=instance_name [-i backup-id]]\n"));
printf(_(" [--format=format]\n"));
printf(_("\n %s delete -B backup-dir --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [--wal] [-i backup-id | --expired]\n"));
printf(_("\n %s merge -B backup-dir --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" -i backup-id\n"));
printf(_("\n %s add-instance -B backup-dir -D pgdata-dir\n"), PROGRAM_NAME);
printf(_(" --instance=instance_name\n"));
@ -253,7 +263,7 @@ help_backup(void)
printf(_(" --master-db=db_name database to connect to master\n"));
printf(_(" --master-host=host_name database server host of master\n"));
printf(_(" --master-port=port database server port of master\n"));
printf(_(" --replica-timeout=timeout wait timeout for WAL segment streaming through replication in seconds\n"));
printf(_(" --replica-timeout=timeout wait timeout for WAL segment streaming through replication (default: 5min)\n"));
}
static void
@ -261,11 +271,11 @@ help_restore(void)
{
printf(_("%s restore -B backup-dir --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [-D pgdata-dir] [-i backup-id] [--progress]\n"));
printf(_(" [--time=time|--xid=xid [--inclusive=boolean]]\n"));
printf(_(" [--time=time|--xid=xid|--lsn=lsn [--inclusive=boolean]]\n"));
printf(_(" [--timeline=timeline] [-T OLDDIR=NEWDIR]\n"));
printf(_(" [--immediate] [--recovery-target-name=target-name]\n"));
printf(_(" [--recovery-target-action=pause|promote|shutdown]\n"));
printf(_(" [--restore-as-replica]\n\n"));
printf(_(" [--restore-as-replica] [--no-validate]\n\n"));
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
printf(_(" --instance=instance_name name of the instance\n"));
@ -276,6 +286,7 @@ help_restore(void)
printf(_(" --progress show progress\n"));
printf(_(" --time=time time stamp up to which recovery will proceed\n"));
printf(_(" --xid=xid transaction ID up to which recovery will proceed\n"));
printf(_(" --lsn=lsn LSN of the write-ahead log location up to which recovery will proceed\n"));
printf(_(" --inclusive=boolean whether we stop just after the recovery target\n"));
printf(_(" --timeline=timeline recovering into a particular timeline\n"));
printf(_(" -T, --tablespace-mapping=OLDDIR=NEWDIR\n"));
@ -290,6 +301,7 @@ help_restore(void)
printf(_(" -R, --restore-as-replica write a minimal recovery.conf in the output directory\n"));
printf(_(" to ease setting up a standby server\n"));
printf(_(" --no-validate disable backup validation during restore\n"));
printf(_("\n Logging options:\n"));
printf(_(" --log-level-console=log-level-console\n"));
@ -318,7 +330,7 @@ help_validate(void)
{
printf(_("%s validate -B backup-dir [--instance=instance_name]\n"), PROGRAM_NAME);
printf(_(" [-i backup-id] [--progress]\n"));
printf(_(" [--time=time|--xid=xid [--inclusive=boolean]]\n"));
printf(_(" [--time=time|--xid=xid|--lsn=lsn [--inclusive=boolean]]\n"));
printf(_(" [--timeline=timeline]\n\n"));
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
@ -328,8 +340,11 @@ help_validate(void)
printf(_(" --progress show progress\n"));
printf(_(" --time=time time stamp up to which recovery will proceed\n"));
printf(_(" --xid=xid transaction ID up to which recovery will proceed\n"));
printf(_(" --lsn=lsn LSN of the write-ahead log location up to which recovery will proceed\n"));
printf(_(" --inclusive=boolean whether we stop just after the recovery target\n"));
printf(_(" --timeline=timeline recovering into a particular timeline\n"));
printf(_(" --recovery-target-name=target-name\n"));
printf(_(" the named restore point to which recovery will proceed\n"));
printf(_("\n Logging options:\n"));
printf(_(" --log-level-console=log-level-console\n"));
@ -357,11 +372,13 @@ static void
help_show(void)
{
printf(_("%s show -B backup-dir\n"), PROGRAM_NAME);
printf(_(" [--instance=instance_name [-i backup-id]]\n\n"));
printf(_(" [--instance=instance_name [-i backup-id]]\n"));
printf(_(" [--format=format]\n\n"));
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
printf(_(" --instance=instance_name show info about specific intstance\n"));
printf(_(" -i, --backup-id=backup-id show info about specific backups\n"));
printf(_(" --format=format show format=PLAIN|JSON\n"));
}
static void
@ -399,6 +416,48 @@ help_delete(void)
printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n"));
}
static void
help_merge(void)
{
printf(_("%s merge -B backup-dir --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" -i backup-id [-j num-threads] [--progress]\n"));
printf(_(" [--log-level-console=log-level-console]\n"));
printf(_(" [--log-level-file=log-level-file]\n"));
printf(_(" [--log-filename=log-filename]\n"));
printf(_(" [--error-log-filename=error-log-filename]\n"));
printf(_(" [--log-directory=log-directory]\n"));
printf(_(" [--log-rotation-size=log-rotation-size]\n"));
printf(_(" [--log-rotation-age=log-rotation-age]\n\n"));
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
printf(_(" --instance=instance_name name of the instance\n"));
printf(_(" -i, --backup-id=backup-id backup to merge\n"));
printf(_(" -j, --threads=NUM number of parallel threads\n"));
printf(_(" --progress show progress\n"));
printf(_("\n Logging options:\n"));
printf(_(" --log-level-console=log-level-console\n"));
printf(_(" level for console logging (default: info)\n"));
printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n"));
printf(_(" --log-level-file=log-level-file\n"));
printf(_(" level for file logging (default: off)\n"));
printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n"));
printf(_(" --log-filename=log-filename\n"));
printf(_(" filename for file logging (default: 'pg_probackup.log')\n"));
printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log\n"));
printf(_(" --error-log-filename=error-log-filename\n"));
printf(_(" filename for error logging (default: none)\n"));
printf(_(" --log-directory=log-directory\n"));
printf(_(" directory for file logging (default: BACKUP_PATH/log)\n"));
printf(_(" --log-rotation-size=log-rotation-size\n"));
printf(_(" rotate logfile if its size exceed this value; 0 disables; (default: 0)\n"));
printf(_(" available units: 'KB', 'MB', 'GB', 'TB' (default: KB)\n"));
printf(_(" --log-rotation-age=log-rotation-age\n"));
printf(_(" rotate logfile if its age exceed this value; 0 disables; (default: 0)\n"));
printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n"));
}
static void
help_set_config(void)
{
@ -418,6 +477,7 @@ help_set_config(void)
printf(_(" [--master-db=db_name] [--master-host=host_name]\n"));
printf(_(" [--master-port=port] [--master-user=user_name]\n"));
printf(_(" [--replica-timeout=timeout]\n\n"));
printf(_(" [--archive-timeout=timeout]\n\n"));
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
printf(_(" --instance=instance_name name of the instance\n"));
@ -466,16 +526,20 @@ help_set_config(void)
printf(_(" --master-db=db_name database to connect to master\n"));
printf(_(" --master-host=host_name database server host of master\n"));
printf(_(" --master-port=port database server port of master\n"));
printf(_(" --replica-timeout=timeout wait timeout for WAL segment streaming through replication\n"));
printf(_(" --replica-timeout=timeout wait timeout for WAL segment streaming through replication (default: 5min)\n"));
printf(_("\n Archive options:\n"));
printf(_(" --archive-timeout=timeout wait timeout for WAL segment archiving (default: 5min)\n"));
}
static void
help_show_config(void)
{
printf(_("%s show-config -B backup-dir --instance=instance_name\n\n"), PROGRAM_NAME);
printf(_("%s show-config -B backup-dir --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [--format=format]\n\n"));
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
printf(_(" --instance=instance_name name of the instance\n"));
printf(_(" --format=format show format=PLAIN|JSON\n"));
}
static void

src/merge.c (new file)

@ -0,0 +1,525 @@
/*-------------------------------------------------------------------------
*
* merge.c: merge FULL and incremental backups
*
* Copyright (c) 2018, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#include "pg_probackup.h"
#include <sys/stat.h>
#include <unistd.h>
#include "utils/thread.h"
typedef struct
{
parray *to_files;
parray *files;
pgBackup *to_backup;
pgBackup *from_backup;
const char *to_root;
const char *from_root;
/*
* Return value from the thread.
* 0 means there is no error, 1 - there is an error.
*/
int ret;
} merge_files_arg;
static void merge_backups(pgBackup *backup, pgBackup *next_backup);
static void *merge_files(void *arg);
/*
* Implementation of MERGE command.
*
* - Find target and its parent full backup
* - Merge data files of target, parent and intermediate backups
* - Remove unnecessary files, which no longer exist in the target backup
*/
void
do_merge(time_t backup_id)
{
parray *backups;
pgBackup *dest_backup = NULL;
pgBackup *full_backup = NULL;
time_t prev_parent = INVALID_BACKUP_ID;
int i;
int dest_backup_idx = 0;
int full_backup_idx = 0;
if (backup_id == INVALID_BACKUP_ID)
elog(ERROR, "required parameter is not specified: --backup-id");
if (instance_name == NULL)
elog(ERROR, "required parameter is not specified: --instance");
elog(LOG, "Merge started");
catalog_lock();
/* Get list of all backups sorted in order of descending start time */
backups = catalog_get_backup_list(INVALID_BACKUP_ID);
/* Find destination and parent backups */
for (i = 0; i < parray_num(backups); i++)
{
pgBackup *backup = (pgBackup *) parray_get(backups, i);
if (backup->start_time > backup_id)
continue;
else if (backup->start_time == backup_id && !dest_backup)
{
if (backup->status != BACKUP_STATUS_OK)
elog(ERROR, "Backup %s has status: %s",
base36enc(backup->start_time), status2str(backup->status));
if (backup->backup_mode == BACKUP_MODE_FULL)
elog(ERROR, "Backup %s if full backup",
base36enc(backup->start_time));
dest_backup = backup;
dest_backup_idx = i;
}
else
{
Assert(dest_backup);
if (backup->start_time != prev_parent)
continue;
if (backup->status != BACKUP_STATUS_OK)
elog(ERROR, "Skipping backup %s, because it has non-valid status: %s",
base36enc(backup->start_time), status2str(backup->status));
/* If we already found dest_backup, look for full backup */
if (dest_backup && backup->backup_mode == BACKUP_MODE_FULL)
{
if (backup->status != BACKUP_STATUS_OK)
elog(ERROR, "Parent full backup %s for the given backup %s has status: %s",
base36enc_dup(backup->start_time),
base36enc_dup(dest_backup->start_time),
status2str(backup->status));
full_backup = backup;
full_backup_idx = i;
/* Found target and full backups, so break the loop */
break;
}
}
prev_parent = backup->parent_backup;
}
if (dest_backup == NULL)
elog(ERROR, "Target backup %s was not found", base36enc(backup_id));
if (full_backup == NULL)
elog(ERROR, "Parent full backup for the given backup %s was not found",
base36enc(backup_id));
Assert(full_backup_idx != dest_backup_idx);
/*
* Found target and full backups, merge them and intermediate backups
*/
for (i = full_backup_idx; i > dest_backup_idx; i--)
{
pgBackup *to_backup = (pgBackup *) parray_get(backups, i);
pgBackup *from_backup = (pgBackup *) parray_get(backups, i - 1);
merge_backups(to_backup, from_backup);
}
/* cleanup */
parray_walk(backups, pgBackupFree);
parray_free(backups);
elog(LOG, "Merge completed");
}
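Since catalog_get_backup_list() sorts by descending start time, dest_backup_idx is always smaller than full_backup_idx, and the loop above folds the chain pairwise from the FULL backup toward the target. A runnable illustration of the resulting order, assuming a hypothetical chain FULL <- PAGE1 <- PAGE2 with PAGE2 as the merge target:

#include <stdio.h>

int
main(void)
{
    /* Hypothetical chain, newest first, mirroring the catalog order. */
    const char *backups[] = {"PAGE2", "PAGE1", "FULL"};
    int         dest_backup_idx = 0;    /* merge target    */
    int         full_backup_idx = 2;    /* its parent FULL */
    int         i;

    for (i = full_backup_idx; i > dest_backup_idx; i--)
        printf("merge_backups(%s, %s)\n", backups[i], backups[i - 1]);

    /* Prints:
     *   merge_backups(FULL, PAGE1)   -- FULL absorbs PAGE1
     *   merge_backups(PAGE1, PAGE2)  -- that slot now holds the merged FULL
     */
    return 0;
}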
/*
* Merge two backups' data files using threads.
* - move instance files from from_backup to to_backup
* - remove unnecessary directories and files from to_backup
* - update metadata of from_backup, it becomes a FULL backup
*/
static void
merge_backups(pgBackup *to_backup, pgBackup *from_backup)
{
char *to_backup_id = base36enc_dup(to_backup->start_time),
*from_backup_id = base36enc_dup(from_backup->start_time);
char to_backup_path[MAXPGPATH],
to_database_path[MAXPGPATH],
from_backup_path[MAXPGPATH],
from_database_path[MAXPGPATH],
control_file[MAXPGPATH];
parray *files,
*to_files;
pthread_t *threads;
merge_files_arg *threads_args;
int i;
bool merge_isok = true;
elog(LOG, "Merging backup %s with backup %s", from_backup_id, to_backup_id);
to_backup->status = BACKUP_STATUS_MERGING;
pgBackupWriteBackupControlFile(to_backup);
from_backup->status = BACKUP_STATUS_MERGING;
pgBackupWriteBackupControlFile(from_backup);
/*
* Make backup paths.
*/
pgBackupGetPath(to_backup, to_backup_path, lengthof(to_backup_path), NULL);
pgBackupGetPath(to_backup, to_database_path, lengthof(to_database_path),
DATABASE_DIR);
pgBackupGetPath(from_backup, from_backup_path, lengthof(from_backup_path), NULL);
pgBackupGetPath(from_backup, from_database_path, lengthof(from_database_path),
DATABASE_DIR);
create_data_directories(to_database_path, from_backup_path, false);
/*
* Get list of files which will be modified or removed.
*/
pgBackupGetPath(to_backup, control_file, lengthof(control_file),
DATABASE_FILE_LIST);
to_files = dir_read_file_list(from_database_path, /* Use from_database_path
* so root path will be
* equal to 'files' */
control_file);
/* To delete from leaf, sort in reversed order */
parray_qsort(to_files, pgFileComparePathDesc);
/*
* Get list of files which need to be moved.
*/
pgBackupGetPath(from_backup, control_file, lengthof(control_file),
DATABASE_FILE_LIST);
files = dir_read_file_list(from_database_path, control_file);
/* sort by size for load balancing */
parray_qsort(files, pgFileCompareSize);
threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads);
threads_args = (merge_files_arg *) palloc(sizeof(merge_files_arg) * num_threads);
/* Setup threads */
for (i = 0; i < parray_num(files); i++)
{
pgFile *file = (pgFile *) parray_get(files, i);
pg_atomic_init_flag(&file->lock);
}
for (i = 0; i < num_threads; i++)
{
merge_files_arg *arg = &(threads_args[i]);
arg->to_files = to_files;
arg->files = files;
arg->to_backup = to_backup;
arg->from_backup = from_backup;
arg->to_root = to_database_path;
arg->from_root = from_database_path;
/* By default there is an error */
arg->ret = 1;
elog(VERBOSE, "Start thread: %d", i);
pthread_create(&threads[i], NULL, merge_files, arg);
}
/* Wait threads */
for (i = 0; i < num_threads; i++)
{
pthread_join(threads[i], NULL);
if (threads_args[i].ret == 1)
merge_isok = false;
}
if (!merge_isok)
elog(ERROR, "Data files merging failed");
/*
* Files were copied into to_backup and deleted from from_backup. Remove
* remaining directories from from_backup.
*/
parray_qsort(files, pgFileComparePathDesc);
for (i = 0; i < parray_num(files); i++)
{
pgFile *file = (pgFile *) parray_get(files, i);
if (!S_ISDIR(file->mode))
continue;
if (rmdir(file->path))
elog(ERROR, "Could not remove directory \"%s\": %s",
file->path, strerror(errno));
}
if (rmdir(from_database_path))
elog(ERROR, "Could not remove directory \"%s\": %s",
from_database_path, strerror(errno));
if (unlink(control_file))
elog(ERROR, "Could not remove file \"%s\": %s",
control_file, strerror(errno));
pgBackupGetPath(from_backup, control_file, lengthof(control_file),
BACKUP_CONTROL_FILE);
if (unlink(control_file))
elog(ERROR, "Could not remove file \"%s\": %s",
control_file, strerror(errno));
if (rmdir(from_backup_path))
elog(ERROR, "Could not remove directory \"%s\": %s",
from_backup_path, strerror(errno));
/*
* Delete files which are not in from_backup file list.
*/
for (i = 0; i < parray_num(to_files); i++)
{
pgFile *file = (pgFile *) parray_get(to_files, i);
if (parray_bsearch(files, file, pgFileComparePathDesc) == NULL)
{
pgFileDelete(file);
elog(LOG, "Deleted \"%s\"", file->path);
}
}
/*
* Rename FULL backup directory.
*/
if (rename(to_backup_path, from_backup_path) == -1)
elog(ERROR, "Could not rename directory \"%s\" to \"%s\": %s",
to_backup_path, from_backup_path, strerror(errno));
/*
* Update to_backup metadata.
*/
pgBackupCopy(to_backup, from_backup);
/* Correct metadata */
to_backup->backup_mode = BACKUP_MODE_FULL;
to_backup->status = BACKUP_STATUS_OK;
to_backup->parent_backup = INVALID_BACKUP_ID;
/* Compute summary of size of regular files in the backup */
to_backup->data_bytes = 0;
for (i = 0; i < parray_num(files); i++)
{
pgFile *file = (pgFile *) parray_get(files, i);
if (S_ISDIR(file->mode))
to_backup->data_bytes += 4096;
/* Count the amount of the data actually copied */
else if (S_ISREG(file->mode))
to_backup->data_bytes += file->write_size;
}
/* compute size of wal files of this backup stored in the archive */
if (!current.stream)
to_backup->wal_bytes = XLOG_SEG_SIZE *
(to_backup->stop_lsn / XLogSegSize - to_backup->start_lsn / XLogSegSize + 1);
else
to_backup->wal_bytes = BYTES_INVALID;
pgBackupWriteFileList(to_backup, files, from_database_path);
pgBackupWriteBackupControlFile(to_backup);
/* Cleanup */
pfree(threads_args);
pfree(threads);
parray_walk(to_files, pgFileFree);
parray_free(to_files);
parray_walk(files, pgFileFree);
parray_free(files);
pfree(to_backup_id);
pfree(from_backup_id);
}
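The wal_bytes computation above charges the merged backup for every whole WAL segment spanned by [start_lsn, stop_lsn]. A worked example with the common 16MB segment size (the LSN values are made up for the calculation):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    const uint64_t seg_size = 16 * 1024 * 1024;     /* 16MB segments */
    uint64_t    start_lsn = UINT64_C(0x03000000);   /* start of segment 3 */
    uint64_t    stop_lsn = UINT64_C(0x05000000);    /* start of segment 5 */

    /* (5 - 3 + 1) segments * 16MB = 48MB of archived WAL */
    uint64_t    wal_bytes = seg_size *
        (stop_lsn / seg_size - start_lsn / seg_size + 1);

    printf("%llu\n", (unsigned long long) wal_bytes);   /* 50331648 */
    return 0;
}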
/*
* Thread worker of merge_backups().
*/
static void *
merge_files(void *arg)
{
merge_files_arg *argument = (merge_files_arg *) arg;
pgBackup *to_backup = argument->to_backup;
pgBackup *from_backup = argument->from_backup;
char tmp_file_path[MAXPGPATH];
int i,
num_files = parray_num(argument->files);
int to_root_len = strlen(argument->to_root);
if (to_backup->compress_alg == PGLZ_COMPRESS ||
to_backup->compress_alg == ZLIB_COMPRESS)
join_path_components(tmp_file_path, argument->to_root, "tmp");
for (i = 0; i < num_files; i++)
{
pgFile *file = (pgFile *) parray_get(argument->files, i);
if (!pg_atomic_test_set_flag(&file->lock))
continue;
/* check for interrupt */
if (interrupted)
elog(ERROR, "Interrupted during merging backups");
if (progress)
elog(LOG, "Progress: (%d/%d). Process file \"%s\"",
i + 1, num_files, file->path);
/*
* Skip files which haven't changed since previous backup. But in case
* of DELTA backup we should consider n_blocks to truncate the target
* backup.
*/
if (file->write_size == BYTES_INVALID &&
file->n_blocks == -1)
{
elog(VERBOSE, "Skip merging file \"%s\", the file didn't change",
file->path);
/*
* If the file wasn't changed in PAGE backup, retrieve its
* write_size from previous FULL backup.
*/
if (S_ISREG(file->mode))
{
pgFile **res_file;
res_file = parray_bsearch(argument->to_files, file,
pgFileComparePathDesc);
if (res_file && *res_file)
{
file->compress_alg = (*res_file)->compress_alg;
file->write_size = (*res_file)->write_size;
file->crc = (*res_file)->crc;
}
}
continue;
}
/* Directories were created before */
if (S_ISDIR(file->mode))
continue;
/*
* Move the file. We need to decompress it and compress again if
* necessary.
*/
elog(VERBOSE, "Moving file \"%s\", is_datafile %d, is_cfs %d",
file->path, file->is_datafile, file->is_cfs);
if (file->is_datafile && !file->is_cfs)
{
char to_path_tmp[MAXPGPATH]; /* Path of target file */
join_path_components(to_path_tmp, argument->to_root,
file->path + to_root_len + 1);
/*
* We need a more complicated algorithm if the target file exists and is
* compressed.
*/
if (to_backup->compress_alg == PGLZ_COMPRESS ||
to_backup->compress_alg == ZLIB_COMPRESS)
{
char *prev_path;
/* Start the magic */
/*
* Merge files:
* - decompress first file
* - decompress second file and merge with first decompressed file
* - compress result file
*/
elog(VERBOSE, "File is compressed, decompress to the temporary file \"%s\"",
tmp_file_path);
prev_path = file->path;
/*
* We need to decompress target file only if it exists.
*/
if (fileExists(to_path_tmp))
{
/*
* file->path points to the file in from_root directory. But we
* need the file in directory to_root.
*/
file->path = to_path_tmp;
/* Decompress first/target file */
restore_data_file(tmp_file_path, file, false, false);
file->path = prev_path;
}
/* Merge second/source file with first/target file */
restore_data_file(tmp_file_path, file,
from_backup->backup_mode == BACKUP_MODE_DIFF_DELTA,
false);
elog(VERBOSE, "Compress file and save it to the directory \"%s\"",
argument->to_root);
/* Again we need to change the path */
file->path = tmp_file_path;
/* backup_data_file() requires file size to calculate nblocks */
file->size = pgFileSize(file->path);
/* Now we can compress the file */
backup_data_file(NULL, /* We shouldn't need 'arguments' here */
to_path_tmp, file,
to_backup->start_lsn,
to_backup->backup_mode,
to_backup->compress_alg,
to_backup->compress_level);
file->path = prev_path;
/* We can remove temporary file now */
if (unlink(tmp_file_path))
elog(ERROR, "Could not remove temporary file \"%s\": %s",
tmp_file_path, strerror(errno));
}
/*
* Otherwise merging algorithm is simpler.
*/
else
{
/* We can merge in-place here */
restore_data_file(to_path_tmp, file,
from_backup->backup_mode == BACKUP_MODE_DIFF_DELTA,
true);
/*
* We need to calculate write_size, restore_data_file() doesn't
* do that.
*/
file->write_size = pgFileSize(to_path_tmp);
file->crc = pgFileGetCRC(to_path_tmp);
}
pgFileDelete(file);
}
else
move_file(argument->from_root, argument->to_root, file);
if (file->write_size != BYTES_INVALID)
elog(LOG, "Moved file \"%s\": " INT64_FORMAT " bytes",
file->path, file->write_size);
}
/* Data files merging is successful */
argument->ret = 0;
return NULL;
}

src/parsexlog.c

@ -5,24 +5,23 @@
*
* Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
* Portions Copyright (c) 2015-2017, Postgres Professional
* Portions Copyright (c) 2015-2018, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#include "postgres_fe.h"
#include "pg_probackup.h"
#include <time.h>
#include <unistd.h>
#ifdef HAVE_LIBZ
#include <zlib.h>
#endif
#include "commands/dbcommands_xlog.h"
#include "catalog/storage_xlog.h"
#include "access/transam.h"
#ifdef HAVE_LIBZ
#include <zlib.h>
#endif
#include "utils/thread.h"
/*
* RmgrNames is an array of resource manager names, to make error messages
@ -85,74 +84,134 @@ typedef struct xl_xact_abort
static void extractPageInfo(XLogReaderState *record);
static bool getRecordTimestamp(XLogReaderState *record, TimestampTz *recordXtime);
static int xlogreadfd = -1;
static XLogSegNo xlogreadsegno = -1;
static char xlogfpath[MAXPGPATH];
static bool xlogexists = false;
#ifdef HAVE_LIBZ
static gzFile gz_xlogread = NULL;
static char gz_xlogfpath[MAXPGPATH];
#endif
typedef struct XLogPageReadPrivate
{
const char *archivedir;
TimeLineID tli;
bool manual_switch;
bool need_switch;
int xlogfile;
XLogSegNo xlogsegno;
char xlogpath[MAXPGPATH];
bool xlogexists;
#ifdef HAVE_LIBZ
gzFile gz_xlogfile;
char gz_xlogpath[MAXPGPATH];
#endif
} XLogPageReadPrivate;
/* An argument for a thread function */
typedef struct
{
int thread_num;
XLogPageReadPrivate private_data;
XLogRecPtr startpoint;
XLogRecPtr endpoint;
XLogSegNo endSegNo;
/*
* Return value from the thread.
* 0 means there is no error, 1 - there is an error.
*/
int ret;
} xlog_thread_arg;
static int SimpleXLogPageRead(XLogReaderState *xlogreader,
XLogRecPtr targetPagePtr,
int reqLen, XLogRecPtr targetRecPtr, char *readBuf,
TimeLineID *pageTLI);
static XLogReaderState *InitXLogPageRead(XLogPageReadPrivate *private_data,
const char *archivedir,
TimeLineID tli, bool allocate_reader);
static void CleanupXLogPageRead(XLogReaderState *xlogreader);
static void PrintXLogCorruptionMsg(XLogPageReadPrivate *private_data,
int elevel);
static XLogSegNo nextSegNoToRead = 0;
static pthread_mutex_t wal_segment_mutex = PTHREAD_MUTEX_INITIALIZER;
/*
* Read WAL from the archive directory, from 'startpoint' to 'endpoint' on the
* given timeline. Collect data blocks touched by the WAL records into a page map.
*
* If **prev_segno** is true then read all segments up to **endpoint** segment
* minus one. Else read all segments up to **endpoint** segment.
* extractPageMap() worker.
*/
void
extractPageMap(const char *archivedir, XLogRecPtr startpoint, TimeLineID tli,
XLogRecPtr endpoint, bool prev_segno, parray *files)
static void *
doExtractPageMap(void *arg)
{
size_t i;
XLogRecord *record;
xlog_thread_arg *extract_arg = (xlog_thread_arg *) arg;
XLogPageReadPrivate *private_data;
XLogReaderState *xlogreader;
XLogSegNo nextSegNo = 0;
char *errormsg;
XLogPageReadPrivate private;
XLogSegNo endSegNo,
nextSegNo = 0;
elog(LOG, "Compiling pagemap");
if (!XRecOffIsValid(startpoint))
elog(ERROR, "Invalid startpoint value %X/%X",
(uint32) (startpoint >> 32), (uint32) (startpoint));
if (!XRecOffIsValid(endpoint))
elog(ERROR, "Invalid endpoint value %X/%X",
(uint32) (endpoint >> 32), (uint32) (endpoint));
private.archivedir = archivedir;
private.tli = tli;
xlogreader = XLogReaderAllocate(&SimpleXLogPageRead, &private);
private_data = &extract_arg->private_data;
xlogreader = XLogReaderAllocate(&SimpleXLogPageRead, private_data);
if (xlogreader == NULL)
elog(ERROR, "out of memory");
XLByteToSeg(endpoint, endSegNo);
if (prev_segno)
endSegNo--;
extract_arg->startpoint = XLogFindNextRecord(xlogreader,
extract_arg->startpoint);
elog(VERBOSE, "Start LSN of thread %d: %X/%X",
extract_arg->thread_num,
(uint32) (extract_arg->startpoint >> 32),
(uint32) (extract_arg->startpoint));
/* Switch WAL segment manually below without using SimpleXLogPageRead() */
private_data->manual_switch = true;
do
{
record = XLogReadRecord(xlogreader, startpoint, &errormsg);
XLogRecord *record;
if (interrupted)
elog(ERROR, "Interrupted during WAL reading");
record = XLogReadRecord(xlogreader, extract_arg->startpoint, &errormsg);
if (record == NULL)
{
XLogRecPtr errptr;
errptr = startpoint ? startpoint : xlogreader->EndRecPtr;
/*
* Try to switch to the next WAL segment. Usually
* SimpleXLogPageRead() does it by itself. But here we need to do it
* manually to support threads.
*/
if (private_data->need_switch)
{
private_data->need_switch = false;
/* Critical section */
pthread_lock(&wal_segment_mutex);
Assert(nextSegNoToRead);
private_data->xlogsegno = nextSegNoToRead;
nextSegNoToRead++;
pthread_mutex_unlock(&wal_segment_mutex);
/* We reach the end */
if (private_data->xlogsegno > extract_arg->endSegNo)
break;
/* Adjust next record position */
XLogSegNoOffsetToRecPtr(private_data->xlogsegno, 0,
extract_arg->startpoint);
/* Skip over the page header */
extract_arg->startpoint = XLogFindNextRecord(xlogreader,
extract_arg->startpoint);
elog(VERBOSE, "Thread %d switched to LSN %X/%X",
extract_arg->thread_num,
(uint32) (extract_arg->startpoint >> 32),
(uint32) (extract_arg->startpoint));
continue;
}
errptr = extract_arg->startpoint ?
extract_arg->startpoint : xlogreader->EndRecPtr;
if (errormsg)
elog(WARNING, "could not read WAL record at %X/%X: %s",
@ -167,37 +226,127 @@ extractPageMap(const char *archivedir, XLogRecPtr startpoint, TimeLineID tli,
* start_lsn, we won't be able to build page map and PAGE backup will
* be incorrect. Stop it and throw an error.
*/
if (!xlogexists)
elog(ERROR, "WAL segment \"%s\" is absent", xlogfpath);
else if (xlogreadfd != -1)
elog(ERROR, "Possible WAL CORRUPTION."
"Error has occured during reading WAL segment \"%s\"", xlogfpath);
PrintXLogCorruptionMsg(private_data, ERROR);
}
extractPageInfo(xlogreader);
startpoint = InvalidXLogRecPtr; /* continue reading at next record */
/* continue reading at next record */
extract_arg->startpoint = InvalidXLogRecPtr;
XLByteToSeg(xlogreader->EndRecPtr, nextSegNo);
} while (nextSegNo <= endSegNo && xlogreader->EndRecPtr != endpoint);
} while (nextSegNo <= extract_arg->endSegNo &&
xlogreader->EndRecPtr < extract_arg->endpoint);
CleanupXLogPageRead(xlogreader);
XLogReaderFree(xlogreader);
if (xlogreadfd != -1)
/* Extracting is successful */
extract_arg->ret = 0;
return NULL;
}
/*
* Read WAL from the archive directory, from 'startpoint' to 'endpoint' on the
* given timeline. Collect data blocks touched by the WAL records into a page map.
*
* If **prev_seg** is true then read all segments up to **endpoint** segment
* minus one. Else read all segments up to **endpoint** segment.
*
* Pagemap extraction is processed using threads. Each thread reads a single
* WAL file.
*/
void
extractPageMap(const char *archivedir, XLogRecPtr startpoint, TimeLineID tli,
XLogRecPtr endpoint, bool prev_seg, parray *files)
{
int i;
int threads_need = 0;
XLogSegNo endSegNo;
bool extract_isok = true;
pthread_t *threads;
xlog_thread_arg *thread_args;
time_t start_time,
end_time;
elog(LOG, "Compiling pagemap");
if (!XRecOffIsValid(startpoint))
elog(ERROR, "Invalid startpoint value %X/%X",
(uint32) (startpoint >> 32), (uint32) (startpoint));
if (!XRecOffIsValid(endpoint))
elog(ERROR, "Invalid endpoint value %X/%X",
(uint32) (endpoint >> 32), (uint32) (endpoint));
XLByteToSeg(endpoint, endSegNo);
if (prev_seg)
endSegNo--;
nextSegNoToRead = 0;
time(&start_time);
threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads);
thread_args = (xlog_thread_arg *) palloc(sizeof(xlog_thread_arg)*num_threads);
/*
* Initialize thread args.
*
* Each thread works with its own WAL segment and we need to adjust
* startpoint value for each thread.
*/
for (i = 0; i < num_threads; i++)
{
close(xlogreadfd);
xlogreadfd = -1;
xlogexists = false;
InitXLogPageRead(&thread_args[i].private_data, archivedir, tli, false);
thread_args[i].thread_num = i;
thread_args[i].startpoint = startpoint;
thread_args[i].endpoint = endpoint;
thread_args[i].endSegNo = endSegNo;
/* By default there is an error */
thread_args[i].ret = 1;
/* Adjust startpoint to the next thread */
if (nextSegNoToRead == 0)
XLByteToSeg(startpoint, nextSegNoToRead);
nextSegNoToRead++;
/*
* If we need to read fewer WAL segments than num_threads, create fewer
* threads.
*/
if (nextSegNoToRead > endSegNo)
break;
XLogSegNoOffsetToRecPtr(nextSegNoToRead, 0, startpoint);
/* Skip over the page header */
startpoint += SizeOfXLogLongPHD;
threads_need++;
}
/* Mark every datafile with empty pagemap as unchanged */
for (i = 0; i < parray_num(files); i++)
/* Run threads */
for (i = 0; i < threads_need; i++)
{
pgFile *file = (pgFile *) parray_get(files, i);
if (file->is_datafile && file->pagemap.bitmap == NULL)
file->pagemap.bitmapsize = PageBitmapIsEmpty;
elog(VERBOSE, "Start WAL reader thread: %d", i);
pthread_create(&threads[i], NULL, doExtractPageMap, &thread_args[i]);
}
elog(LOG, "Pagemap compiled");
/* Wait for threads */
for (i = 0; i < threads_need; i++)
{
pthread_join(threads[i], NULL);
if (thread_args[i].ret == 1)
extract_isok = false;
}
pfree(threads);
pfree(thread_args);
time(&end_time);
if (extract_isok)
elog(LOG, "Pagemap compiled, time elapsed %.0f sec",
difftime(end_time, start_time));
else
elog(ERROR, "Pagemap compiling failed");
}
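The handoff above is a shared-counter work queue: whenever a worker exhausts its segment, it claims nextSegNoToRead under wal_segment_mutex and repositions its reader there, so segments are distributed dynamically rather than pre-partitioned. A minimal self-contained sketch of the claiming step (pthreads; all names and the segment range are illustrative):

#include <pthread.h>
#include <stdio.h>

static unsigned long next_segno = 1;    /* nextSegNoToRead analogue */
static unsigned long end_segno = 8;     /* last segment to scan     */
static pthread_mutex_t seg_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *
worker(void *arg)
{
    int         id = *(int *) arg;

    for (;;)
    {
        unsigned long segno;

        /* Critical section: claim the next unread WAL segment. */
        pthread_mutex_lock(&seg_mutex);
        segno = next_segno++;
        pthread_mutex_unlock(&seg_mutex);

        if (segno > end_segno)
            break;                  /* all segments handed out */
        printf("thread %d reads segment %lu\n", id, segno);
        /* ... decode this segment's records, fill the page map ... */
    }
    return NULL;
}

int
main(void)
{
    pthread_t   threads[4];
    int         ids[4];
    int         i;

    for (i = 0; i < 4; i++)
    {
        ids[i] = i;
        pthread_create(&threads[i], NULL, worker, &ids[i]);
    }
    for (i = 0; i < 4; i++)
        pthread_join(threads[i], NULL);
    return 0;
}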
/*
@ -205,8 +354,7 @@ extractPageMap(const char *archivedir, XLogRecPtr startpoint, TimeLineID tli,
*/
static void
validate_backup_wal_from_start_to_stop(pgBackup *backup,
char *backup_xlog_path,
TimeLineID tli)
char *backup_xlog_path, TimeLineID tli)
{
XLogRecPtr startpoint = backup->start_lsn;
XLogRecord *record;
@ -215,15 +363,7 @@ validate_backup_wal_from_start_to_stop(pgBackup *backup,
XLogPageReadPrivate private;
bool got_endpoint = false;
private.archivedir = backup_xlog_path;
private.tli = tli;
/* We will check it in the end */
xlogfpath[0] = '\0';
xlogreader = XLogReaderAllocate(&SimpleXLogPageRead, &private);
if (xlogreader == NULL)
elog(ERROR, "out of memory");
xlogreader = InitXLogPageRead(&private, backup_xlog_path, tli, true);
while (true)
{
@ -248,45 +388,27 @@ validate_backup_wal_from_start_to_stop(pgBackup *backup,
if (!got_endpoint)
{
if (xlogfpath[0] != 0)
{
/* XLOG reader couldn't read WAL segment.
* We throw a WARNING here to be able to update backup status below.
*/
if (!xlogexists)
{
elog(WARNING, "WAL segment \"%s\" is absent", xlogfpath);
}
else if (xlogreadfd != -1)
{
elog(WARNING, "Possible WAL CORRUPTION."
"Error has occured during reading WAL segment \"%s\"", xlogfpath);
}
}
PrintXLogCorruptionMsg(&private, WARNING);
/*
* If we don't have WAL between start_lsn and stop_lsn,
* the backup is definitely corrupted. Update its status.
*/
backup->status = BACKUP_STATUS_CORRUPT;
pgBackupWriteBackupControlFile(backup);
elog(WARNING, "There are not enough WAL records to consistenly restore "
"backup %s from START LSN: %X/%X to STOP LSN: %X/%X",
base36enc(backup->start_time),
(uint32) (backup->start_lsn >> 32),
(uint32) (backup->start_lsn),
(uint32) (backup->stop_lsn >> 32),
(uint32) (backup->stop_lsn));
backup->status = BACKUP_STATUS_CORRUPT;
pgBackupWriteBackupControlFile(backup);
elog(WARNING, "There are not enough WAL records to consistenly restore "
"backup %s from START LSN: %X/%X to STOP LSN: %X/%X",
base36enc(backup->start_time),
(uint32) (backup->start_lsn >> 32),
(uint32) (backup->start_lsn),
(uint32) (backup->stop_lsn >> 32),
(uint32) (backup->stop_lsn));
}
/* clean */
CleanupXLogPageRead(xlogreader);
XLogReaderFree(xlogreader);
if (xlogreadfd != -1)
{
close(xlogreadfd);
xlogreadfd = -1;
xlogexists = false;
}
}
/*
@ -299,6 +421,7 @@ validate_wal(pgBackup *backup,
const char *archivedir,
time_t target_time,
TransactionId target_xid,
XLogRecPtr target_lsn,
TimeLineID tli)
{
XLogRecPtr startpoint = backup->start_lsn;
@ -350,7 +473,7 @@ validate_wal(pgBackup *backup,
* If recovery target is provided check that we can restore backup to a
* recovery target time or xid.
*/
if (!TransactionIdIsValid(target_xid) && target_time == 0)
if (!TransactionIdIsValid(target_xid) && target_time == 0 && !XRecOffIsValid(target_lsn))
{
/* Recovery target is not given so exit */
elog(INFO, "Backup %s WAL segments are valid", backup_id);
@ -369,22 +492,15 @@ validate_wal(pgBackup *backup,
* up to the given recovery target.
* In any case we cannot restore to the point before stop_lsn.
*/
private.archivedir = archivedir;
private.tli = tli;
xlogreader = XLogReaderAllocate(&SimpleXLogPageRead, &private);
if (xlogreader == NULL)
elog(ERROR, "out of memory");
/* We will check it in the end */
xlogfpath[0] = '\0';
xlogreader = InitXLogPageRead(&private, archivedir, tli, true);
/* We can restore at least up to the backup end */
time2iso(last_timestamp, lengthof(last_timestamp), backup->recovery_time);
last_xid = backup->recovery_xid;
if ((TransactionIdIsValid(target_xid) && target_xid == last_xid)
|| (target_time != 0 && backup->recovery_time >= target_time))
|| (target_time != 0 && backup->recovery_time >= target_time)
|| (XRecOffIsValid(target_lsn) && backup->stop_lsn >= target_lsn))
all_wal = true;
startpoint = backup->stop_lsn;
@ -439,21 +555,7 @@ validate_wal(pgBackup *backup,
/* Some needed WAL records are absent */
else
{
if (xlogfpath[0] != 0)
{
/* XLOG reader couldn't read WAL segment.
* We throw a WARNING here to be able to update backup status below.
*/
if (!xlogexists)
{
elog(WARNING, "WAL segment \"%s\" is absent", xlogfpath);
}
else if (xlogreadfd != -1)
{
elog(WARNING, "Possible WAL CORRUPTION."
"Error has occured during reading WAL segment \"%s\"", xlogfpath);
}
}
PrintXLogCorruptionMsg(&private, WARNING);
elog(WARNING, "recovery can be done up to time %s and xid " XID_FMT,
last_timestamp, last_xid);
@ -470,16 +572,14 @@ validate_wal(pgBackup *backup,
else if (target_time != 0)
elog(ERROR, "not enough WAL records to time %s",
target_timestamp);
else if (XRecOffIsValid(target_lsn))
elog(ERROR, "not enough WAL records to lsn %X/%X",
(uint32) (target_lsn >> 32), (uint32) (target_lsn));
}
/* clean */
CleanupXLogPageRead(xlogreader);
XLogReaderFree(xlogreader);
if (xlogreadfd != -1)
{
close(xlogreadfd);
xlogreadfd = -1;
xlogexists = false;
}
}
/*
@ -505,12 +605,7 @@ read_recovery_info(const char *archivedir, TimeLineID tli,
elog(ERROR, "Invalid stop_lsn value %X/%X",
(uint32) (stop_lsn >> 32), (uint32) (stop_lsn));
private.archivedir = archivedir;
private.tli = tli;
xlogreader = XLogReaderAllocate(&SimpleXLogPageRead, &private);
if (xlogreader == NULL)
elog(ERROR, "out of memory");
xlogreader = InitXLogPageRead(&private, archivedir, tli, true);
/* Read records from stop_lsn down to start_lsn */
do
@ -553,13 +648,8 @@ read_recovery_info(const char *archivedir, TimeLineID tli,
res = false;
cleanup:
CleanupXLogPageRead(xlogreader);
XLogReaderFree(xlogreader);
if (xlogreadfd != -1)
{
close(xlogreadfd);
xlogreadfd = -1;
xlogexists = false;
}
return res;
}
@ -581,23 +671,13 @@ wal_contains_lsn(const char *archivedir, XLogRecPtr target_lsn,
elog(ERROR, "Invalid target_lsn value %X/%X",
(uint32) (target_lsn >> 32), (uint32) (target_lsn));
private.archivedir = archivedir;
private.tli = target_tli;
xlogreader = XLogReaderAllocate(&SimpleXLogPageRead, &private);
if (xlogreader == NULL)
elog(ERROR, "out of memory");
xlogreader = InitXLogPageRead(&private, archivedir, target_tli, true);
res = XLogReadRecord(xlogreader, target_lsn, &errormsg) != NULL;
/* Didn't find 'target_lsn' and there is no error, return false */
CleanupXLogPageRead(xlogreader);
XLogReaderFree(xlogreader);
if (xlogreadfd != -1)
{
close(xlogreadfd);
xlogreadfd = -1;
xlogexists = false;
}
return res;
}
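/*
 * Illustrative sketch, not part of the patch: a typical caller would use
 * wal_contains_lsn() to verify that a user-supplied target LSN is actually
 * reachable before acting on it.  The archive path and timeline here are
 * made-up values.
 */
static void
check_target_lsn_example(XLogRecPtr target_lsn)
{
	if (!wal_contains_lsn("/backup/wal/node1", target_lsn, 1))
		elog(ERROR, "WAL archive has no record at %X/%X",
			 (uint32) (target_lsn >> 32), (uint32) target_lsn);
}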
@ -626,54 +706,53 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
int reqLen, XLogRecPtr targetRecPtr, char *readBuf,
TimeLineID *pageTLI)
{
XLogPageReadPrivate *private = (XLogPageReadPrivate *) xlogreader->private_data;
XLogPageReadPrivate *private_data;
uint32 targetPageOff;
private_data = (XLogPageReadPrivate *) xlogreader->private_data;
targetPageOff = targetPagePtr % XLogSegSize;
/*
* See if we need to switch to a new segment because the requested record
* is not in the currently open one.
*/
if (!XLByteInSeg(targetPagePtr, xlogreadsegno))
if (!XLByteInSeg(targetPagePtr, private_data->xlogsegno))
{
if (xlogreadfd >= 0)
CleanupXLogPageRead(xlogreader);
/*
* Do not switch to next WAL segment in this function. Currently it is
* manually switched only in doExtractPageMap().
*/
if (private_data->manual_switch)
{
close(xlogreadfd);
xlogreadfd = -1;
xlogexists = false;
private_data->need_switch = true;
return -1;
}
#ifdef HAVE_LIBZ
else if (gz_xlogread != NULL)
{
gzclose(gz_xlogread);
gz_xlogread = NULL;
xlogexists = false;
}
#endif
}
XLByteToSeg(targetPagePtr, xlogreadsegno);
XLByteToSeg(targetPagePtr, private_data->xlogsegno);
if (!xlogexists)
/* Try to switch to the next WAL segment */
if (!private_data->xlogexists)
{
char xlogfname[MAXFNAMELEN];
XLogFileName(xlogfname, private->tli, xlogreadsegno);
snprintf(xlogfpath, MAXPGPATH, "%s/%s", private->archivedir,
xlogfname);
XLogFileName(xlogfname, private_data->tli, private_data->xlogsegno);
snprintf(private_data->xlogpath, MAXPGPATH, "%s/%s",
private_data->archivedir, xlogfname);
if (fileExists(xlogfpath))
if (fileExists(private_data->xlogpath))
{
elog(LOG, "Opening WAL segment \"%s\"", xlogfpath);
elog(LOG, "Opening WAL segment \"%s\"", private_data->xlogpath);
xlogexists = true;
xlogreadfd = open(xlogfpath, O_RDONLY | PG_BINARY, 0);
private_data->xlogexists = true;
private_data->xlogfile = open(private_data->xlogpath,
O_RDONLY | PG_BINARY, 0);
if (xlogreadfd < 0)
if (private_data->xlogfile < 0)
{
elog(WARNING, "Could not open WAL segment \"%s\": %s",
xlogfpath, strerror(errno));
private_data->xlogpath, strerror(errno));
return -1;
}
}
@ -681,17 +760,21 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
/* Try to open compressed WAL segment */
else
{
snprintf(gz_xlogfpath, sizeof(gz_xlogfpath), "%s.gz", xlogfpath);
if (fileExists(gz_xlogfpath))
snprintf(private_data->gz_xlogpath,
sizeof(private_data->gz_xlogpath), "%s.gz",
private_data->xlogpath);
if (fileExists(private_data->gz_xlogpath))
{
elog(LOG, "Opening compressed WAL segment \"%s\"", gz_xlogfpath);
elog(LOG, "Opening compressed WAL segment \"%s\"",
private_data->gz_xlogpath);
xlogexists = true;
gz_xlogread = gzopen(gz_xlogfpath, "rb");
if (gz_xlogread == NULL)
private_data->xlogexists = true;
private_data->gz_xlogfile = gzopen(private_data->gz_xlogpath,
"rb");
if (private_data->gz_xlogfile == NULL)
{
elog(WARNING, "Could not open compressed WAL segment \"%s\": %s",
gz_xlogfpath, strerror(errno));
private_data->gz_xlogpath, strerror(errno));
return -1;
}
}
@ -699,55 +782,129 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
#endif
/* Exit without error if WAL segment doesn't exist */
if (!xlogexists)
if (!private_data->xlogexists)
return -1;
}
/*
* At this point, we have the right segment open.
*/
Assert(xlogexists);
Assert(private_data->xlogexists);
/* Read the requested page */
if (xlogreadfd != -1)
if (private_data->xlogfile != -1)
{
if (lseek(xlogreadfd, (off_t) targetPageOff, SEEK_SET) < 0)
if (lseek(private_data->xlogfile, (off_t) targetPageOff, SEEK_SET) < 0)
{
elog(WARNING, "Could not seek in WAL segment \"%s\": %s",
xlogfpath, strerror(errno));
private_data->xlogpath, strerror(errno));
return -1;
}
if (read(xlogreadfd, readBuf, XLOG_BLCKSZ) != XLOG_BLCKSZ)
if (read(private_data->xlogfile, readBuf, XLOG_BLCKSZ) != XLOG_BLCKSZ)
{
elog(WARNING, "Could not read from WAL segment \"%s\": %s",
xlogfpath, strerror(errno));
private_data->xlogpath, strerror(errno));
return -1;
}
}
#ifdef HAVE_LIBZ
else
{
if (gzseek(gz_xlogread, (z_off_t) targetPageOff, SEEK_SET) == -1)
if (gzseek(private_data->gz_xlogfile, (z_off_t) targetPageOff, SEEK_SET) == -1)
{
elog(WARNING, "Could not seek in compressed WAL segment \"%s\": %s",
gz_xlogfpath, get_gz_error(gz_xlogread));
private_data->gz_xlogpath,
get_gz_error(private_data->gz_xlogfile));
return -1;
}
if (gzread(gz_xlogread, readBuf, XLOG_BLCKSZ) != XLOG_BLCKSZ)
if (gzread(private_data->gz_xlogfile, readBuf, XLOG_BLCKSZ) != XLOG_BLCKSZ)
{
elog(WARNING, "Could not read from compressed WAL segment \"%s\": %s",
gz_xlogfpath, get_gz_error(gz_xlogread));
private_data->gz_xlogpath,
get_gz_error(private_data->gz_xlogfile));
return -1;
}
}
#endif
*pageTLI = private->tli;
*pageTLI = private_data->tli;
return XLOG_BLCKSZ;
}
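/*
 * Note: SimpleXLogPageRead() follows the XLogPageReadCB contract from
 * xlogreader.h - on success it returns the number of bytes read (here
 * always a full XLOG_BLCKSZ page) and sets *pageTLI; on failure it
 * returns -1, after which XLogReadRecord() gives up and returns NULL.
 */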
/*
* Initialize WAL segments reading.
*/
static XLogReaderState *
InitXLogPageRead(XLogPageReadPrivate *private_data, const char *archivedir,
TimeLineID tli, bool allocate_reader)
{
XLogReaderState *xlogreader = NULL;
MemSet(private_data, 0, sizeof(XLogPageReadPrivate));
private_data->archivedir = archivedir;
private_data->tli = tli;
private_data->xlogfile = -1;
if (allocate_reader)
{
xlogreader = XLogReaderAllocate(&SimpleXLogPageRead, private_data);
if (xlogreader == NULL)
elog(ERROR, "out of memory");
}
return xlogreader;
}
/*
* Cleanup after WAL segment reading.
*/
static void
CleanupXLogPageRead(XLogReaderState *xlogreader)
{
XLogPageReadPrivate *private_data;
private_data = (XLogPageReadPrivate *) xlogreader->private_data;
if (private_data->xlogfile >= 0)
{
close(private_data->xlogfile);
private_data->xlogfile = -1;
}
#ifdef HAVE_LIBZ
else if (private_data->gz_xlogfile != NULL)
{
gzclose(private_data->gz_xlogfile);
private_data->gz_xlogfile = NULL;
}
#endif
private_data->xlogexists = false;
}
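/*
 * Illustrative sketch, not part of the patch, of the lifecycle these
 * helpers are built for: InitXLogPageRead() prepares the private state
 * (and optionally the reader), records are pulled with XLogReadRecord(),
 * and CleanupXLogPageRead() closes whichever file - plain or gzipped -
 * is still open.
 */
static void
walk_wal_example(const char *archivedir, TimeLineID tli, XLogRecPtr start_lsn)
{
	XLogPageReadPrivate private_data;
	XLogReaderState *xlogreader;
	XLogRecord *record;
	char	   *errormsg;
	XLogRecPtr	lsn = start_lsn;

	xlogreader = InitXLogPageRead(&private_data, archivedir, tli, true);

	while ((record = XLogReadRecord(xlogreader, lsn, &errormsg)) != NULL)
		lsn = InvalidXLogRecPtr;	/* after the first record just continue */

	CleanupXLogPageRead(xlogreader);
	XLogReaderFree(xlogreader);
}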
static void
PrintXLogCorruptionMsg(XLogPageReadPrivate *private_data, int elevel)
{
if (private_data->xlogpath[0] != 0)
{
/*
* XLOG reader couldn't read WAL segment.
* We throw a WARNING here to be able to update backup status.
*/
if (!private_data->xlogexists)
elog(elevel, "WAL segment \"%s\" is absent", private_data->xlogpath);
else if (private_data->xlogfile != -1)
elog(elevel, "Possible WAL corruption. "
"Error has occured during reading WAL segment \"%s\"",
private_data->xlogpath);
#ifdef HAVE_LIBZ
else if (private_data->gz_xlogfile != NULL)
elog(elevel, "Possible WAL corruption. "
"Error has occured during reading WAL segment \"%s\"",
private_data->gz_xlogpath);
#endif
}
}
/*
* Extract information about blocks modified in this record.
*/

View File

@ -10,14 +10,16 @@
#include "pg_probackup.h"
#include "streamutil.h"
#include "utils/thread.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/stat.h>
#include <unistd.h>
#include "pg_getopt.h"
const char *PROGRAM_VERSION = "2.0.17";
const char *PROGRAM_VERSION = "2.0.18";
const char *PROGRAM_URL = "https://github.com/postgrespro/pg_probackup";
const char *PROGRAM_EMAIL = "https://github.com/postgrespro/pg_probackup/issues";
@ -36,7 +38,7 @@ char backup_instance_path[MAXPGPATH];
char arclog_path[MAXPGPATH] = "";
/* common options */
char *backup_id_string_param = NULL;
static char *backup_id_string = NULL;
int num_threads = 1;
bool stream_wal = false;
bool progress = false;
@ -47,28 +49,29 @@ char *replication_slot = NULL;
/* backup options */
bool backup_logs = false;
bool smooth_checkpoint;
bool from_replica = false;
bool is_remote_backup = false;
/* Wait timeout for WAL segment archiving */
uint32 archive_timeout = 300; /* default is 300 seconds */
uint32 archive_timeout = ARCHIVE_TIMEOUT_DEFAULT;
const char *master_db = NULL;
const char *master_host = NULL;
const char *master_port= NULL;
const char *master_user = NULL;
uint32 replica_timeout = 300; /* default is 300 seconds */
uint32 replica_timeout = REPLICA_TIMEOUT_DEFAULT;
/* restore options */
static char *target_time;
static char *target_xid;
static char *target_lsn;
static char *target_inclusive;
static TimeLineID target_tli;
static bool target_immediate;
static char *target_name = NULL;
static char *target_action = NULL;;
static char *target_action = NULL;
static pgRecoveryTarget *recovery_target_options = NULL;
bool restore_as_replica = false;
bool restore_no_validate = false;
/* delete options */
bool delete_wal = false;
@ -81,10 +84,11 @@ uint32 retention_redundancy = 0;
uint32 retention_window = 0;
/* compression options */
CompressAlg compress_alg = NOT_DEFINED_COMPRESS;
int compress_level = DEFAULT_COMPRESS_LEVEL;
CompressAlg compress_alg = COMPRESS_ALG_DEFAULT;
int compress_level = COMPRESS_LEVEL_DEFAULT;
bool compress_shortcut = false;
/* other options */
char *instance_name;
uint64 system_identifier = 0;
@ -94,30 +98,34 @@ static char *wal_file_path;
static char *wal_file_name;
static bool file_overwrite = false;
/* show options */
ShowFormat show_format = SHOW_PLAIN;
/* current settings */
pgBackup current;
ProbackupSubcmd backup_subcmd;
ProbackupSubcmd backup_subcmd = NO_CMD;
bool help = false;
static bool help_opt = false;
static void opt_backup_mode(pgut_option *opt, const char *arg);
static void opt_log_level_console(pgut_option *opt, const char *arg);
static void opt_log_level_file(pgut_option *opt, const char *arg);
static void opt_compress_alg(pgut_option *opt, const char *arg);
static void opt_show_format(pgut_option *opt, const char *arg);
static void compress_init(void);
static pgut_option options[] =
{
/* directory options */
{ 'b', 1, "help", &help, SOURCE_CMDLINE },
{ 'b', 1, "help", &help_opt, SOURCE_CMDLINE },
{ 's', 'D', "pgdata", &pgdata, SOURCE_CMDLINE },
{ 's', 'B', "backup-path", &backup_path, SOURCE_CMDLINE },
/* common options */
{ 'u', 'j', "threads", &num_threads, SOURCE_CMDLINE },
{ 'b', 2, "stream", &stream_wal, SOURCE_CMDLINE },
{ 'b', 3, "progress", &progress, SOURCE_CMDLINE },
{ 's', 'i', "backup-id", &backup_id_string_param, SOURCE_CMDLINE },
{ 's', 'i', "backup-id", &backup_id_string, SOURCE_CMDLINE },
/* backup options */
{ 'b', 10, "backup-pg-log", &backup_logs, SOURCE_CMDLINE },
{ 'f', 'b', "backup-mode", opt_backup_mode, SOURCE_CMDLINE },
@ -143,12 +151,14 @@ static pgut_option options[] =
{ 's', 25, "recovery-target-name", &target_name, SOURCE_CMDLINE },
{ 's', 26, "recovery-target-action", &target_action, SOURCE_CMDLINE },
{ 'b', 'R', "restore-as-replica", &restore_as_replica, SOURCE_CMDLINE },
{ 'b', 27, "no-validate", &restore_no_validate, SOURCE_CMDLINE },
{ 's', 28, "lsn", &target_lsn, SOURCE_CMDLINE },
/* delete options */
{ 'b', 130, "wal", &delete_wal, SOURCE_CMDLINE },
{ 'b', 131, "expired", &delete_expired, SOURCE_CMDLINE },
{ 'b', 132, "all", &apply_to_all, SOURCE_CMDLINE },
/* TODO not implemented yet */
{ 'b', 133, "force", &force_delete, SOURCE_CMDLINE },
{ 'b', 133, "force", &force_delete, SOURCE_CMDLINE },
/* retention options */
{ 'u', 134, "retention-redundancy", &retention_redundancy, SOURCE_CMDLINE },
{ 'u', 135, "retention-window", &retention_window, SOURCE_CMDLINE },
@ -178,6 +188,8 @@ static pgut_option options[] =
{ 's', 160, "wal-file-path", &wal_file_path, SOURCE_CMDLINE },
{ 's', 161, "wal-file-name", &wal_file_name, SOURCE_CMDLINE },
{ 'b', 162, "overwrite", &file_overwrite, SOURCE_CMDLINE },
/* show options */
{ 'f', 170, "format", opt_show_format, SOURCE_CMDLINE },
{ 0 }
};
@ -187,14 +199,14 @@ static pgut_option options[] =
int
main(int argc, char *argv[])
{
char *command = NULL;
char path[MAXPGPATH];
char *command = NULL,
*command_name;
/* Check if backup_path is directory. */
struct stat stat_buf;
int rc;
/* initialize configuration */
pgBackup_init(&current);
pgBackupInit(&current);
PROGRAM_NAME = get_progname(argv[0]);
set_pglocale_pgservice(argv[0], "pgscripts");
@ -202,42 +214,40 @@ main(int argc, char *argv[])
/*
* Save main thread's tid. It is used call exit() in case of errors.
*/
#ifdef WIN32
main_tid = GetCurrentThreadId();
#else
main_tid = pthread_self();
#endif
/* Parse subcommands and non-subcommand options */
if (argc > 1)
{
if (strcmp(argv[1], "archive-push") == 0)
backup_subcmd = ARCHIVE_PUSH;
backup_subcmd = ARCHIVE_PUSH_CMD;
else if (strcmp(argv[1], "archive-get") == 0)
backup_subcmd = ARCHIVE_GET;
backup_subcmd = ARCHIVE_GET_CMD;
else if (strcmp(argv[1], "add-instance") == 0)
backup_subcmd = ADD_INSTANCE;
backup_subcmd = ADD_INSTANCE_CMD;
else if (strcmp(argv[1], "del-instance") == 0)
backup_subcmd = DELETE_INSTANCE;
backup_subcmd = DELETE_INSTANCE_CMD;
else if (strcmp(argv[1], "init") == 0)
backup_subcmd = INIT;
backup_subcmd = INIT_CMD;
else if (strcmp(argv[1], "backup") == 0)
backup_subcmd = BACKUP;
backup_subcmd = BACKUP_CMD;
else if (strcmp(argv[1], "restore") == 0)
backup_subcmd = RESTORE;
backup_subcmd = RESTORE_CMD;
else if (strcmp(argv[1], "validate") == 0)
backup_subcmd = VALIDATE;
else if (strcmp(argv[1], "show") == 0)
backup_subcmd = SHOW;
backup_subcmd = VALIDATE_CMD;
else if (strcmp(argv[1], "delete") == 0)
backup_subcmd = DELETE;
backup_subcmd = DELETE_CMD;
else if (strcmp(argv[1], "merge") == 0)
backup_subcmd = MERGE_CMD;
else if (strcmp(argv[1], "show") == 0)
backup_subcmd = SHOW_CMD;
else if (strcmp(argv[1], "set-config") == 0)
backup_subcmd = SET_CONFIG;
backup_subcmd = SET_CONFIG_CMD;
else if (strcmp(argv[1], "show-config") == 0)
backup_subcmd = SHOW_CONFIG;
else if (strcmp(argv[1], "--help") == 0
|| strcmp(argv[1], "help") == 0
|| strcmp(argv[1], "-?") == 0)
backup_subcmd = SHOW_CONFIG_CMD;
else if (strcmp(argv[1], "--help") == 0 ||
strcmp(argv[1], "-?") == 0 ||
strcmp(argv[1], "help") == 0)
{
if (argc > 2)
help_command(argv[2]);
@ -248,35 +258,33 @@ main(int argc, char *argv[])
|| strcmp(argv[1], "version") == 0
|| strcmp(argv[1], "-V") == 0)
{
if (argc == 2)
{
#ifdef PGPRO_VERSION
fprintf(stderr, "%s %s (Postgres Pro %s %s)\n",
PROGRAM_NAME, PROGRAM_VERSION,
PGPRO_VERSION, PGPRO_EDITION);
fprintf(stderr, "%s %s (Postgres Pro %s %s)\n",
PROGRAM_NAME, PROGRAM_VERSION,
PGPRO_VERSION, PGPRO_EDITION);
#else
fprintf(stderr, "%s %s (PostgreSQL %s)\n",
PROGRAM_NAME, PROGRAM_VERSION, PG_VERSION);
fprintf(stderr, "%s %s (PostgreSQL %s)\n",
PROGRAM_NAME, PROGRAM_VERSION, PG_VERSION);
#endif
exit(0);
}
else if (strcmp(argv[2], "--help") == 0)
help_command(argv[1]);
else
elog(ERROR, "Invalid arguments for \"%s\" subcommand", argv[1]);
exit(0);
}
else
elog(ERROR, "Unknown subcommand");
elog(ERROR, "Unknown subcommand \"%s\"", argv[1]);
}
if (backup_subcmd == NO_CMD)
elog(ERROR, "No subcommand specified");
/*
* Make command string before getopt_long() will call. It permutes the
* content of argv.
*/
if (backup_subcmd == BACKUP ||
backup_subcmd == RESTORE ||
backup_subcmd == VALIDATE ||
backup_subcmd == DELETE)
command_name = pstrdup(argv[1]);
if (backup_subcmd == BACKUP_CMD ||
backup_subcmd == RESTORE_CMD ||
backup_subcmd == VALIDATE_CMD ||
backup_subcmd == DELETE_CMD ||
backup_subcmd == MERGE_CMD)
{
int i,
len = 0,
@ -303,11 +311,12 @@ main(int argc, char *argv[])
command[len] = '\0';
}
optind += 1;
/* Parse command line arguments */
pgut_getopt(argc, argv, options);
if (help)
help_command(argv[2]);
if (help_opt)
help_command(command_name);
/* backup_path is required for all pg_probackup commands except help */
if (backup_path == NULL)
@ -320,6 +329,7 @@ main(int argc, char *argv[])
if (backup_path == NULL)
elog(ERROR, "required parameter not specified: BACKUP_PATH (-B, --backup-path)");
}
canonicalize_path(backup_path);
/* Ensure that backup_path is an absolute path */
if (!is_absolute_path(backup_path))
@ -340,7 +350,8 @@ main(int argc, char *argv[])
}
/* Option --instance is required for all commands except init and show */
if (backup_subcmd != INIT && backup_subcmd != SHOW && backup_subcmd != VALIDATE)
if (backup_subcmd != INIT_CMD && backup_subcmd != SHOW_CMD &&
backup_subcmd != VALIDATE_CMD)
{
if (instance_name == NULL)
elog(ERROR, "required parameter not specified: --instance");
@ -352,7 +363,8 @@ main(int argc, char *argv[])
*/
if (instance_name)
{
sprintf(backup_instance_path, "%s/%s/%s", backup_path, BACKUPS_DIR, instance_name);
sprintf(backup_instance_path, "%s/%s/%s",
backup_path, BACKUPS_DIR, instance_name);
sprintf(arclog_path, "%s/%s/%s", backup_path, "wal", instance_name);
/*
@ -360,7 +372,7 @@ main(int argc, char *argv[])
* for all commands except init, which doesn't take this parameter
* and add-instance which creates new instance.
*/
if (backup_subcmd != INIT && backup_subcmd != ADD_INSTANCE)
if (backup_subcmd != INIT_CMD && backup_subcmd != ADD_INSTANCE_CMD)
{
if (access(backup_instance_path, F_OK) != 0)
elog(ERROR, "Instance '%s' does not exist in this backup catalog",
@ -372,8 +384,10 @@ main(int argc, char *argv[])
* Read options from env variables or from config file,
* unless we're going to set them via set-config.
*/
if (instance_name && backup_subcmd != SET_CONFIG)
if (instance_name && backup_subcmd != SET_CONFIG_CMD)
{
char path[MAXPGPATH];
/* Read environment variables */
pgut_getopt_env(options);
@ -393,18 +407,19 @@ main(int argc, char *argv[])
elog(ERROR, "-D, --pgdata must be an absolute path");
/* Sanity check of --backup-id option */
if (backup_id_string_param != NULL)
if (backup_id_string != NULL)
{
if (backup_subcmd != RESTORE
&& backup_subcmd != VALIDATE
&& backup_subcmd != DELETE
&& backup_subcmd != SHOW)
elog(ERROR, "Cannot use -i (--backup-id) option together with the '%s' command",
argv[1]);
if (backup_subcmd != RESTORE_CMD &&
backup_subcmd != VALIDATE_CMD &&
backup_subcmd != DELETE_CMD &&
backup_subcmd != MERGE_CMD &&
backup_subcmd != SHOW_CMD)
elog(ERROR, "Cannot use -i (--backup-id) option together with the \"%s\" command",
command_name);
current.backup_id = base36dec(backup_id_string_param);
current.backup_id = base36dec(backup_id_string);
if (current.backup_id == 0)
elog(ERROR, "Invalid backup-id");
elog(ERROR, "Invalid backup-id \"%s\"", backup_id_string);
}
/* Setup stream options. They are used in streamutil.c. */
@ -426,12 +441,12 @@ main(int argc, char *argv[])
pgdata_exclude_dir[i] = "pg_log";
}
if (backup_subcmd == VALIDATE || backup_subcmd == RESTORE)
if (backup_subcmd == VALIDATE_CMD || backup_subcmd == RESTORE_CMD)
{
/* parse all recovery target options into recovery_target_options structure */
recovery_target_options = parseRecoveryTargetOptions(target_time, target_xid,
target_inclusive, target_tli, target_immediate,
target_name, target_action);
target_inclusive, target_tli, target_lsn, target_immediate,
target_name, target_action, restore_no_validate);
}
if (num_threads < 1)
@ -442,23 +457,24 @@ main(int argc, char *argv[])
/* do actual operation */
switch (backup_subcmd)
{
case ARCHIVE_PUSH:
case ARCHIVE_PUSH_CMD:
return do_archive_push(wal_file_path, wal_file_name, file_overwrite);
case ARCHIVE_GET:
case ARCHIVE_GET_CMD:
return do_archive_get(wal_file_path, wal_file_name);
case ADD_INSTANCE:
case ADD_INSTANCE_CMD:
return do_add_instance();
case DELETE_INSTANCE:
case DELETE_INSTANCE_CMD:
return do_delete_instance();
case INIT:
case INIT_CMD:
return do_init();
case BACKUP:
case BACKUP_CMD:
{
const char *backup_mode;
time_t start_time;
start_time = time(NULL);
backup_mode = deparse_backup_mode(current.backup_mode);
current.stream = stream_wal;
elog(INFO, "Backup start, pg_probackup version: %s, backup ID: %s, backup mode: %s, instance: %s, stream: %s, remote: %s",
PROGRAM_VERSION, base36enc(start_time), backup_mode, instance_name,
@ -466,34 +482,40 @@ main(int argc, char *argv[])
return do_backup(start_time);
}
case RESTORE:
case RESTORE_CMD:
return do_restore_or_validate(current.backup_id,
recovery_target_options,
true);
case VALIDATE:
case VALIDATE_CMD:
if (current.backup_id == 0 && target_time == 0 && target_xid == 0)
return do_validate_all();
else
return do_restore_or_validate(current.backup_id,
recovery_target_options,
false);
case SHOW:
case SHOW_CMD:
return do_show(current.backup_id);
case DELETE:
if (delete_expired && backup_id_string_param)
case DELETE_CMD:
if (delete_expired && backup_id_string)
elog(ERROR, "You cannot specify --delete-expired and --backup-id options together");
if (!delete_expired && !delete_wal && !backup_id_string_param)
if (!delete_expired && !delete_wal && !backup_id_string)
elog(ERROR, "You must specify at least one of the delete options: --expired |--wal |--backup_id");
if (delete_wal && !delete_expired && !backup_id_string_param)
if (delete_wal && !delete_expired && !backup_id_string)
return do_retention_purge();
if (delete_expired)
return do_retention_purge();
else
return do_delete(current.backup_id);
case SHOW_CONFIG:
case MERGE_CMD:
do_merge(current.backup_id);
break;
case SHOW_CONFIG_CMD:
return do_configure(true);
case SET_CONFIG:
case SET_CONFIG_CMD:
return do_configure(false);
case NO_CMD:
/* Should not happen */
elog(ERROR, "Unknown subcommand");
}
return 0;
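/*
 * Illustrative invocation of the new merge subcommand (catalog path,
 * instance name and backup ID are made-up values):
 *
 *   pg_probackup merge -B /backup/catalog --instance node1 -i PZ7YK2
 *
 * which hands the decoded backup ID to do_merge() above.
 */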
@ -517,49 +539,31 @@ opt_log_level_file(pgut_option *opt, const char *arg)
log_level_file = parse_log_level(arg);
}
CompressAlg
parse_compress_alg(const char *arg)
{
	size_t		len;

	/* Skip all spaces detected */
	while (isspace((unsigned char)*arg))
		arg++;
	len = strlen(arg);

	if (len == 0)
		elog(ERROR, "compress algorithm is empty");

	if (pg_strncasecmp("zlib", arg, len) == 0)
		return ZLIB_COMPRESS;
	else if (pg_strncasecmp("pglz", arg, len) == 0)
		return PGLZ_COMPRESS;
	else if (pg_strncasecmp("none", arg, len) == 0)
		return NONE_COMPRESS;
	else
		elog(ERROR, "invalid compress algorithm value \"%s\"", arg);

	return NOT_DEFINED_COMPRESS;
}

const char*
deparse_compress_alg(int alg)
{
	switch (alg)
	{
		case NONE_COMPRESS:
		case NOT_DEFINED_COMPRESS:
			return "none";
		case ZLIB_COMPRESS:
			return "zlib";
		case PGLZ_COMPRESS:
			return "pglz";
	}
	return NULL;
}

static void
opt_show_format(pgut_option *opt, const char *arg)
{
	const char *v = arg;
	size_t		len;

	while (IsSpace(*v))
		v++;
	len = strlen(v);

	if (len > 0)
	{
		if (pg_strncasecmp("plain", v, len) == 0)
			show_format = SHOW_PLAIN;
		else if (pg_strncasecmp("json", v, len) == 0)
			show_format = SHOW_JSON;
		else
			elog(ERROR, "Invalid show format \"%s\"", arg);
	}
	else
		elog(ERROR, "Invalid show format \"%s\"", arg);
}
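/*
 * Illustrative usage of the new show option (catalog path is a made-up
 * value):
 *
 *   pg_probackup show -B /backup/catalog --format=json
 *
 * show_format then selects plain-text or JSON rendering of the backup list.
 */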
void
static void
opt_compress_alg(pgut_option *opt, const char *arg)
{
compress_alg = parse_compress_alg(arg);
@ -568,17 +572,17 @@ opt_compress_alg(pgut_option *opt, const char *arg)
/*
* Initialize compress and sanity checks for compress.
*/
static
void compress_init(void)
static void
compress_init(void)
{
/* Default algorithm is zlib */
if (compress_shortcut)
compress_alg = ZLIB_COMPRESS;
if (backup_subcmd != SET_CONFIG)
if (backup_subcmd != SET_CONFIG_CMD)
{
if (compress_level != DEFAULT_COMPRESS_LEVEL
&& compress_alg == NONE_COMPRESS)
if (compress_level != COMPRESS_LEVEL_DEFAULT
&& compress_alg == NOT_DEFINED_COMPRESS)
elog(ERROR, "Cannot specify compress-level option without compress-alg option");
}
@ -588,7 +592,7 @@ void compress_init(void)
if (compress_level == 0)
compress_alg = NOT_DEFINED_COMPRESS;
if (backup_subcmd == BACKUP || backup_subcmd == ARCHIVE_PUSH)
if (backup_subcmd == BACKUP_CMD || backup_subcmd == ARCHIVE_PUSH_CMD)
{
#ifndef HAVE_LIBZ
if (compress_alg == ZLIB_COMPRESS)

View File

@ -15,10 +15,6 @@
#include <limits.h>
#include <libpq-fe.h>
#ifndef WIN32
#include <sys/mman.h>
#endif
#include "access/timeline.h"
#include "access/xlogdefs.h"
#include "access/xlog_internal.h"
@ -28,6 +24,13 @@
#include "storage/checksum.h"
#include "utils/pg_crc.h"
#include "common/relpath.h"
#include "port.h"
#ifdef FRONTEND
#undef FRONTEND
#include "port/atomics.h"
#define FRONTEND
#endif
#include "utils/parray.h"
#include "utils/pgut.h"
@ -60,6 +63,8 @@
#define PG_BLACK_LIST "black_list"
#define PG_TABLESPACE_MAP_FILE "tablespace_map"
#define LOG_FILENAME_DEFAULT "pg_probackup.log"
#define LOG_DIRECTORY_DEFAULT "log"
/* Directory/File permission */
#define DIR_PERMISSION (0700)
#define FILE_PERMISSION (0600)
@ -85,9 +90,10 @@ typedef struct pgFile
size_t size; /* size of the file */
size_t read_size; /* size of the portion read (if only some pages are
backed up, it's different from size) */
size_t write_size; /* size of the backed-up file. BYTES_INVALID means
int64 write_size; /* size of the backed-up file. BYTES_INVALID means
that the file existed but was not backed up
because not modified since last backup. */
/* we need int64 here to store '-1' value */
pg_crc32 crc; /* CRC value of the file, regular file only */
char *linked; /* path of the linked file */
bool is_datafile; /* true if the file is PostgreSQL data file */
@ -102,21 +108,23 @@ typedef struct pgFile
bool is_database;
bool exists_in_prev; /* Mark files, both data and regular, that exists in previous backup */
CompressAlg compress_alg; /* compression algorithm applied to the file */
volatile uint32 lock; /* lock for synchronization of parallel threads */
volatile pg_atomic_flag lock; /* lock for synchronization of parallel threads */
datapagemap_t pagemap; /* bitmap of pages updated since previous backup */
bool pagemap_isabsent; /* Used to mark files with unknown state of pagemap,
* i.e. datafiles without _ptrack */
} pgFile;
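/*
 * Illustrative sketch, not part of the patch: how the new pg_atomic_flag
 * member is meant to be used by worker threads, replacing the old
 * __sync_lock_test_and_set() idiom.  Only the first thread that manages
 * to set the flag processes the file; the others skip it.
 */
static void *
worker_example(void *arg)
{
	parray *files = (parray *) arg;
	int		i;

	for (i = 0; i < parray_num(files); i++)
	{
		pgFile *file = (pgFile *) parray_get(files, i);

		/* pg_atomic_test_set_flag() returns true only for the first caller */
		if (!pg_atomic_test_set_flag(&file->lock))
			continue;

		/* ... process the claimed file ... */
	}
	return NULL;
}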
/* Special values of datapagemap_t bitmapsize */
#define PageBitmapIsEmpty 0 /* Used to mark unchanged datafiles */
#define PageBitmapIsAbsent -1 /* Used to mark files with unknown state of pagemap, i.e. datafiles without _ptrack */
/* Current state of backup */
typedef enum BackupStatus
{
BACKUP_STATUS_INVALID, /* the pgBackup is invalid */
BACKUP_STATUS_OK, /* completed backup */
BACKUP_STATUS_ERROR, /* aborted because of unexpected error */
BACKUP_STATUS_RUNNING, /* running backup */
BACKUP_STATUS_MERGING, /* merging backups */
BACKUP_STATUS_DELETING, /* data files are being deleted */
BACKUP_STATUS_DELETED, /* data files have been deleted */
BACKUP_STATUS_DONE, /* completed but not validated yet */
@ -135,24 +143,33 @@ typedef enum BackupMode
typedef enum ProbackupSubcmd
{
INIT = 0,
ARCHIVE_PUSH,
ARCHIVE_GET,
ADD_INSTANCE,
DELETE_INSTANCE,
BACKUP,
RESTORE,
VALIDATE,
SHOW,
DELETE,
SET_CONFIG,
SHOW_CONFIG
NO_CMD = 0,
INIT_CMD,
ADD_INSTANCE_CMD,
DELETE_INSTANCE_CMD,
ARCHIVE_PUSH_CMD,
ARCHIVE_GET_CMD,
BACKUP_CMD,
RESTORE_CMD,
VALIDATE_CMD,
DELETE_CMD,
MERGE_CMD,
SHOW_CMD,
SET_CONFIG_CMD,
SHOW_CONFIG_CMD
} ProbackupSubcmd;
typedef enum ShowFormat
{
SHOW_PLAIN,
SHOW_JSON
} ShowFormat;
/* special values of pgBackup fields */
#define INVALID_BACKUP_ID 0 /* backup ID is not provided by user */
#define BYTES_INVALID (-1)
#define BLOCKNUM_INVALID (-1)
typedef struct pgBackupConfig
{
@ -169,6 +186,8 @@ typedef struct pgBackupConfig
const char *master_user;
int replica_timeout;
int archive_timeout;
int log_level_console;
int log_level_file;
char *log_filename;
@ -184,6 +203,8 @@ typedef struct pgBackupConfig
int compress_level;
} pgBackupConfig;
typedef struct pgBackup pgBackup;
/* Information about single backup stored in backup.conf */
typedef struct pgBackup
{
@ -214,18 +235,24 @@ typedef struct pgBackup
/* Size of WAL files in archive needed to restore this backup */
int64 wal_bytes;
CompressAlg compress_alg;
int compress_level;
/* Fields needed for compatibility check */
uint32 block_size;
uint32 wal_block_size;
uint32 checksum_version;
char program_version[100];
char server_version[100];
bool stream; /* Was this backup taken in stream mode?
* i.e. does it include all needed WAL files? */
bool from_replica; /* Was this backup taken from replica */
time_t parent_backup; /* Identifier of the previous backup.
* Which is basic backup for this
* incremental backup. */
pgBackup *parent_backup_link;
char *primary_conninfo; /* Connection parameters of the backup
* in the format suitable for recovery.conf */
} pgBackup;
@ -241,12 +268,17 @@ typedef struct pgRecoveryTarget
TransactionId recovery_target_xid;
/* add one more field in order to avoid deparsing recovery_target_xid back */
const char *target_xid_string;
bool lsn_specified;
XLogRecPtr recovery_target_lsn;
/* add one more field in order to avoid deparsing recovery_target_lsn back */
const char *target_lsn_string;
TimeLineID recovery_target_tli;
bool recovery_target_inclusive;
bool inclusive_specified;
bool recovery_target_immediate;
const char *recovery_target_name;
const char *recovery_target_action;
bool restore_no_validate;
} pgRecoveryTarget;
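/*
 * Hypothetical helper, not part of the patch, showing how the textual
 * form accepted for recovery_target_lsn (e.g. "0/4E2B8D0") maps onto an
 * XLogRecPtr: the two hex fields are the high and low 32 bits.
 */
static XLogRecPtr
parse_target_lsn_example(const char *value)
{
	uint32		xlogid;
	uint32		xrecoff;

	if (sscanf(value, "%X/%X", &xlogid, &xrecoff) != 2)
		elog(ERROR, "invalid LSN \"%s\"", value);
	return ((XLogRecPtr) xlogid << 32) | xrecoff;
}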
/* Union to ease operations on relation pages */
@ -260,18 +292,20 @@ typedef struct
{
const char *from_root;
const char *to_root;
parray *backup_files_list;
parray *prev_backup_filelist;
XLogRecPtr prev_backup_start_lsn;
PGconn *thread_backup_conn;
PGcancel *thread_cancel_conn;
parray *files_list;
parray *prev_filelist;
XLogRecPtr prev_start_lsn;
PGconn *backup_conn;
PGcancel *cancel_conn;
/*
* Return value from the thread.
* 0 means there is no error, 1 - there is an error.
*/
int ret;
} backup_files_args;
} backup_files_arg;
/*
* return pointer that exceeds the length of prefix from character string.
@ -309,13 +343,14 @@ extern char *replication_slot;
/* backup options */
extern bool smooth_checkpoint;
#define ARCHIVE_TIMEOUT_DEFAULT 300
extern uint32 archive_timeout;
extern bool from_replica;
extern bool is_remote_backup;
extern const char *master_db;
extern const char *master_host;
extern const char *master_port;
extern const char *master_user;
#define REPLICA_TIMEOUT_DEFAULT 300
extern uint32 replica_timeout;
extern bool is_ptrack_support;
@ -331,7 +366,10 @@ extern bool delete_expired;
extern bool apply_to_all;
extern bool force_delete;
/* retention options */
/* retention options. 0 disables the option */
#define RETENTION_REDUNDANCY_DEFAULT 0
#define RETENTION_WINDOW_DEFAULT 0
extern uint32 retention_redundancy;
extern uint32 retention_window;
@ -340,7 +378,8 @@ extern CompressAlg compress_alg;
extern int compress_level;
extern bool compress_shortcut;
#define DEFAULT_COMPRESS_LEVEL 6
#define COMPRESS_ALG_DEFAULT NOT_DEFINED_COMPRESS
#define COMPRESS_LEVEL_DEFAULT 1
extern CompressAlg parse_compress_alg(const char *arg);
extern const char* deparse_compress_alg(int alg);
@ -348,9 +387,12 @@ extern const char* deparse_compress_alg(int alg);
extern char *instance_name;
extern uint64 system_identifier;
/* show options */
extern ShowFormat show_format;
/* current settings */
extern pgBackup current;
extern ProbackupSubcmd backup_subcmd;
/* in dir.c */
/* exclude directory list for $PGDATA file listing */
@ -363,7 +405,7 @@ extern const char *deparse_backup_mode(BackupMode mode);
extern void process_block_change(ForkNumber forknum, RelFileNode rnode,
BlockNumber blkno);
extern char *pg_ptrack_get_block(backup_files_args *arguments,
extern char *pg_ptrack_get_block(backup_files_arg *arguments,
Oid dbOid, Oid tblsOid, Oid relOid,
BlockNumber blknum,
size_t *result_size);
@ -377,10 +419,12 @@ extern bool satisfy_recovery_target(const pgBackup *backup,
extern parray * readTimeLineHistory_probackup(TimeLineID targetTLI);
extern pgRecoveryTarget *parseRecoveryTargetOptions(
const char *target_time, const char *target_xid,
const char *target_inclusive, TimeLineID target_tli, bool target_immediate,
const char *target_name, const char *target_action);
const char *target_inclusive, TimeLineID target_tli, const char* target_lsn,
bool target_immediate, const char *target_name,
const char *target_action, bool restore_no_validate);
extern void opt_tablespace_map(pgut_option *opt, const char *arg);
/* in merge.c */
extern void do_merge(time_t backup_id);
/* in init.c */
extern int do_init(void);
@ -432,21 +476,31 @@ extern pgBackup *catalog_get_last_data_backup(parray *backup_list,
extern void catalog_lock(void);
extern void pgBackupWriteControl(FILE *out, pgBackup *backup);
extern void pgBackupWriteBackupControlFile(pgBackup *backup);
extern void pgBackupWriteFileList(pgBackup *backup, parray *files,
const char *root);
extern void pgBackupGetPath(const pgBackup *backup, char *path, size_t len, const char *subdir);
extern void pgBackupGetPath2(const pgBackup *backup, char *path, size_t len,
const char *subdir1, const char *subdir2);
extern int pgBackupCreateDir(pgBackup *backup);
extern void pgBackupInit(pgBackup *backup);
extern void pgBackupCopy(pgBackup *dst, pgBackup *src);
extern void pgBackupFree(void *backup);
extern int pgBackupCompareId(const void *f1, const void *f2);
extern int pgBackupCompareIdDesc(const void *f1, const void *f2);
extern pgBackup* find_parent_backup(pgBackup *current_backup);
/* in dir.c */
extern void dir_list_file(parray *files, const char *root, bool exclude,
bool omit_symlink, bool add_root);
extern void list_data_directories(parray *files, const char *path,
bool is_root, bool exclude);
extern void create_data_directories(const char *data_dir,
const char *backup_dir,
bool extract_tablespaces);
extern void read_tablespace_map(parray *files, const char *backup_dir);
extern void opt_tablespace_map(pgut_option *opt, const char *arg);
extern void check_tablespace_mapping(pgBackup *backup);
extern void print_file_list(FILE *out, const parray *files, const char *root);
extern parray *dir_read_file_list(const char *root, const char *file_txt);
@ -455,26 +509,29 @@ extern int dir_create_dir(const char *path, mode_t mode);
extern bool dir_is_empty(const char *path);
extern bool fileExists(const char *path);
extern size_t pgFileSize(const char *path);
extern pgFile *pgFileNew(const char *path, bool omit_symlink);
extern pgFile *pgFileInit(const char *path);
extern void pgFileDelete(pgFile *file);
extern void pgFileFree(void *file);
extern pg_crc32 pgFileGetCRC(pgFile *file);
extern pg_crc32 pgFileGetCRC(const char *file_path);
extern int pgFileComparePath(const void *f1, const void *f2);
extern int pgFileComparePathDesc(const void *f1, const void *f2);
extern int pgFileCompareLinked(const void *f1, const void *f2);
extern int pgFileCompareSize(const void *f1, const void *f2);
/* in data.c */
extern bool backup_data_file(backup_files_args* arguments,
const char *from_root, const char *to_root,
pgFile *file, XLogRecPtr prev_backup_start_lsn,
BackupMode backup_mode);
extern void restore_data_file(const char *from_root, const char *to_root,
pgFile *file, pgBackup *backup);
extern bool copy_file(const char *from_root, const char *to_root,
pgFile *file);
extern bool backup_data_file(backup_files_arg* arguments,
const char *to_path, pgFile *file,
XLogRecPtr prev_backup_start_lsn,
BackupMode backup_mode,
CompressAlg calg, int clevel);
extern void restore_data_file(const char *to_path,
pgFile *file, bool allow_truncate,
bool write_header);
extern bool copy_file(const char *from_root, const char *to_root, pgFile *file);
extern void move_file(const char *from_root, const char *to_root, pgFile *file);
extern void push_wal_file(const char *from_path, const char *to_path,
bool is_compress, bool overwrite);
extern void get_wal_file(const char *from_path, const char *to_path);
@ -485,12 +542,13 @@ extern bool calc_file_checksum(pgFile *file);
extern void extractPageMap(const char *datadir,
XLogRecPtr startpoint,
TimeLineID tli,
XLogRecPtr endpoint, bool prev_segno,
XLogRecPtr endpoint, bool prev_seg,
parray *backup_files_list);
extern void validate_wal(pgBackup *backup,
const char *archivedir,
time_t target_time,
TransactionId target_xid,
XLogRecPtr target_lsn,
TimeLineID tli);
extern bool read_recovery_info(const char *archivedir, TimeLineID tli,
XLogRecPtr start_lsn, XLogRecPtr stop_lsn,
@ -513,9 +571,21 @@ extern long unsigned int base36dec(const char *text);
extern uint64 get_system_identifier(char *pgdata);
extern uint64 get_remote_system_identifier(PGconn *conn);
extern pg_time_t timestamptz_to_time_t(TimestampTz t);
extern void pgBackup_init(pgBackup *backup);
extern int parse_server_version(char *server_version_str);
/* in status.c */
extern bool is_pg_running(void);
#ifdef WIN32
#ifdef _DEBUG
#define lseek _lseek
#define open _open
#define fstat _fstat
#define read _read
#define close _close
#define write _write
#define mkdir(dir,mode) _mkdir(dir)
#endif
#endif
#endif /* PG_PROBACKUP_H */

View File

@ -17,76 +17,38 @@
#include <pthread.h>
#include "catalog/pg_control.h"
#include "utils/logger.h"
#include "utils/thread.h"
typedef struct
{
parray *files;
pgBackup *backup;
/*
* Return value from the thread.
* 0 means there is no error, 1 - there is an error.
*/
int ret;
} restore_files_args;
/* Tablespace mapping structures */
typedef struct TablespaceListCell
{
struct TablespaceListCell *next;
char old_dir[MAXPGPATH];
char new_dir[MAXPGPATH];
} TablespaceListCell;
typedef struct TablespaceList
{
TablespaceListCell *head;
TablespaceListCell *tail;
} TablespaceList;
typedef struct TablespaceCreatedListCell
{
struct TablespaceCreatedListCell *next;
char link_name[MAXPGPATH];
char linked_dir[MAXPGPATH];
} TablespaceCreatedListCell;
typedef struct TablespaceCreatedList
{
TablespaceCreatedListCell *head;
TablespaceCreatedListCell *tail;
} TablespaceCreatedList;
} restore_files_arg;
static void restore_backup(pgBackup *backup);
static void restore_directories(const char *pg_data_dir,
const char *backup_dir);
static void check_tablespace_mapping(pgBackup *backup);
static void create_recovery_conf(time_t backup_id,
pgRecoveryTarget *rt,
pgBackup *backup);
static void restore_files(void *arg);
static void *restore_files(void *arg);
static void remove_deleted_files(pgBackup *backup);
static const char *get_tablespace_mapping(const char *dir);
static void set_tablespace_created(const char *link, const char *dir);
static const char *get_tablespace_created(const char *link);
/* Tablespace mapping */
static TablespaceList tablespace_dirs = {NULL, NULL};
static TablespaceCreatedList tablespace_created_dirs = {NULL, NULL};
/*
* Entry point of pg_probackup RESTORE and VALIDATE subcommands.
*/
int
do_restore_or_validate(time_t target_backup_id,
pgRecoveryTarget *rt,
bool is_restore)
do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
bool is_restore)
{
int i;
int i = 0;
parray *backups;
parray *timelines;
pgBackup *current_backup = NULL;
pgBackup *dest_backup = NULL;
pgBackup *base_full_backup = NULL;
@ -94,7 +56,7 @@ do_restore_or_validate(time_t target_backup_id,
int dest_backup_index = 0;
int base_full_backup_index = 0;
int corrupted_backup_index = 0;
char *action = is_restore ? "Restore":"Validate";
char *action = is_restore ? "Restore":"Validate";
if (is_restore)
{
@ -115,13 +77,12 @@ do_restore_or_validate(time_t target_backup_id,
catalog_lock();
/* Get list of all backups sorted in order of descending start time */
backups = catalog_get_backup_list(INVALID_BACKUP_ID);
if (backups == NULL)
elog(ERROR, "Failed to get backup list.");
/* Find backup range we should restore or validate. */
for (i = 0; i < parray_num(backups); i++)
while ((i < parray_num(backups)) && !dest_backup)
{
current_backup = (pgBackup *) parray_get(backups, i);
i++;
/* Skip all backups which started after target backup */
if (target_backup_id && current_backup->start_time > target_backup_id)
@ -133,7 +94,6 @@ do_restore_or_validate(time_t target_backup_id,
*/
if (is_restore &&
!dest_backup &&
target_backup_id == INVALID_BACKUP_ID &&
current_backup->status != BACKUP_STATUS_OK)
{
@ -147,8 +107,7 @@ do_restore_or_validate(time_t target_backup_id,
* ensure that it satisfies recovery target.
*/
if ((target_backup_id == current_backup->start_time
|| target_backup_id == INVALID_BACKUP_ID)
&& !dest_backup)
|| target_backup_id == INVALID_BACKUP_ID))
{
/* backup is not ok,
@ -169,6 +128,8 @@ do_restore_or_validate(time_t target_backup_id,
if (rt->recovery_target_tli)
{
parray *timelines;
elog(LOG, "target timeline ID = %u", rt->recovery_target_tli);
/* Read timeline history files from archives */
timelines = readTimeLineHistory_probackup(rt->recovery_target_tli);
@ -199,37 +160,42 @@ do_restore_or_validate(time_t target_backup_id,
* Save it as dest_backup
*/
dest_backup = current_backup;
dest_backup_index = i;
dest_backup_index = i-1;
}
}
if (dest_backup == NULL)
elog(ERROR, "Backup satisfying target options is not found.");
/* If we already found dest_backup, look for full backup. */
if (dest_backup)
{
base_full_backup = current_backup;
if (current_backup->backup_mode != BACKUP_MODE_FULL)
{
base_full_backup = find_parent_backup(current_backup);
if (base_full_backup == NULL)
elog(ERROR, "Valid full backup for backup %s is not found.",
base36enc(current_backup->start_time));
}
/* If we already found dest_backup, look for full backup. */
if (dest_backup)
/*
* We have found full backup by link,
* now we need to walk the list to find its index.
*
* TODO I think we should rewrite it someday to use double linked list
* and avoid relying on sort order anymore.
*/
for (i = dest_backup_index; i < parray_num(backups); i++)
{
if (current_backup->backup_mode == BACKUP_MODE_FULL)
pgBackup * temp_backup = (pgBackup *) parray_get(backups, i);
if (temp_backup->start_time == base_full_backup->start_time)
{
if (current_backup->status != BACKUP_STATUS_OK)
{
/* Full backup revalidation can be done only for DONE and CORRUPT */
if (current_backup->status == BACKUP_STATUS_DONE ||
current_backup->status == BACKUP_STATUS_CORRUPT)
elog(WARNING, "base backup %s for given backup %s is in %s status, trying to revalidate",
base36enc_dup(current_backup->start_time),
base36enc_dup(dest_backup->start_time),
status2str(current_backup->status));
else
elog(ERROR, "base backup %s for given backup %s is in %s status",
base36enc_dup(current_backup->start_time),
base36enc_dup(dest_backup->start_time),
status2str(current_backup->status));
}
/* We found both dest and base backups. */
base_full_backup = current_backup;
base_full_backup_index = i;
break;
}
else
/* It's ok to skip incremental backup */
continue;
}
}
@ -243,66 +209,71 @@ do_restore_or_validate(time_t target_backup_id,
if (is_restore)
check_tablespace_mapping(dest_backup);
if (dest_backup->backup_mode != BACKUP_MODE_FULL)
elog(INFO, "Validating parents for backup %s", base36enc(dest_backup->start_time));
/*
* Validate backups from base_full_backup to dest_backup.
*/
for (i = base_full_backup_index; i >= dest_backup_index; i--)
if (!is_restore || !rt->restore_no_validate)
{
pgBackup *backup = (pgBackup *) parray_get(backups, i);
pgBackupValidate(backup);
/* Maybe we should be more paranoid and check for !BACKUP_STATUS_OK? */
if (backup->status == BACKUP_STATUS_CORRUPT)
{
corrupted_backup = backup;
corrupted_backup_index = i;
break;
}
/* We do not validate WAL files of intermediate backups
* It's done to speed up restore
*/
}
/* There is no point in wal validation
* if there is corrupted backup between base_backup and dest_backup
*/
if (!corrupted_backup)
if (dest_backup->backup_mode != BACKUP_MODE_FULL)
elog(INFO, "Validating parents for backup %s", base36enc(dest_backup->start_time));
/*
* Validate corresponding WAL files.
* We pass base_full_backup timeline as last argument to this function,
* because it's needed to form the name of xlog file.
* Validate backups from base_full_backup to dest_backup.
*/
validate_wal(dest_backup, arclog_path, rt->recovery_target_time,
rt->recovery_target_xid, base_full_backup->tli);
/* Set every incremental backup between corrupted backup and nearest FULL backup as orphans */
if (corrupted_backup)
{
for (i = corrupted_backup_index - 1; i >= 0; i--)
for (i = base_full_backup_index; i >= dest_backup_index; i--)
{
pgBackup *backup = (pgBackup *) parray_get(backups, i);
/* Mark incremental OK backup as orphan */
if (backup->backup_mode == BACKUP_MODE_FULL)
break;
if (backup->status != BACKUP_STATUS_OK)
continue;
else
pgBackupValidate(backup);
/* Maybe we should be more paranoid and check for !BACKUP_STATUS_OK? */
if (backup->status == BACKUP_STATUS_CORRUPT)
{
char *backup_id,
*corrupted_backup_id;
corrupted_backup = backup;
corrupted_backup_index = i;
break;
}
/* We do not validate WAL files of intermediate backups
* It's done to speed up restore
*/
}
/* There is no point in wal validation
* if there is corrupted backup between base_backup and dest_backup
*/
if (!corrupted_backup)
/*
* Validate corresponding WAL files.
* We pass base_full_backup timeline as last argument to this function,
* because it's needed to form the name of xlog file.
*/
validate_wal(dest_backup, arclog_path, rt->recovery_target_time,
rt->recovery_target_xid, rt->recovery_target_lsn,
base_full_backup->tli);
backup->status = BACKUP_STATUS_ORPHAN;
pgBackupWriteBackupControlFile(backup);
/* Set every incremental backup between corrupted backup and nearest FULL backup as orphans */
if (corrupted_backup)
{
for (i = corrupted_backup_index - 1; i >= 0; i--)
{
pgBackup *backup = (pgBackup *) parray_get(backups, i);
/* Mark incremental OK backup as orphan */
if (backup->backup_mode == BACKUP_MODE_FULL)
break;
if (backup->status != BACKUP_STATUS_OK)
continue;
else
{
char *backup_id,
*corrupted_backup_id;
backup_id = base36enc_dup(backup->start_time);
corrupted_backup_id = base36enc_dup(corrupted_backup->start_time);
backup->status = BACKUP_STATUS_ORPHAN;
pgBackupWriteBackupControlFile(backup);
elog(WARNING, "Backup %s is orphaned because his parent %s is corrupted",
backup_id, corrupted_backup_id);
backup_id = base36enc_dup(backup->start_time);
corrupted_backup_id = base36enc_dup(corrupted_backup->start_time);
free(backup_id);
free(corrupted_backup_id);
elog(WARNING, "Backup %s is orphaned because his parent %s is corrupted",
backup_id, corrupted_backup_id);
free(backup_id);
free(corrupted_backup_id);
}
}
}
}
@ -312,7 +283,12 @@ do_restore_or_validate(time_t target_backup_id,
* produce corresponding error message
*/
if (dest_backup->status == BACKUP_STATUS_OK)
elog(INFO, "Backup %s is valid.", base36enc(dest_backup->start_time));
{
if (rt->restore_no_validate)
elog(INFO, "Backup %s is used without validation.", base36enc(dest_backup->start_time));
else
elog(INFO, "Backup %s is valid.", base36enc(dest_backup->start_time));
}
else if (dest_backup->status == BACKUP_STATUS_CORRUPT)
elog(ERROR, "Backup %s is corrupt.", base36enc(dest_backup->start_time));
else if (dest_backup->status == BACKUP_STATUS_ORPHAN)
@ -327,6 +303,11 @@ do_restore_or_validate(time_t target_backup_id,
for (i = base_full_backup_index; i >= dest_backup_index; i--)
{
pgBackup *backup = (pgBackup *) parray_get(backups, i);
if (rt->lsn_specified && parse_server_version(backup->server_version) < 100000)
elog(ERROR, "Backup %s was created for version %s which doesn't support recovery_target_lsn",
base36enc(dest_backup->start_time), dest_backup->server_version);
restore_backup(backup);
}
@ -362,8 +343,9 @@ restore_backup(pgBackup *backup)
char list_path[MAXPGPATH];
parray *files;
int i;
pthread_t restore_threads[num_threads];
restore_files_args *restore_threads_args[num_threads];
/* arrays with meta info for multi threaded backup */
pthread_t *threads;
restore_files_arg *threads_args;
bool restore_isok = true;
if (backup->status != BACKUP_STATUS_OK)
@ -388,7 +370,7 @@ restore_backup(pgBackup *backup)
* this_backup_path = $BACKUP_PATH/backups/instance_name/backup_id
*/
pgBackupGetPath(backup, this_backup_path, lengthof(this_backup_path), NULL);
restore_directories(pgdata, this_backup_path);
create_data_directories(pgdata, this_backup_path, true);
/*
* Get list of files which need to be restored.
@ -397,46 +379,50 @@ restore_backup(pgBackup *backup)
pgBackupGetPath(backup, list_path, lengthof(list_path), DATABASE_FILE_LIST);
files = dir_read_file_list(database_path, list_path);
threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads);
threads_args = (restore_files_arg *) palloc(sizeof(restore_files_arg)*num_threads);
/* setup threads */
for (i = 0; i < parray_num(files); i++)
{
pgFile *file = (pgFile *) parray_get(files, i);
__sync_lock_release(&file->lock);
pgFile *file = (pgFile *) parray_get(files, i);
pg_atomic_clear_flag(&file->lock);
}
/* Restore files into target directory */
for (i = 0; i < num_threads; i++)
{
restore_files_args *arg = pg_malloc(sizeof(restore_files_args));
restore_files_arg *arg = &(threads_args[i]);
arg->files = files;
arg->backup = backup;
/* By default there are some error */
arg->ret = 1;
threads_args[i].ret = 1;
elog(LOG, "Start thread for num:%li", parray_num(files));
restore_threads_args[i] = arg;
pthread_create(&restore_threads[i], NULL,
(void *(*)(void *)) restore_files, arg);
pthread_create(&threads[i], NULL, restore_files, arg);
}
/* Wait for threads */
for (i = 0; i < num_threads; i++)
{
pthread_join(restore_threads[i], NULL);
if (restore_threads_args[i]->ret == 1)
pthread_join(threads[i], NULL);
if (threads_args[i].ret == 1)
restore_isok = false;
pg_free(restore_threads_args[i]);
}
if (!restore_isok)
elog(ERROR, "Data files restoring failed");
pfree(threads);
pfree(threads_args);
/* cleanup */
parray_walk(files, pgFileFree);
parray_free(files);
if (LOG_LEVEL_CONSOLE <= LOG || LOG_LEVEL_FILE <= LOG)
if (log_level_console <= LOG || log_level_file <= LOG)
elog(LOG, "restore %s backup completed", base36enc(backup->start_time));
}
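/*
 * Design note (assumption): the switch above from C99 variable-length
 * arrays to palloc()'d thread arrays is most plausibly for the Windows
 * build added by this merge, since MSVC does not support VLAs.
 */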
@ -452,7 +438,7 @@ remove_deleted_files(pgBackup *backup)
parray *files;
parray *files_restored;
char filelist_path[MAXPGPATH];
int i;
pgBackupGetPath(backup, filelist_path, lengthof(filelist_path), DATABASE_FILE_LIST);
/* Read backup's filelist using target database path as base path */
@ -473,7 +459,7 @@ remove_deleted_files(pgBackup *backup)
if (parray_bsearch(files, file, pgFileComparePathDesc) == NULL)
{
pgFileDelete(file);
if (LOG_LEVEL_CONSOLE <= LOG || LOG_LEVEL_FILE <= LOG)
if (log_level_console <= LOG || log_level_file <= LOG)
elog(LOG, "deleted %s", GetRelativePath(file->path, pgdata));
}
}
@ -485,228 +471,14 @@ remove_deleted_files(pgBackup *backup)
parray_free(files_restored);
}
/*
* Restore backup directories from **backup_database_dir** to **pg_data_dir**.
*
* TODO: Think about simplification and clarity of the function.
*/
static void
restore_directories(const char *pg_data_dir, const char *backup_dir)
{
parray *dirs,
*links;
size_t i;
char backup_database_dir[MAXPGPATH],
to_path[MAXPGPATH];
dirs = parray_new();
links = parray_new();
join_path_components(backup_database_dir, backup_dir, DATABASE_DIR);
list_data_directories(dirs, backup_database_dir, true, false);
read_tablespace_map(links, backup_dir);
elog(LOG, "restore directories and symlinks...");
for (i = 0; i < parray_num(dirs); i++)
{
pgFile *dir = (pgFile *) parray_get(dirs, i);
char *relative_ptr = GetRelativePath(dir->path, backup_database_dir);
Assert(S_ISDIR(dir->mode));
/* First try to create symlink and linked directory */
if (path_is_prefix_of_path(PG_TBLSPC_DIR, relative_ptr))
{
char *link_ptr = GetRelativePath(relative_ptr, PG_TBLSPC_DIR),
*link_sep,
*tmp_ptr;
char link_name[MAXPGPATH];
pgFile **link;
/* Extract link name from relative path */
link_sep = first_dir_separator(link_ptr);
if (link_sep != NULL)
{
int len = link_sep - link_ptr;
strncpy(link_name, link_ptr, len);
link_name[len] = '\0';
}
else
goto create_directory;
tmp_ptr = dir->path;
dir->path = link_name;
/* Search only by symlink name without path */
link = (pgFile **) parray_bsearch(links, dir, pgFileComparePath);
dir->path = tmp_ptr;
if (link)
{
const char *linked_path = get_tablespace_mapping((*link)->linked);
const char *dir_created;
if (!is_absolute_path(linked_path))
elog(ERROR, "tablespace directory is not an absolute path: %s\n",
linked_path);
/* Check if linked directory was created earlier */
dir_created = get_tablespace_created(link_name);
if (dir_created)
{
/*
* If symlink and linked directory were created do not
* create it second time.
*/
if (strcmp(dir_created, linked_path) == 0)
{
/*
* Create rest of directories.
* First check is there any directory name after
* separator.
*/
if (link_sep != NULL && *(link_sep + 1) != '\0')
goto create_directory;
else
continue;
}
else
elog(ERROR, "tablespace directory \"%s\" of page backup does not "
"match with previous created tablespace directory \"%s\" of symlink \"%s\"",
linked_path, dir_created, link_name);
}
/*
* This check was done in check_tablespace_mapping(). But do
* it again.
*/
if (!dir_is_empty(linked_path))
elog(ERROR, "restore tablespace destination is not empty: \"%s\"",
linked_path);
if (link_sep)
elog(LOG, "create directory \"%s\" and symbolic link \"%.*s\"",
linked_path,
(int) (link_sep - relative_ptr), relative_ptr);
else
elog(LOG, "create directory \"%s\" and symbolic link \"%s\"",
linked_path, relative_ptr);
/* Firstly, create linked directory */
dir_create_dir(linked_path, DIR_PERMISSION);
join_path_components(to_path, pg_data_dir, PG_TBLSPC_DIR);
/* Create pg_tblspc directory just in case */
dir_create_dir(to_path, DIR_PERMISSION);
/* Secondly, create link */
join_path_components(to_path, to_path, link_name);
if (symlink(linked_path, to_path) < 0)
elog(ERROR, "could not create symbolic link \"%s\": %s",
to_path, strerror(errno));
/* Save linked directory */
set_tablespace_created(link_name, linked_path);
/*
* Create rest of directories.
* First check is there any directory name after separator.
*/
if (link_sep != NULL && *(link_sep + 1) != '\0')
goto create_directory;
continue;
}
}
create_directory:
elog(LOG, "create directory \"%s\"", relative_ptr);
/* This is not symlink, create directory */
join_path_components(to_path, pg_data_dir, relative_ptr);
dir_create_dir(to_path, DIR_PERMISSION);
}
parray_walk(links, pgFileFree);
parray_free(links);
parray_walk(dirs, pgFileFree);
parray_free(dirs);
}
/*
* Check that all tablespace mapping entries have correct linked directory
* paths. Linked directories must be empty or do not exist.
*
* If tablespace-mapping option is supplied, all OLDDIR entries must have
* entries in tablespace_map file.
*/
static void
check_tablespace_mapping(pgBackup *backup)
{
char this_backup_path[MAXPGPATH];
parray *links;
size_t i;
TablespaceListCell *cell;
pgFile *tmp_file = pgut_new(pgFile);
links = parray_new();
pgBackupGetPath(backup, this_backup_path, lengthof(this_backup_path), NULL);
read_tablespace_map(links, this_backup_path);
if (LOG_LEVEL_CONSOLE <= LOG || LOG_LEVEL_FILE <= LOG)
elog(LOG, "check tablespace directories of backup %s",
base36enc(backup->start_time));
/* 1 - each OLDDIR must have an entry in tablespace_map file (links) */
for (cell = tablespace_dirs.head; cell; cell = cell->next)
{
tmp_file->linked = cell->old_dir;
if (parray_bsearch(links, tmp_file, pgFileCompareLinked) == NULL)
elog(ERROR, "--tablespace-mapping option's old directory "
"doesn't have an entry in tablespace_map file: \"%s\"",
cell->old_dir);
}
/* 2 - all linked directories must be empty */
for (i = 0; i < parray_num(links); i++)
{
pgFile *link = (pgFile *) parray_get(links, i);
const char *linked_path = link->linked;
TablespaceListCell *cell;
for (cell = tablespace_dirs.head; cell; cell = cell->next)
if (strcmp(link->linked, cell->old_dir) == 0)
{
linked_path = cell->new_dir;
break;
}
if (!is_absolute_path(linked_path))
elog(ERROR, "tablespace directory is not an absolute path: %s\n",
linked_path);
if (!dir_is_empty(linked_path))
elog(ERROR, "restore tablespace destination is not empty: \"%s\"",
linked_path);
}
free(tmp_file);
parray_walk(links, pgFileFree);
parray_free(links);
}
/*
* Restore files into $PGDATA.
*/
static void
static void *
restore_files(void *arg)
{
int i;
restore_files_args *arguments = (restore_files_args *)arg;
restore_files_arg *arguments = (restore_files_arg *)arg;
for (i = 0; i < parray_num(arguments->files); i++)
{
@ -714,7 +486,7 @@ restore_files(void *arg)
char *rel_path;
pgFile *file = (pgFile *) parray_get(arguments->files, i);
if (__sync_lock_test_and_set(&file->lock, 1) != 0)
if (!pg_atomic_test_set_flag(&file->lock))
continue;
pgBackupGetPath(arguments->backup, from_root,
@ -730,7 +502,6 @@ restore_files(void *arg)
elog(LOG, "Progress: (%d/%lu). Process file %s ",
i + 1, (unsigned long) parray_num(arguments->files), rel_path);
/*
* For PAGE and PTRACK backups skip files which haven't changed
* since previous backup and thus were not backed up.
@ -741,11 +512,11 @@ restore_files(void *arg)
(arguments->backup->backup_mode == BACKUP_MODE_DIFF_PAGE
|| arguments->backup->backup_mode == BACKUP_MODE_DIFF_PTRACK))
{
elog(VERBOSE, "The file didn`t changed. Skip restore: %s", file->path);
elog(VERBOSE, "The file didn`t change. Skip restore: %s", file->path);
continue;
}
/* Directories was created before */
/* Directories were created before */
if (S_ISDIR(file->mode))
{
elog(VERBOSE, "directory, skip");
@ -765,20 +536,31 @@ restore_files(void *arg)
* block and have BackupPageHeader meta information, so we cannot just
* copy the file from backup.
*/
elog(VERBOSE, "Restoring file %s, is_datafile %i, is_cfs %i", file->path, file->is_datafile?1:0, file->is_cfs?1:0);
elog(VERBOSE, "Restoring file %s, is_datafile %i, is_cfs %i",
file->path, file->is_datafile?1:0, file->is_cfs?1:0);
if (file->is_datafile && !file->is_cfs)
restore_data_file(from_root, pgdata, file, arguments->backup);
{
char to_path[MAXPGPATH];
join_path_components(to_path, pgdata,
file->path + strlen(from_root) + 1);
restore_data_file(to_path, file,
arguments->backup->backup_mode == BACKUP_MODE_DIFF_DELTA,
false);
}
else
copy_file(from_root, pgdata, file);
/* print size of restored file */
if (file->write_size != BYTES_INVALID)
elog(LOG, "Restored file %s : %lu bytes",
file->path, (unsigned long) file->write_size);
elog(LOG, "Restored file %s : " INT64_FORMAT " bytes",
file->path, file->write_size);
}
/* Data files restoring is successful */
arguments->ret = 0;
return NULL;
}
/* Create recovery.conf with given recovery target parameters */
@ -787,9 +569,9 @@ create_recovery_conf(time_t backup_id,
pgRecoveryTarget *rt,
pgBackup *backup)
{
char path[MAXPGPATH];
FILE *fp;
bool need_restore_conf = false;
char path[MAXPGPATH];
FILE *fp;
bool need_restore_conf = false;
if (!backup->stream
|| (rt->time_specified || rt->xid_specified))
@ -831,6 +613,9 @@ create_recovery_conf(time_t backup_id,
if (rt->xid_specified)
fprintf(fp, "recovery_target_xid = '%s'\n", rt->target_xid_string);
if (rt->recovery_target_lsn)
fprintf(fp, "recovery_target_lsn = '%s'\n", rt->target_lsn_string);
if (rt->recovery_target_immediate)
fprintf(fp, "recovery_target = 'immediate'\n");
@ -959,7 +744,8 @@ readTimeLineHistory_probackup(TimeLineID targetTLI)
entry = pgut_new(TimeLineHistoryEntry);
entry->tli = targetTLI;
/* LSN in target timeline is valid */
entry->end = (uint32) (-1UL << 32) | -1UL;
/* TODO ensure that -1UL --> -1L fix is correct */
entry->end = (uint32) (-1L << 32) | -1L;
parray_insert(result, 0, entry);
return result;
@ -974,16 +760,22 @@ satisfy_recovery_target(const pgBackup *backup, const pgRecoveryTarget *rt)
if (rt->time_specified)
return backup->recovery_time <= rt->recovery_target_time;
if (rt->lsn_specified)
return backup->stop_lsn <= rt->recovery_target_lsn;
return true;
}
bool
satisfy_timeline(const parray *timelines, const pgBackup *backup)
{
int i;
int i;
for (i = 0; i < parray_num(timelines); i++)
{
TimeLineHistoryEntry *timeline = (TimeLineHistoryEntry *) parray_get(timelines, i);
TimeLineHistoryEntry *timeline;
timeline = (TimeLineHistoryEntry *) parray_get(timelines, i);
if (backup->tli == timeline->tli &&
backup->stop_lsn < timeline->end)
return true;
@ -999,33 +791,40 @@ parseRecoveryTargetOptions(const char *target_time,
const char *target_xid,
const char *target_inclusive,
TimeLineID target_tli,
const char *target_lsn,
bool target_immediate,
const char *target_name,
const char *target_action)
const char *target_action,
bool restore_no_validate)
{
time_t dummy_time;
TransactionId dummy_xid;
bool dummy_bool;
time_t dummy_time;
TransactionId dummy_xid;
bool dummy_bool;
XLogRecPtr dummy_lsn;
/*
* count the number of the mutually exclusive options which may specify
* recovery target. If final value > 1, throw an error.
*/
int recovery_target_specified = 0;
int recovery_target_specified = 0;
pgRecoveryTarget *rt = pgut_new(pgRecoveryTarget);
/* fill all options with default values */
rt->time_specified = false;
rt->xid_specified = false;
rt->inclusive_specified = false;
rt->lsn_specified = false;
rt->recovery_target_time = 0;
rt->recovery_target_xid = 0;
rt->recovery_target_lsn = InvalidXLogRecPtr;
rt->target_time_string = NULL;
rt->target_xid_string = NULL;
rt->target_lsn_string = NULL;
rt->recovery_target_inclusive = false;
rt->recovery_target_tli = 0;
rt->recovery_target_immediate = false;
rt->recovery_target_name = NULL;
rt->recovery_target_action = NULL;
rt->restore_no_validate = false;
/* parse given options */
if (target_time)
@ -1034,7 +833,7 @@ parseRecoveryTargetOptions(const char *target_time,
rt->time_specified = true;
rt->target_time_string = target_time;
if (parse_time(target_time, &dummy_time))
if (parse_time(target_time, &dummy_time, false))
rt->recovery_target_time = dummy_time;
else
elog(ERROR, "Invalid value of --time option %s", target_time);
@ -1056,6 +855,17 @@ parseRecoveryTargetOptions(const char *target_time,
elog(ERROR, "Invalid value of --xid option %s", target_xid);
}
if (target_lsn)
{
recovery_target_specified++;
rt->lsn_specified = true;
rt->target_lsn_string = target_lsn;
if (parse_lsn(target_lsn, &dummy_lsn))
rt->recovery_target_lsn = dummy_lsn;
else
elog(ERROR, "Invalid value of --lsn option %s", target_lsn);
}
if (target_inclusive)
{
rt->inclusive_specified = true;
@ -1072,6 +882,11 @@ parseRecoveryTargetOptions(const char *target_time,
rt->recovery_target_immediate = target_immediate;
}
if (restore_no_validate)
{
rt->restore_no_validate = restore_no_validate;
}
if (target_name)
{
recovery_target_specified++;
@ -1095,123 +910,11 @@ parseRecoveryTargetOptions(const char *target_time,
/* More than one mutually exclusive option was defined. */
if (recovery_target_specified > 1)
elog(ERROR, "At most one of --immediate, --target-name, --time, or --xid can be used");
elog(ERROR, "At most one of --immediate, --target-name, --time, --xid, or --lsn can be used");
/* If none of the options is defined, '--inclusive' option is meaningless */
if (!(rt->xid_specified || rt->time_specified) && rt->recovery_target_inclusive)
if (!(rt->xid_specified || rt->time_specified || rt->lsn_specified) && rt->recovery_target_inclusive)
elog(ERROR, "--inclusive option applies when either --time or --xid is specified");
return rt;
}
/*
* Split argument into old_dir and new_dir and append to tablespace mapping
* list.
*
* Copy of function tablespace_list_append() from pg_basebackup.c.
*/
void
opt_tablespace_map(pgut_option *opt, const char *arg)
{
TablespaceListCell *cell = pgut_new(TablespaceListCell);
char *dst;
char *dst_ptr;
const char *arg_ptr;
dst_ptr = dst = cell->old_dir;
for (arg_ptr = arg; *arg_ptr; arg_ptr++)
{
if (dst_ptr - dst >= MAXPGPATH)
elog(ERROR, "directory name too long");
if (*arg_ptr == '\\' && *(arg_ptr + 1) == '=')
; /* skip backslash escaping = */
else if (*arg_ptr == '=' && (arg_ptr == arg || *(arg_ptr - 1) != '\\'))
{
if (*cell->new_dir)
elog(ERROR, "multiple \"=\" signs in tablespace mapping\n");
else
dst = dst_ptr = cell->new_dir;
}
else
*dst_ptr++ = *arg_ptr;
}
if (!*cell->old_dir || !*cell->new_dir)
elog(ERROR, "invalid tablespace mapping format \"%s\", "
"must be \"OLDDIR=NEWDIR\"", arg);
/*
* This check isn't absolutely necessary. But all tablespaces are created
* with absolute directories, so specifying a non-absolute path here would
* just never match, possibly confusing users. It's also good to be
* consistent with the new_dir check.
*/
if (!is_absolute_path(cell->old_dir))
elog(ERROR, "old directory is not an absolute path in tablespace mapping: %s\n",
cell->old_dir);
if (!is_absolute_path(cell->new_dir))
elog(ERROR, "new directory is not an absolute path in tablespace mapping: %s\n",
cell->new_dir);
if (tablespace_dirs.tail)
tablespace_dirs.tail->next = cell;
else
tablespace_dirs.head = cell;
tablespace_dirs.tail = cell;
}
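Illustration only, not part of the patch: a minimal standalone sketch of the OLDDIR=NEWDIR splitting performed above, including the backslash escape for a literal '='; the sample path is invented and bounds checking is simplified.

#include <stdio.h>

int
main(void)
{
	const char *arg = "/mnt/old\\=dir=/mnt/new";	/* "\=" escapes a literal '=' */
	char		old_dir[1024] = "";
	char		new_dir[1024] = "";
	char	   *dst = old_dir;
	size_t		n = 0;
	const char *p;

	for (p = arg; *p; p++)
	{
		if (*p == '\\' && *(p + 1) == '=')
			;					/* skip backslash escaping '=' */
		else if (*p == '=' && (p == arg || *(p - 1) != '\\'))
		{
			dst[n] = '\0';
			dst = new_dir;		/* switch to filling the new directory */
			n = 0;
		}
		else
			dst[n++] = *p;
	}
	dst[n] = '\0';

	printf("old: %s\nnew: %s\n", old_dir, new_dir);	/* old: /mnt/old=dir, new: /mnt/new */
	return 0;
}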
/*
* Retrieve tablespace path, either relocated or original depending on whether
* -T was passed or not.
*
* Copy of function get_tablespace_mapping() from pg_basebackup.c.
*/
static const char *
get_tablespace_mapping(const char *dir)
{
TablespaceListCell *cell;
for (cell = tablespace_dirs.head; cell; cell = cell->next)
if (strcmp(dir, cell->old_dir) == 0)
return cell->new_dir;
return dir;
}
/*
* Save the created directory path into memory. We can use it during the
* next page restore to avoid raising the error "restore tablespace
* destination is not empty" in restore_directories().
*/
static void
set_tablespace_created(const char *link, const char *dir)
{
TablespaceCreatedListCell *cell = pgut_new(TablespaceCreatedListCell);
strcpy(cell->link_name, link);
strcpy(cell->linked_dir, dir);
cell->next = NULL;
if (tablespace_created_dirs.tail)
tablespace_created_dirs.tail->next = cell;
else
tablespace_created_dirs.head = cell;
tablespace_created_dirs.tail = cell;
}
/*
* Check whether the directory was created when the symlink was created in
* restore_directories().
*/
static const char *
get_tablespace_created(const char *link)
{
TablespaceCreatedListCell *cell;
for (cell = tablespace_created_dirs.head; cell; cell = cell->next)
if (strcmp(link, cell->link_name) == 0)
return cell->linked_dir;
return NULL;
}

View File

@ -3,28 +3,40 @@
* show.c: show backup information.
*
* Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
* Portions Copyright (c) 2015-2017, Postgres Professional
* Portions Copyright (c) 2015-2018, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#include "pg_probackup.h"
#include <time.h>
#include <unistd.h>
#include <dirent.h>
#include <sys/stat.h>
#include "pqexpbuffer.h"
static void show_backup_list(FILE *out, parray *backup_list);
static void show_backup_detail(FILE *out, pgBackup *backup);
static int do_show_instance(time_t requested_backup_id);
#include "utils/json.h"
static void show_instance_start(void);
static void show_instance_end(void);
static void show_instance(time_t requested_backup_id, bool show_name);
static int show_backup(time_t requested_backup_id);
static void show_instance_plain(parray *backup_list, bool show_name);
static void show_instance_json(parray *backup_list);
static PQExpBufferData show_buf;
static bool first_instance = true;
static int32 json_level = 0;
int
do_show(time_t requested_backup_id)
{
if (instance_name == NULL
&& requested_backup_id != INVALID_BACKUP_ID)
if (instance_name == NULL &&
requested_backup_id != INVALID_BACKUP_ID)
elog(ERROR, "You must specify --instance to use --backup_id option");
if (instance_name == NULL)
@ -38,10 +50,12 @@ do_show(time_t requested_backup_id)
join_path_components(path, backup_path, BACKUPS_DIR);
dir = opendir(path);
if (dir == NULL)
elog(ERROR, "cannot open directory \"%s\": %s", path, strerror(errno));
elog(ERROR, "Cannot open directory \"%s\": %s",
path, strerror(errno));
errno = 0;
while ((dent = readdir(dir)))
show_instance_start();
while (errno = 0, (dent = readdir(dir)) != NULL)
{
char child[MAXPGPATH];
struct stat st;
@ -54,73 +68,47 @@ do_show(time_t requested_backup_id)
join_path_components(child, path, dent->d_name);
if (lstat(child, &st) == -1)
elog(ERROR, "cannot stat file \"%s\": %s", child, strerror(errno));
elog(ERROR, "Cannot stat file \"%s\": %s",
child, strerror(errno));
if (!S_ISDIR(st.st_mode))
continue;
instance_name = dent->d_name;
sprintf(backup_instance_path, "%s/%s/%s", backup_path, BACKUPS_DIR, instance_name);
fprintf(stdout, "\nBACKUP INSTANCE '%s'\n", instance_name);
do_show_instance(0);
show_instance(INVALID_BACKUP_ID, true);
}
if (errno)
elog(ERROR, "Cannot read directory \"%s\": %s",
path, strerror(errno));
if (closedir(dir))
elog(ERROR, "Cannot close directory \"%s\": %s",
path, strerror(errno));
show_instance_end();
return 0;
}
else if (requested_backup_id == INVALID_BACKUP_ID ||
show_format == SHOW_JSON)
{
show_instance_start();
show_instance(requested_backup_id, false);
show_instance_end();
return 0;
}
else
return do_show_instance(requested_backup_id);
}
/*
* If 'requested_backup_id' is INVALID_BACKUP_ID, show brief meta information
* about all backups in the backup instance.
* If valid backup id is passed, show detailed meta information
* about specified backup.
*/
static int
do_show_instance(time_t requested_backup_id)
{
if (requested_backup_id != INVALID_BACKUP_ID)
{
pgBackup *backup;
backup = read_backup(requested_backup_id);
if (backup == NULL)
{
elog(INFO, "Requested backup \"%s\" is not found.",
/* We do not need to free base36enc's result, we exit anyway */
base36enc(requested_backup_id));
/* This is not an error */
return 0;
}
show_backup_detail(stdout, backup);
/* cleanup */
pgBackupFree(backup);
}
else
{
parray *backup_list;
backup_list = catalog_get_backup_list(INVALID_BACKUP_ID);
if (backup_list == NULL)
elog(ERROR, "Failed to get backup list.");
show_backup_list(stdout, backup_list);
/* cleanup */
parray_walk(backup_list, pgBackupFree);
parray_free(backup_list);
}
return 0;
return show_backup(requested_backup_id);
}
static void
pretty_size(int64 size, char *buf, size_t len)
{
int exp = 0;
int exp = 0;
/* minus means the size is invalid */
if (size < 0)
@ -219,16 +207,111 @@ get_parent_tli(TimeLineID child_tli)
return result;
}
/*
* Initialize instance visualization.
*/
static void
show_backup_list(FILE *out, parray *backup_list)
show_instance_start(void)
{
initPQExpBuffer(&show_buf);
if (show_format == SHOW_PLAIN)
return;
first_instance = true;
json_level = 0;
appendPQExpBufferChar(&show_buf, '[');
json_level++;
}
/*
* Finalize instance visualization.
*/
static void
show_instance_end(void)
{
if (show_format == SHOW_JSON)
appendPQExpBufferStr(&show_buf, "\n]\n");
fputs(show_buf.data, stdout);
termPQExpBuffer(&show_buf);
}
/*
* Show brief meta information about all backups in the backup instance.
*/
static void
show_instance(time_t requested_backup_id, bool show_name)
{
parray *backup_list;
backup_list = catalog_get_backup_list(requested_backup_id);
if (show_format == SHOW_PLAIN)
show_instance_plain(backup_list, show_name);
else if (show_format == SHOW_JSON)
show_instance_json(backup_list);
else
elog(ERROR, "Invalid show format %d", (int) show_format);
/* cleanup */
parray_walk(backup_list, pgBackupFree);
parray_free(backup_list);
}
/*
* Show detailed meta information about specified backup.
*/
static int
show_backup(time_t requested_backup_id)
{
pgBackup *backup;
backup = read_backup(requested_backup_id);
if (backup == NULL)
{
elog(INFO, "Requested backup \"%s\" is not found.",
/* We do not need to free base36enc's result, we exit anyway */
base36enc(requested_backup_id));
/* This is not an error */
return 0;
}
if (show_format == SHOW_PLAIN)
pgBackupWriteControl(stdout, backup);
else
elog(ERROR, "Invalid show format %d", (int) show_format);
/* cleanup */
pgBackupFree(backup);
return 0;
}
/*
* Plain output.
*/
/*
* Show instance backups in plain format.
*/
static void
show_instance_plain(parray *backup_list, bool show_name)
{
int i;
if (show_name)
printfPQExpBuffer(&show_buf, "\nBACKUP INSTANCE '%s'\n", instance_name);
/* if you add new fields here, fix the header */
/* show header */
fputs("============================================================================================================================================\n", out);
fputs(" Instance Version ID Recovery time Mode WAL Current/Parent TLI Time Data Start LSN Stop LSN Status \n", out);
fputs("============================================================================================================================================\n", out);
appendPQExpBufferStr(&show_buf,
"============================================================================================================================================\n");
appendPQExpBufferStr(&show_buf,
" Instance Version ID Recovery time Mode WAL Current/Parent TLI Time Data Start LSN Stop LSN Status \n");
appendPQExpBufferStr(&show_buf,
"============================================================================================================================================\n");
for (i = 0; i < parray_num(backup_list); i++)
{
@ -255,27 +338,163 @@ show_backup_list(FILE *out, parray *backup_list)
/* Get parent timeline before printing */
parent_tli = get_parent_tli(backup->tli);
fprintf(out, " %-11s %-8s %-6s %-22s %-6s %-7s %3d / %-3d %5s %6s %2X/%-8X %2X/%-8X %-8s\n",
instance_name,
(backup->server_version[0] ? backup->server_version : "----"),
base36enc(backup->start_time),
timestamp,
pgBackupGetBackupMode(backup),
backup->stream ? "STREAM": "ARCHIVE",
backup->tli,
parent_tli,
duration,
data_bytes_str,
(uint32) (backup->start_lsn >> 32),
(uint32) backup->start_lsn,
(uint32) (backup->stop_lsn >> 32),
(uint32) backup->stop_lsn,
status2str(backup->status));
appendPQExpBuffer(&show_buf,
" %-11s %-8s %-6s %-22s %-6s %-7s %3d / %-3d %5s %6s %2X/%-8X %2X/%-8X %-8s\n",
instance_name,
(backup->server_version[0] ? backup->server_version : "----"),
base36enc(backup->start_time),
timestamp,
pgBackupGetBackupMode(backup),
backup->stream ? "STREAM": "ARCHIVE",
backup->tli,
parent_tli,
duration,
data_bytes_str,
(uint32) (backup->start_lsn >> 32),
(uint32) backup->start_lsn,
(uint32) (backup->stop_lsn >> 32),
(uint32) backup->stop_lsn,
status2str(backup->status));
}
}
/*
* Json output.
*/
/*
* Show instance backups in json format.
*/
static void
show_backup_detail(FILE *out, pgBackup *backup)
show_instance_json(parray *backup_list)
{
pgBackupWriteControl(out, backup);
int i;
PQExpBuffer buf = &show_buf;
if (!first_instance)
appendPQExpBufferChar(buf, ',');
/* Begin of instance object */
json_add(buf, JT_BEGIN_OBJECT, &json_level);
json_add_value(buf, "instance", instance_name, json_level, false);
json_add_key(buf, "backups", json_level, true);
/*
* List backups.
*/
json_add(buf, JT_BEGIN_ARRAY, &json_level);
for (i = 0; i < parray_num(backup_list); i++)
{
pgBackup *backup = parray_get(backup_list, i);
TimeLineID parent_tli;
char timestamp[100] = "----";
char lsn[20];
if (i != 0)
appendPQExpBufferChar(buf, ',');
json_add(buf, JT_BEGIN_OBJECT, &json_level);
json_add_value(buf, "id", base36enc(backup->start_time), json_level,
false);
if (backup->parent_backup != 0)
json_add_value(buf, "parent-backup-id",
base36enc(backup->parent_backup), json_level, true);
json_add_value(buf, "backup-mode", pgBackupGetBackupMode(backup),
json_level, true);
json_add_value(buf, "wal", backup->stream ? "STREAM": "ARCHIVE",
json_level, true);
json_add_value(buf, "compress-alg",
deparse_compress_alg(backup->compress_alg), json_level,
true);
json_add_key(buf, "compress-level", json_level, true);
appendPQExpBuffer(buf, "%d", backup->compress_level);
json_add_value(buf, "from-replica",
backup->from_replica ? "true" : "false", json_level,
true);
json_add_key(buf, "block-size", json_level, true);
appendPQExpBuffer(buf, "%u", backup->block_size);
json_add_key(buf, "xlog-block-size", json_level, true);
appendPQExpBuffer(buf, "%u", backup->wal_block_size);
json_add_key(buf, "checksum-version", json_level, true);
appendPQExpBuffer(buf, "%u", backup->checksum_version);
json_add_value(buf, "program-version", backup->program_version,
json_level, true);
json_add_value(buf, "server-version", backup->server_version,
json_level, true);
json_add_key(buf, "current-tli", json_level, true);
appendPQExpBuffer(buf, "%d", backup->tli);
json_add_key(buf, "parent-tli", json_level, true);
parent_tli = get_parent_tli(backup->tli);
appendPQExpBuffer(buf, "%u", parent_tli);
snprintf(lsn, lengthof(lsn), "%X/%X",
(uint32) (backup->start_lsn >> 32), (uint32) backup->start_lsn);
json_add_value(buf, "start-lsn", lsn, json_level, true);
snprintf(lsn, lengthof(lsn), "%X/%X",
(uint32) (backup->stop_lsn >> 32), (uint32) backup->stop_lsn);
json_add_value(buf, "stop-lsn", lsn, json_level, true);
time2iso(timestamp, lengthof(timestamp), backup->start_time);
json_add_value(buf, "start-time", timestamp, json_level, true);
if (backup->end_time)
{
time2iso(timestamp, lengthof(timestamp), backup->end_time);
json_add_value(buf, "end-time", timestamp, json_level, true);
}
json_add_key(buf, "recovery-xid", json_level, true);
appendPQExpBuffer(buf, XID_FMT, backup->recovery_xid);
if (backup->recovery_time > 0)
{
time2iso(timestamp, lengthof(timestamp), backup->recovery_time);
json_add_value(buf, "recovery-time", timestamp, json_level, true);
}
if (backup->data_bytes != BYTES_INVALID)
{
json_add_key(buf, "data-bytes", json_level, true);
appendPQExpBuffer(buf, INT64_FORMAT, backup->data_bytes);
}
if (backup->wal_bytes != BYTES_INVALID)
{
json_add_key(buf, "wal-bytes", json_level, true);
appendPQExpBuffer(buf, INT64_FORMAT, backup->wal_bytes);
}
if (backup->primary_conninfo)
json_add_value(buf, "primary_conninfo", backup->primary_conninfo,
json_level, true);
json_add_value(buf, "status", status2str(backup->status), json_level,
true);
json_add(buf, JT_END_OBJECT, &json_level);
}
/* End of backups */
json_add(buf, JT_END_ARRAY, &json_level);
/* End of instance object */
json_add(buf, JT_END_OBJECT, &json_level);
first_instance = false;
}
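For reference, with a single instance named 'node' holding one FULL backup, the document produced above looks roughly like this (field values invented, key list abbreviated):

[
    {
        "instance": "node",
        "backups": [
            {
                "id": "PZ7YK2",
                "backup-mode": "FULL",
                "wal": "ARCHIVE",
                "compress-alg": "none",
                "compress-level": 1,
                "from-replica": "false",
                "start-lsn": "0/2000028",
                "stop-lsn": "0/2000130",
                "status": "OK"
            }
        ]
    }
]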

View File

@ -38,7 +38,7 @@ get_pgpid(void)
snprintf(pid_file, lengthof(pid_file), "%s/postmaster.pid", pgdata);
pidf = fopen(pid_file, "r");
pidf = fopen(pid_file, PG_BINARY_R);
if (pidf == NULL)
{
/* No pid file, not an error on startup */

View File

@ -176,8 +176,8 @@ uint32
get_data_checksum_version(bool safe)
{
ControlFileData ControlFile;
char *buffer;
size_t size;
char *buffer;
size_t size;
/* First fetch file... */
buffer = slurpFile(pgdata, "global/pg_control", &size, safe);
@ -191,7 +191,7 @@ get_data_checksum_version(bool safe)
/*
* Convert time_t value to ISO-8601 format string
* Convert time_t value to ISO-8601 format string. Always set timezone offset.
*/
void
time2iso(char *buf, size_t len, time_t time)
@ -199,25 +199,23 @@ time2iso(char *buf, size_t len, time_t time)
struct tm *ptm = gmtime(&time);
time_t gmt = mktime(ptm);
time_t offset;
char *ptr = buf;
ptm = localtime(&time);
offset = time - gmt + (ptm->tm_isdst ? 3600 : 0);
strftime(buf, len, "%Y-%m-%d %H:%M:%S", ptm);
strftime(ptr, len, "%Y-%m-%d %H:%M:%S", ptm);
if (offset != 0)
ptr += strlen(ptr);
snprintf(ptr, len - (ptr - buf), "%c%02d",
(offset >= 0) ? '+' : '-',
abs((int) offset) / SECS_PER_HOUR);
if (abs((int) offset) % SECS_PER_HOUR != 0)
{
buf += strlen(buf);
sprintf(buf, "%c%02d",
(offset >= 0) ? '+' : '-',
abs((int) offset) / SECS_PER_HOUR);
if (abs((int) offset) % SECS_PER_HOUR != 0)
{
buf += strlen(buf);
sprintf(buf, ":%02d",
abs((int) offset % SECS_PER_HOUR) / SECS_PER_MINUTE);
}
ptr += strlen(ptr);
snprintf(ptr, len - (ptr - buf), ":%02d",
abs((int) offset % SECS_PER_HOUR) / SECS_PER_MINUTE);
}
}
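A standalone sketch of just the timezone-suffix arithmetic above, with invented sample offsets; note the real time2iso() omits the suffix entirely when the offset is zero:

#include <stdio.h>
#include <stdlib.h>

#define SECS_PER_HOUR	3600
#define SECS_PER_MINUTE	60

static void
print_suffix(long offset)
{
	printf("%c%02d", (offset >= 0) ? '+' : '-',
		   (int) (labs(offset) / SECS_PER_HOUR));
	if (labs(offset) % SECS_PER_HOUR != 0)
		printf(":%02d", (int) (labs(offset) % SECS_PER_HOUR) / SECS_PER_MINUTE);
	printf("\n");
}

int
main(void)
{
	print_suffix(3 * SECS_PER_HOUR);						/* +03 */
	print_suffix(5 * SECS_PER_HOUR + 30 * SECS_PER_MINUTE);	/* +05:30 */
	print_suffix(-4 * SECS_PER_HOUR - 30 * SECS_PER_MINUTE);	/* -04:30 */
	return 0;
}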
@ -237,6 +235,35 @@ timestamptz_to_time_t(TimestampTz t)
return result;
}
/* Parse string representation of the server version */
int
parse_server_version(char *server_version_str)
{
int nfields;
int result = 0;
int major_version = 0;
int minor_version = 0;
nfields = sscanf(server_version_str, "%d.%d", &major_version, &minor_version);
if (nfields == 2)
{
/* Server version lower than 10 */
if (major_version > 10)
elog(ERROR, "Server version format doesn't match major version %d", major_version);
result = major_version * 10000 + minor_version * 100;
}
else if (nfields == 1)
{
if (major_version < 10)
elog(ERROR, "Server version format doesn't match major version %d", major_version);
result = major_version * 10000;
}
else
elog(ERROR, "Unknown server version format");
return result;
}
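The mapping this produces, sketched standalone with invented inputs (the results follow the server_version_num convention, e.g. 90600 for 9.6):

#include <stdio.h>

static int
version_num(const char *s)
{
	int			major = 0;
	int			minor = 0;
	int			nfields = sscanf(s, "%d.%d", &major, &minor);

	if (nfields == 2)
		return major * 10000 + minor * 100;	/* pre-10 style, e.g. "9.6" */
	if (nfields == 1)
		return major * 10000;				/* 10+ style, e.g. "10" */
	return 0;
}

int
main(void)
{
	printf("%d\n", version_num("9.6"));	/* 90600 */
	printf("%d\n", version_num("10"));	/* 100000 */
	return 0;
}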
const char *
status2str(BackupStatus status)
{
@ -244,8 +271,9 @@ status2str(BackupStatus status)
{
"UNKNOWN",
"OK",
"RUNNING",
"ERROR",
"RUNNING",
"MERGING",
"DELETING",
"DELETED",
"DONE",
@ -295,26 +323,3 @@ remove_not_digit(char *buf, size_t len, const char *str)
}
buf[j] = '\0';
}
/* Fill pgBackup struct with default values */
void
pgBackup_init(pgBackup *backup)
{
backup->backup_id = INVALID_BACKUP_ID;
backup->backup_mode = BACKUP_MODE_INVALID;
backup->status = BACKUP_STATUS_INVALID;
backup->tli = 0;
backup->start_lsn = 0;
backup->stop_lsn = 0;
backup->start_time = (time_t) 0;
backup->end_time = (time_t) 0;
backup->recovery_xid = 0;
backup->recovery_time = (time_t) 0;
backup->data_bytes = BYTES_INVALID;
backup->block_size = BLCKSZ;
backup->wal_block_size = XLOG_BLCKSZ;
backup->stream = false;
backup->parent_backup = 0;
backup->primary_conninfo = NULL;
backup->server_version[0] = '\0';
}

134
src/utils/json.c Normal file
View File

@ -0,0 +1,134 @@
/*-------------------------------------------------------------------------
*
* json.c: - make json document.
*
* Copyright (c) 2018, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#include "json.h"
static void json_add_indent(PQExpBuffer buf, int32 level);
static void json_add_escaped(PQExpBuffer buf, const char *str);
/*
* Start or end a json token. Currently it is a json object or array.
*
* The function modifies the level value and adds indentation where appropriate.
*/
void
json_add(PQExpBuffer buf, JsonToken type, int32 *level)
{
switch (type)
{
case JT_BEGIN_ARRAY:
appendPQExpBufferChar(buf, '[');
*level += 1;
break;
case JT_END_ARRAY:
*level -= 1;
if (*level == 0)
appendPQExpBufferChar(buf, '\n');
else
json_add_indent(buf, *level);
appendPQExpBufferChar(buf, ']');
break;
case JT_BEGIN_OBJECT:
json_add_indent(buf, *level);
appendPQExpBufferChar(buf, '{');
*level += 1;
break;
case JT_END_OBJECT:
*level -= 1;
if (*level == 0)
appendPQExpBufferChar(buf, '\n');
else
json_add_indent(buf, *level);
appendPQExpBufferChar(buf, '}');
break;
default:
break;
}
}
/*
* Add a json object's key. If it isn't the first key, we need to add a comma.
*/
void
json_add_key(PQExpBuffer buf, const char *name, int32 level, bool add_comma)
{
if (add_comma)
appendPQExpBufferChar(buf, ',');
json_add_indent(buf, level);
json_add_escaped(buf, name);
appendPQExpBufferStr(buf, ": ");
}
/*
* Add a json object's key and value. If it isn't the first key, we need to
* add a comma.
*/
void
json_add_value(PQExpBuffer buf, const char *name, const char *value,
int32 level, bool add_comma)
{
json_add_key(buf, name, level, add_comma);
json_add_escaped(buf, value);
}
static void
json_add_indent(PQExpBuffer buf, int32 level)
{
uint16 i;
if (level == 0)
return;
appendPQExpBufferChar(buf, '\n');
for (i = 0; i < level; i++)
appendPQExpBufferStr(buf, " ");
}
static void
json_add_escaped(PQExpBuffer buf, const char *str)
{
const char *p;
appendPQExpBufferChar(buf, '"');
for (p = str; *p; p++)
{
switch (*p)
{
case '\b':
appendPQExpBufferStr(buf, "\\b");
break;
case '\f':
appendPQExpBufferStr(buf, "\\f");
break;
case '\n':
appendPQExpBufferStr(buf, "\\n");
break;
case '\r':
appendPQExpBufferStr(buf, "\\r");
break;
case '\t':
appendPQExpBufferStr(buf, "\\t");
break;
case '"':
appendPQExpBufferStr(buf, "\\\"");
break;
case '\\':
appendPQExpBufferStr(buf, "\\\\");
break;
default:
if ((unsigned char) *p < ' ')
appendPQExpBuffer(buf, "\\u%04x", (int) *p);
else
appendPQExpBufferChar(buf, *p);
break;
}
}
appendPQExpBufferChar(buf, '"');
}
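A minimal usage sketch for these helpers (not part of the patch), assuming it is compiled inside the tree so that json.h and libpq's pqexpbuffer are available:

#include <stdio.h>
#include "json.h"

int
main(void)
{
	PQExpBufferData buf;
	int32		level = 0;

	initPQExpBuffer(&buf);

	json_add(&buf, JT_BEGIN_OBJECT, &level);
	json_add_value(&buf, "instance", "node", level, false);
	json_add_key(&buf, "backups", level, true);
	json_add(&buf, JT_BEGIN_ARRAY, &level);
	json_add(&buf, JT_END_ARRAY, &level);
	json_add(&buf, JT_END_OBJECT, &level);

	fputs(buf.data, stdout);	/* a small pretty-printed JSON object */
	termPQExpBuffer(&buf);
	return 0;
}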

33
src/utils/json.h Normal file
View File

@ -0,0 +1,33 @@
/*-------------------------------------------------------------------------
*
* json.h: - prototypes of json output functions.
*
* Copyright (c) 2018, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#ifndef PROBACKUP_JSON_H
#define PROBACKUP_JSON_H
#include "postgres_fe.h"
#include "pqexpbuffer.h"
/*
* Json document tokens.
*/
typedef enum
{
JT_BEGIN_ARRAY,
JT_END_ARRAY,
JT_BEGIN_OBJECT,
JT_END_OBJECT
} JsonToken;
extern void json_add(PQExpBuffer buf, JsonToken type, int32 *level);
extern void json_add_key(PQExpBuffer buf, const char *name, int32 level,
bool add_comma);
extern void json_add_value(PQExpBuffer buf, const char *name, const char *value,
int32 level, bool add_comma);
#endif /* PROBACKUP_JSON_H */

View File

@ -8,7 +8,6 @@
*/
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
@ -16,11 +15,13 @@
#include "logger.h"
#include "pgut.h"
#include "pg_probackup.h"
#include "thread.h"
/* Logger parameters */
int log_level_console = LOG_NONE;
int log_level_file = LOG_NONE;
int log_level_console = LOG_LEVEL_CONSOLE_DEFAULT;
int log_level_file = LOG_LEVEL_FILE_DEFAULT;
char *log_filename = NULL;
char *error_log_filename = NULL;
@ -74,12 +75,12 @@ void
init_logger(const char *root_path)
{
/* Set log path */
if (LOG_LEVEL_FILE != LOG_OFF || error_log_filename)
if (log_level_file != LOG_OFF || error_log_filename)
{
if (log_directory)
strcpy(log_path, log_directory);
else
join_path_components(log_path, root_path, "log");
join_path_components(log_path, root_path, LOG_DIRECTORY_DEFAULT);
}
}
@ -138,11 +139,10 @@ exit_if_necessary(int elevel)
}
/* If this is not the main thread then don't call exit() */
if (main_tid != pthread_self())
#ifdef WIN32
if (main_tid != GetCurrentThreadId())
ExitThread(elevel);
#else
if (!pthread_equal(main_tid, pthread_self()))
pthread_exit(NULL);
#endif
else
@ -166,15 +166,16 @@ elog_internal(int elevel, bool file_only, const char *fmt, va_list args)
time_t log_time = (time_t) time(NULL);
char strfbuf[128];
write_to_file = elevel >= LOG_LEVEL_FILE && log_path[0] != '\0';
write_to_file = elevel >= log_level_file && log_path[0] != '\0';
write_to_error_log = elevel >= ERROR && error_log_filename &&
log_path[0] != '\0';
write_to_stderr = elevel >= LOG_LEVEL_CONSOLE && !file_only;
write_to_stderr = elevel >= log_level_console && !file_only;
/*
* There is no need to lock if this is elog() from upper elog().
*/
pthread_mutex_lock(&log_file_mutex);
pthread_lock(&log_file_mutex);
#ifdef WIN32
std_args = NULL;
error_args = NULL;
#endif
loggin_in_progress = true;
/* We need copy args only if we need write to error log file */
@ -201,7 +202,7 @@ elog_internal(int elevel, bool file_only, const char *fmt, va_list args)
if (log_file == NULL)
{
if (log_filename == NULL)
open_logfile(&log_file, "pg_probackup.log");
open_logfile(&log_file, LOG_FILENAME_DEFAULT);
else
open_logfile(&log_file, log_filename);
}
@ -241,7 +242,6 @@ elog_internal(int elevel, bool file_only, const char *fmt, va_list args)
if (write_to_stderr)
{
write_elevel(stderr, elevel);
if (write_to_file)
vfprintf(stderr, fmt, std_args);
else
@ -272,7 +272,7 @@ elog_stderr(int elevel, const char *fmt, ...)
* Do not log the message if its severity level is less than log_level.
* It is a small optimisation to put it here rather than in elog_internal().
*/
if (elevel < LOG_LEVEL_CONSOLE && elevel < ERROR)
if (elevel < log_level_console && elevel < ERROR)
return;
va_start(args, fmt);
@ -299,7 +299,7 @@ elog(int elevel, const char *fmt, ...)
* Do not log the message if its severity level is less than log_level.
* It is a small optimisation to put it here rather than in elog_internal().
*/
if (elevel < LOG_LEVEL_CONSOLE && elevel < LOG_LEVEL_FILE && elevel < ERROR)
if (elevel < log_level_console && elevel < log_level_file && elevel < ERROR)
return;
va_start(args, fmt);
@ -319,7 +319,7 @@ elog_file(int elevel, const char *fmt, ...)
* Do not log the message if its severity level is less than log_level.
* It is a small optimisation to put it here rather than in elog_internal().
*/
if (elevel < LOG_LEVEL_FILE && elevel < ERROR)
if (elevel < log_level_file && elevel < ERROR)
return;
va_start(args, fmt);
@ -360,7 +360,7 @@ pg_log(eLogType type, const char *fmt, ...)
* Do not log the message if its severity level is less than log_level.
* It is a small optimisation to put it here rather than in elog_internal().
*/
if (elevel < LOG_LEVEL_CONSOLE && elevel < LOG_LEVEL_FILE && elevel < ERROR)
if (elevel < log_level_console && elevel < log_level_file && elevel < ERROR)
return;
va_start(args, fmt);

View File

@ -36,11 +36,13 @@ extern char *error_log_filename;
extern char *log_directory;
extern char log_path[MAXPGPATH];
#define LOG_ROTATION_SIZE_DEFAULT 0
#define LOG_ROTATION_AGE_DEFAULT 0
extern int log_rotation_size;
extern int log_rotation_age;
#define LOG_LEVEL_CONSOLE ((log_level_console == LOG_NONE) ? INFO : log_level_console)
#define LOG_LEVEL_FILE ((log_level_file == LOG_NONE) ? LOG_OFF : log_level_file)
#define LOG_LEVEL_CONSOLE_DEFAULT INFO
#define LOG_LEVEL_FILE_DEFAULT LOG_OFF
#undef elog
extern void elog(int elevel, const char *fmt, ...) pg_attribute_printf(2, 3);

View File

@ -31,6 +31,7 @@
#define MAX_TZDISP_HOUR 15 /* maximum allowed hour part */
#define SECS_PER_MINUTE 60
#define MINS_PER_HOUR 60
#define MAXPG_LSNCOMPONENT 8
const char *PROGRAM_NAME = NULL;
@ -42,12 +43,6 @@ static char *password = NULL;
bool prompt_password = true;
bool force_password = false;
#ifdef WIN32
DWORD main_tid = 0;
#else
pthread_t main_tid = 0;
#endif
/* Database connections */
static PGcancel *volatile cancel_conn = NULL;
@ -141,8 +136,10 @@ static const unit_conversion time_unit_conversion_table[] =
static size_t
option_length(const pgut_option opts[])
{
size_t len;
size_t len;
for (len = 0; opts && opts[len].type; len++) { }
return len;
}
@ -162,7 +159,7 @@ option_has_arg(char type)
static void
option_copy(struct option dst[], const pgut_option opts[], size_t len)
{
size_t i;
size_t i;
for (i = 0; i < len; i++)
{
@ -260,7 +257,8 @@ assign_option(pgut_option *opt, const char *optarg, pgut_optsrc src)
message = "a valid string. But provided: ";
break;
case 't':
if (parse_time(optarg, opt->var))
if (parse_time(optarg, opt->var,
opt->source == SOURCE_FILE))
return;
message = "a time";
break;
@ -750,9 +748,12 @@ parse_uint64(const char *value, uint64 *result, int flags)
/*
* Convert ISO-8601 format string to time_t value.
*
* If utc_default is true and no timezone offset is specified, the offset
* is assumed to be +00:00.
*/
bool
parse_time(const char *value, time_t *result)
parse_time(const char *value, time_t *result, bool utc_default)
{
size_t len;
int fields_num,
@ -874,7 +875,7 @@ parse_time(const char *value, time_t *result)
*result = mktime(&tm);
/* adjust time zone */
if (tz_set)
if (tz_set || utc_default)
{
time_t ltime = time(NULL);
struct tm *ptm = gmtime(&ltime);
@ -983,6 +984,32 @@ parse_int(const char *value, int *result, int flags, const char **hintmsg)
return true;
}
bool
parse_lsn(const char *value, XLogRecPtr *result)
{
uint32 xlogid;
uint32 xrecoff;
int len1;
int len2;
len1 = strspn(value, "0123456789abcdefABCDEF");
if (len1 < 1 || len1 > MAXPG_LSNCOMPONENT || value[len1] != '/')
elog(ERROR, "invalid LSN \"%s\"", value);
len2 = strspn(value + len1 + 1, "0123456789abcdefABCDEF");
if (len2 < 1 || len2 > MAXPG_LSNCOMPONENT || value[len1 + 1 + len2] != '\0')
elog(ERROR, "invalid LSN \"%s\"", value);
if (sscanf(value, "%X/%X", &xlogid, &xrecoff) == 2)
*result = (XLogRecPtr) ((uint64) xlogid << 32) | xrecoff;
else
{
elog(ERROR, "invalid LSN \"%s\"", value);
return false;
}
return true;
}
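A standalone sketch of the %X/%X LSN encoding parsed above (sample value invented):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	unsigned int xlogid;
	unsigned int xrecoff;

	if (sscanf("1/16B3748", "%X/%X", &xlogid, &xrecoff) == 2)
	{
		uint64_t	lsn = ((uint64_t) xlogid << 32) | xrecoff;

		printf("%llX\n", (unsigned long long) lsn);	/* 116B3748 */
	}
	return 0;
}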
static char *
longopts_to_optstring(const struct option opts[], const size_t len)
{
@ -1053,7 +1080,7 @@ pgut_getopt(int argc, char **argv, pgut_option options[])
size_t len;
len = option_length(options);
longopts = pgut_newarray(struct option, len + 1);
longopts = pgut_newarray(struct option, len + 1 /* zero/end option */);
option_copy(longopts, options, len);
optstring = longopts_to_optstring(longopts, len);
@ -1095,20 +1122,22 @@ key_equals(const char *lhs, const char *rhs)
/*
* Get configuration from configuration file.
* Return the number of parsed options.
*/
void
int
pgut_readopt(const char *path, pgut_option options[], int elevel)
{
FILE *fp;
char buf[1024];
char key[1024];
char value[1024];
int parsed_options = 0;
if (!options)
return;
return parsed_options;
if ((fp = pgut_fopen(path, "rt", true)) == NULL)
return;
return parsed_options;
while (fgets(buf, lengthof(buf), fp))
{
@ -1127,18 +1156,23 @@ pgut_readopt(const char *path, pgut_option options[], int elevel)
{
if (opt->allowed < SOURCE_FILE &&
opt->allowed != SOURCE_FILE_STRICT)
elog(elevel, "option %s cannot specified in file", opt->lname);
elog(elevel, "option %s cannot be specified in file", opt->lname);
else if (opt->source <= SOURCE_FILE)
{
assign_option(opt, value, SOURCE_FILE);
parsed_options++;
}
break;
}
}
if (!options[i].type)
elog(elevel, "invalid option \"%s\"", key);
elog(elevel, "invalid option \"%s\" in file \"%s\"", key, path);
}
}
fclose(fp);
return parsed_options;
}
static const char *
@ -1225,7 +1259,7 @@ get_next_token(const char *src, char *dst, const char *line)
}
else
{
i = j = strcspn(s, "# \n\r\t\v");
i = j = strcspn(s, "#\n\r\t\v");
memcpy(dst, s, j);
}
@ -1646,7 +1680,7 @@ pgut_execute_parallel(PGconn* conn,
elog(ERROR, "interrupted");
/* write query to elog if verbose */
if (LOG_LEVEL_CONSOLE <= VERBOSE || LOG_LEVEL_FILE <= VERBOSE)
if (log_level_console <= VERBOSE || log_level_file <= VERBOSE)
{
int i;
@ -1708,7 +1742,7 @@ pgut_execute_extended(PGconn* conn, const char *query, int nParams,
elog(ERROR, "interrupted");
/* write query to elog if verbose */
if (LOG_LEVEL_CONSOLE <= VERBOSE || LOG_LEVEL_FILE <= VERBOSE)
if (log_level_console <= VERBOSE || log_level_file <= VERBOSE)
{
int i;
@ -1766,7 +1800,7 @@ pgut_send(PGconn* conn, const char *query, int nParams, const char **params, int
elog(ERROR, "interrupted");
/* write query to elog if verbose */
if (LOG_LEVEL_CONSOLE <= VERBOSE || LOG_LEVEL_FILE <= VERBOSE)
if (log_level_console <= VERBOSE || log_level_file <= VERBOSE)
{
int i;

View File

@ -15,9 +15,9 @@
#include "pqexpbuffer.h"
#include <assert.h>
#include <pthread.h>
#include <sys/time.h>
#include "access/xlogdefs.h"
#include "logger.h"
#if !defined(C_H) && !defined(__cplusplus)
@ -59,7 +59,7 @@ typedef enum pgut_optsrc
typedef struct pgut_option
{
char type;
char sname; /* short name */
uint8 sname; /* short name */
const char *lname; /* long name */
void *var; /* pointer to variable */
pgut_optsrc allowed; /* allowed source */
@ -94,13 +94,6 @@ extern const char *PROGRAM_VERSION;
extern const char *PROGRAM_URL;
extern const char *PROGRAM_EMAIL;
/* ID of the main thread */
#ifdef WIN32
extern DWORD main_tid;
#else
extern pthread_t main_tid;
#endif
extern void pgut_help(bool details);
/*
@ -118,7 +111,7 @@ extern bool in_cleanup;
extern bool in_password; /* User prompts password */
extern int pgut_getopt(int argc, char **argv, pgut_option options[]);
extern void pgut_readopt(const char *path, pgut_option options[], int elevel);
extern int pgut_readopt(const char *path, pgut_option options[], int elevel);
extern void pgut_getopt_env(pgut_option options[]);
extern void pgut_atexit_push(pgut_atexit_callback callback, void *userdata);
extern void pgut_atexit_pop(pgut_atexit_callback callback, void *userdata);
@ -212,9 +205,10 @@ extern bool parse_int32(const char *value, int32 *result, int flags);
extern bool parse_uint32(const char *value, uint32 *result, int flags);
extern bool parse_int64(const char *value, int64 *result, int flags);
extern bool parse_uint64(const char *value, uint64 *result, int flags);
extern bool parse_time(const char *value, time_t *result);
extern bool parse_time(const char *value, time_t *result, bool utc_default);
extern bool parse_int(const char *value, int *result, int flags,
const char **hintmsg);
extern bool parse_lsn(const char *value, XLogRecPtr *result);
extern void convert_from_base_unit(int64 base_value, int base_unit,
int64 *value, const char **unit);

102
src/utils/thread.c Normal file
View File

@ -0,0 +1,102 @@
/*-------------------------------------------------------------------------
*
* thread.c: - multi-platform pthread implementations.
*
* Copyright (c) 2018, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#include "thread.h"
pthread_t main_tid = 0;
#ifdef WIN32
#include <errno.h>
typedef struct win32_pthread
{
HANDLE handle;
void *(*routine) (void *);
void *arg;
void *result;
} win32_pthread;
static long mutex_initlock = 0;
static unsigned __stdcall
win32_pthread_run(void *arg)
{
win32_pthread *th = (win32_pthread *)arg;
th->result = th->routine(th->arg);
return 0;
}
int
pthread_create(pthread_t *thread,
pthread_attr_t *attr,
void *(*start_routine) (void *),
void *arg)
{
int save_errno;
win32_pthread *th;
th = (win32_pthread *)pg_malloc(sizeof(win32_pthread));
th->routine = start_routine;
th->arg = arg;
th->result = NULL;
th->handle = (HANDLE)_beginthreadex(NULL, 0, win32_pthread_run, th, 0, NULL);
if (th->handle == NULL)
{
save_errno = errno;
free(th);
return save_errno;
}
*thread = th;
return 0;
}
int
pthread_join(pthread_t th, void **thread_return)
{
if (th == NULL || th->handle == NULL)
return errno = EINVAL;
if (WaitForSingleObject(th->handle, INFINITE) != WAIT_OBJECT_0)
{
_dosmaperr(GetLastError());
return errno;
}
if (thread_return)
*thread_return = th->result;
CloseHandle(th->handle);
free(th);
return 0;
}
#endif /* WIN32 */
int
pthread_lock(pthread_mutex_t *mp)
{
#ifdef WIN32
if (*mp == NULL)
{
while (InterlockedExchange(&mutex_initlock, 1) == 1)
/* loop, another thread owns the lock */ ;
if (*mp == NULL)
{
if (pthread_mutex_init(mp, NULL))
return -1;
}
InterlockedExchange(&mutex_initlock, 0);
}
#endif
return pthread_mutex_lock(mp);
}
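A typical call site for pthread_lock() with a statically initialized mutex; on Windows the first caller initializes the mutex lazily under the InterlockedExchange spin above. A sketch assuming thread.h from this patch:

#include <stdio.h>
#include "thread.h"

static pthread_mutex_t counter_mutex = PTHREAD_MUTEX_INITIALIZER;
static int	counter = 0;

static void
bump(void)
{
	pthread_lock(&counter_mutex);	/* lazily initialized on Windows */
	counter++;
	pthread_mutex_unlock(&counter_mutex);
}

int
main(void)
{
	bump();
	printf("%d\n", counter);	/* 1 */
	return 0;
}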

35
src/utils/thread.h Normal file
View File

@ -0,0 +1,35 @@
/*-------------------------------------------------------------------------
*
* thread.h: - multi-platform pthread implementations.
*
* Copyright (c) 2018, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#ifndef PROBACKUP_THREAD_H
#define PROBACKUP_THREAD_H
#ifdef WIN32
#include "postgres_fe.h"
#include "port/pthread-win32.h"
/* Use native win32 threads on Windows */
typedef struct win32_pthread *pthread_t;
typedef int pthread_attr_t;
#define PTHREAD_MUTEX_INITIALIZER NULL //{ NULL, 0 }
#define PTHREAD_ONCE_INIT false
extern int pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
extern int pthread_join(pthread_t th, void **thread_return);
#else
/* Use platform-dependent pthread capability */
#include <pthread.h>
#endif
extern pthread_t main_tid;
extern int pthread_lock(pthread_mutex_t *mp);
#endif /* PROBACKUP_THREAD_H */

View File

@ -11,25 +11,26 @@
#include "pg_probackup.h"
#include <sys/stat.h>
#include <pthread.h>
#include <dirent.h>
static void pgBackupValidateFiles(void *arg);
#include "utils/thread.h"
static void *pgBackupValidateFiles(void *arg);
static void do_validate_instance(void);
static bool corrupted_backup_found = false;
typedef struct
{
parray *files;
bool corrupted;
parray *files;
bool corrupted;
/*
* Return value from the thread.
* 0 means there is no error; 1 means there is an error.
*/
int ret;
} validate_files_args;
} validate_files_arg;
/*
* Validate backup files.
@ -42,8 +43,9 @@ pgBackupValidate(pgBackup *backup)
parray *files;
bool corrupted = false;
bool validation_isok = true;
pthread_t validate_threads[num_threads];
validate_files_args *validate_threads_args[num_threads];
/* arrays with meta info for multi-threaded validation */
pthread_t *threads;
validate_files_arg *threads_args;
int i;
/* Revalidation is attempted for DONE, ORPHAN and CORRUPT backups */
@ -77,36 +79,44 @@ pgBackupValidate(pgBackup *backup)
for (i = 0; i < parray_num(files); i++)
{
pgFile *file = (pgFile *) parray_get(files, i);
__sync_lock_release(&file->lock);
pg_atomic_clear_flag(&file->lock);
}
/* init thread args with own file lists */
threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads);
threads_args = (validate_files_arg *)
palloc(sizeof(validate_files_arg) * num_threads);
/* Validate files */
for (i = 0; i < num_threads; i++)
{
validate_files_args *arg = pg_malloc(sizeof(validate_files_args));
validate_files_arg *arg = &(threads_args[i]);
arg->files = files;
arg->corrupted = false;
/* By default, assume there is an error */
arg->ret = 1;
threads_args[i].ret = 1;
validate_threads_args[i] = arg;
pthread_create(&validate_threads[i], NULL,
(void *(*)(void *)) pgBackupValidateFiles, arg);
pthread_create(&threads[i], NULL, pgBackupValidateFiles, arg);
}
/* Wait for threads */
for (i = 0; i < num_threads; i++)
{
pthread_join(validate_threads[i], NULL);
if (validate_threads_args[i]->corrupted)
validate_files_arg *arg = &(threads_args[i]);
pthread_join(threads[i], NULL);
if (arg->corrupted)
corrupted = true;
if (validate_threads_args[i]->ret == 1)
if (arg->ret == 1)
validation_isok = false;
pg_free(validate_threads_args[i]);
}
if (!validation_isok)
elog(ERROR, "Data files validation failed");
pfree(threads);
pfree(threads_args);
/* cleanup */
parray_walk(files, pgFileFree);
parray_free(files);
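The per-file flag turns the shared file list into a lock-free work queue: every worker scans the whole list and claims a file by winning its flag. A portable sketch of that pattern, with C11 atomics standing in for pg_atomic_test_set_flag and the item count invented:

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NITEMS		8
#define NTHREADS	3

static atomic_flag item_lock[NITEMS];

static void *
worker(void *arg)
{
	int			i;

	for (i = 0; i < NITEMS; i++)
	{
		/* test-and-set returns true if another worker already claimed it */
		if (atomic_flag_test_and_set(&item_lock[i]))
			continue;
		printf("worker %ld claims item %d\n", (long) (intptr_t) arg, i);
	}
	return NULL;
}

int
main(void)
{
	pthread_t	threads[NTHREADS];
	int			i;

	for (i = 0; i < NITEMS; i++)
		atomic_flag_clear(&item_lock[i]);	/* mirrors pg_atomic_clear_flag */
	for (i = 0; i < NTHREADS; i++)
		pthread_create(&threads[i], NULL, worker, (void *) (intptr_t) i);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(threads[i], NULL);
	return 0;
}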
@ -127,19 +137,19 @@ pgBackupValidate(pgBackup *backup)
* rather throw a WARNING and set arguments->corrupted = true.
* This is necessary to update backup status.
*/
static void
static void *
pgBackupValidateFiles(void *arg)
{
int i;
validate_files_args *arguments = (validate_files_args *)arg;
int i;
validate_files_arg *arguments = (validate_files_arg *)arg;
pg_crc32 crc;
for (i = 0; i < parray_num(arguments->files); i++)
{
struct stat st;
pgFile *file = (pgFile *) parray_get(arguments->files, i);
pgFile *file = (pgFile *) parray_get(arguments->files, i);
if (__sync_lock_test_and_set(&file->lock, 1) != 0)
if (!pg_atomic_test_set_flag(&file->lock))
continue;
if (interrupted)
@ -179,14 +189,13 @@ pgBackupValidateFiles(void *arg)
if (file->write_size != st.st_size)
{
elog(WARNING, "Invalid size of backup file \"%s\" : %lu. Expected %lu",
file->path, (unsigned long) file->write_size,
(unsigned long) st.st_size);
elog(WARNING, "Invalid size of backup file \"%s\" : " INT64_FORMAT ". Expected %lu",
file->path, file->write_size, (unsigned long) st.st_size);
arguments->corrupted = true;
break;
}
crc = pgFileGetCRC(file);
crc = pgFileGetCRC(file->path);
if (crc != file->crc)
{
elog(WARNING, "Invalid CRC of backup file \"%s\" : %X. Expected %X",
@ -198,6 +207,8 @@ pgBackupValidateFiles(void *arg)
/* Data files validation is successful */
arguments->ret = 0;
return NULL;
}
/*
@ -267,7 +278,7 @@ do_validate_all(void)
static void
do_validate_instance(void)
{
char *current_backup_id;
char *current_backup_id;
int i;
parray *backups;
pgBackup *current_backup = NULL;
@ -279,55 +290,45 @@ do_validate_instance(void)
/* Get list of all backups sorted in order of descending start time */
backups = catalog_get_backup_list(INVALID_BACKUP_ID);
if (backups == NULL)
elog(ERROR, "Failed to get backup list.");
/* Validate each backup along with its xlog files. */
/* Examine backups one by one and validate them */
for (i = 0; i < parray_num(backups); i++)
{
pgBackup *base_full_backup = NULL;
current_backup = (pgBackup *) parray_get(backups, i);
if (current_backup->backup_mode != BACKUP_MODE_FULL)
{
int j;
for (j = i + 1; j < parray_num(backups); j++)
{
pgBackup *backup = (pgBackup *) parray_get(backups, j);
if (backup->backup_mode == BACKUP_MODE_FULL)
{
base_full_backup = backup;
break;
}
}
}
else
base_full_backup = current_backup;
/* Validate each backup along with its xlog files. */
pgBackupValidate(current_backup);
/* There is no point in wal validation for corrupted backup */
/* Ensure that the backup has a valid list of parent backups */
if (current_backup->status == BACKUP_STATUS_OK)
{
if (base_full_backup == NULL)
elog(ERROR, "Valid full backup for backup %s is not found.",
base36enc(current_backup->start_time));
pgBackup *base_full_backup = current_backup;
if (current_backup->backup_mode != BACKUP_MODE_FULL)
{
base_full_backup = find_parent_backup(current_backup);
if (base_full_backup == NULL)
elog(ERROR, "Valid full backup for backup %s is not found.",
base36enc(current_backup->start_time));
}
/* Validate corresponding WAL files */
validate_wal(current_backup, arclog_path, 0,
0, base_full_backup->tli);
0, 0, base_full_backup->tli);
}
/* Mark every incremental backup between the corrupted backup and the nearest FULL backup as an orphan */
if (current_backup->status == BACKUP_STATUS_CORRUPT)
{
int j;
corrupted_backup_found = true;
current_backup_id = base36enc_dup(current_backup->start_time);
for (j = i - 1; j >= 0; j--)
{
pgBackup *backup = (pgBackup *) parray_get(backups, j);
if (backup->backup_mode == BACKUP_MODE_FULL)
break;
if (backup->status != BACKUP_STATUS_OK)

View File

@ -60,6 +60,10 @@ def load_tests(loader, tests, pattern):
# ptrack backup on replica should work correctly
# archive:
# immediate recovery and full recovery
# backward compatibility:
# previous version catalog must be readable by newer version
# incremental chain from previous version can be continued
# backups from previous version can be restored
# 10vanilla_1.3ptrack +
# 10vanilla+
# 9.6vanilla_1.3ptrack +

View File

@ -29,7 +29,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
node.safe_psql(
"postgres",
@ -45,11 +45,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.restore_node(
backup_dir, 'node', node)
node.start()
while node.safe_psql(
"postgres",
"select pg_is_in_recovery()") == 't\n':
sleep(1)
node.slow_start()
# Recreate backup catalogue
self.init_pb(backup_dir)
@ -65,11 +61,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.restore_node(
backup_dir, 'node', node,
options=["--recovery-target-action=promote"])
node.start()
while node.safe_psql(
"postgres",
"select pg_is_in_recovery()") == 't\n':
sleep(1)
node.slow_start()
self.assertEqual(
result, node.safe_psql("postgres", "SELECT * FROM t_heap"),
@ -97,7 +89,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
# FIRST TIMELINE
node.safe_psql(
@ -117,11 +109,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.restore_node(
backup_dir, 'node', node,
options=['--immediate', '--recovery-target-action=promote'])
node.start()
while node.safe_psql(
"postgres",
"select pg_is_in_recovery()") == 't\n':
sleep(1)
node.slow_start()
if self.verbose:
print(node.safe_psql(
"postgres",
@ -152,11 +141,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.restore_node(
backup_dir, 'node', node,
options=['--immediate', '--recovery-target-action=promote'])
node.start()
while node.safe_psql(
"postgres",
"select pg_is_in_recovery()") == 't\n':
sleep(1)
node.slow_start()
if self.verbose:
print(
@ -184,11 +169,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.restore_node(
backup_dir, 'node', node,
options=['--immediate', '--recovery-target-action=promote'])
node.start()
while node.safe_psql(
"postgres",
"select pg_is_in_recovery()") == 't\n':
sleep(1)
node.slow_start()
if self.verbose:
print('Fourth timeline')
print(node.safe_psql(
@ -200,10 +182,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.restore_node(
backup_dir, 'node', node,
options=['--immediate', '--recovery-target-action=promote'])
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
sleep(1)
node.slow_start()
if self.verbose:
print('Fifth timeline')
print(node.safe_psql(
@ -215,10 +195,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.restore_node(
backup_dir, 'node', node,
options=['--immediate', '--recovery-target-action=promote'])
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
sleep(1)
node.slow_start()
if self.verbose:
print('Sixth timeline')
print(node.safe_psql(
@ -269,7 +247,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
node.append_conf(
'postgresql.auto.conf', "archive_command = '{0} %p %f'".format(
archive_script_path))
node.start()
node.slow_start()
try:
self.backup_node(
backup_dir, 'node', node,
@ -330,7 +308,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
f.flush()
f.close()
node.start()
node.slow_start()
node.safe_psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
@ -390,7 +368,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
f.flush()
f.close()
node.start()
node.slow_start()
node.safe_psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
@ -426,7 +404,11 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
# @unittest.expectedFailure
# @unittest.skip("skip")
def test_replica_archive(self):
"""make node without archiving, take stream backup and turn it into replica, set replica with archiving, make archive backup from replica"""
"""
make node without archiving, take stream backup and
turn it into replica, set replica with archiving,
make archive backup from replica
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
@ -441,7 +423,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
# ADD INSTANCE 'MASTER'
self.add_instance(backup_dir, 'master', master)
master.start()
master.slow_start()
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
@ -459,15 +441,18 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
# Settings for Replica
self.restore_node(backup_dir, 'master', replica)
self.set_replica(master, replica, synchronous=True)
self.add_instance(backup_dir, 'replica', replica)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start()
replica.slow_start(replica=True)
# Check data correctness on replica
after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
# Change data on master, take FULL backup from replica, restore taken backup and check that restored data equal to original data
# Change data on master, take FULL backup from replica,
# restore taken backup and check that restored data equal
# to original data
master.psql(
"postgres",
"insert into t_heap as select i as id, md5(i::text) as text, "
@ -496,12 +481,14 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.restore_node(backup_dir, 'replica', data_dir=node.data_dir)
node.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node.port))
node.start()
node.slow_start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
# Change data on master, make PAGE backup from replica, restore taken backup and check that restored data equal to original data
# Change data on master, make PAGE backup from replica,
# restore taken backup and check that restored data equal
# to original data
master.psql(
"postgres",
"insert into t_heap as select i as id, md5(i::text) as text, "
@ -526,7 +513,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
backup_dir, 'replica', data_dir=node.data_dir, backup_id=backup_id)
node.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node.port))
node.start()
node.slow_start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
@ -560,7 +547,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
# ADD INSTANCE 'MASTER'
self.add_instance(backup_dir, 'master', master)
self.set_archiving(backup_dir, 'master', master)
master.start()
master.slow_start()
master.psql(
"postgres",
@ -586,7 +573,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'replica', replica)
# SET ARCHIVING FOR REPLICA
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start()
replica.slow_start(replica=True)
# CHECK LOGICAL CORRECTNESS on REPLICA
after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
@ -641,7 +628,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
# ADD INSTANCE 'MASTER'
self.add_instance(backup_dir, 'master', master)
self.set_archiving(backup_dir, 'master', master)
master.start()
master.slow_start()
master.psql(
"postgres",
@ -668,7 +655,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
# self.add_instance(backup_dir, 'replica', replica)
# SET ARCHIVING FOR REPLICA
# self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start()
replica.slow_start(replica=True)
# CHECK LOGICAL CORRECTNESS on REPLICA
after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
@ -763,7 +750,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
# Check data correctness
node.cleanup()
self.restore_node(backup_dir, 'node', node)
node.start()
node.slow_start()
self.assertEqual(
result,
node.safe_psql(
@ -795,7 +783,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
if self.get_version(node) < self.version_to_num('10.0'):
return unittest.skip('You need PostgreSQL 10 for this test')
else:
pg_receivexlog_path = node.get_bin_path('pg_receivewal')
pg_receivexlog_path = self.get_bin_path('pg_receivewal')
pg_receivexlog = self.run_binary(
[
@ -834,7 +822,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
# Check data correctness
node.cleanup()
self.restore_node(backup_dir, 'node', node)
node.start()
node.slow_start()
self.assertEqual(
result, node.safe_psql("postgres", "SELECT * FROM t_heap"),
'data after restore not equal to original data')

View File

@ -29,15 +29,11 @@ class BackupTest(ProbackupTest, unittest.TestCase):
self.set_archiving(backup_dir, 'node', node)
node.start()
# full backup mode
# with open(path.join(node.logs_dir, "backup_full.log"), "wb") as backup_log:
# backup_log.write(self.backup_node(node, options=["--verbose"]))
backup_id = self.backup_node(backup_dir, 'node', node)
show_backup = self.show_pb(backup_dir, 'node')[0]
self.assertEqual(show_backup['Status'], "OK")
self.assertEqual(show_backup['Mode'], "FULL")
self.assertEqual(show_backup['status'], "OK")
self.assertEqual(show_backup['backup-mode'], "FULL")
# postmaster.pid and postmaster.opts shouldn't be copied
excluded = True
@ -61,29 +57,29 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# print self.show_pb(node)
show_backup = self.show_pb(backup_dir, 'node')[1]
self.assertEqual(show_backup['Status'], "OK")
self.assertEqual(show_backup['Mode'], "PAGE")
self.assertEqual(show_backup['status'], "OK")
self.assertEqual(show_backup['backup-mode'], "PAGE")
# Check parent backup
self.assertEqual(
backup_id,
self.show_pb(
backup_dir, 'node',
backup_id=show_backup['ID'])["parent-backup-id"])
backup_id=show_backup['id'])["parent-backup-id"])
# ptrack backup mode
self.backup_node(backup_dir, 'node', node, backup_type="ptrack")
show_backup = self.show_pb(backup_dir, 'node')[2]
self.assertEqual(show_backup['Status'], "OK")
self.assertEqual(show_backup['Mode'], "PTRACK")
self.assertEqual(show_backup['status'], "OK")
self.assertEqual(show_backup['backup-mode'], "PTRACK")
# Check parent backup
self.assertEqual(
page_backup_id,
self.show_pb(
backup_dir, 'node',
backup_id=show_backup['ID'])["parent-backup-id"])
backup_id=show_backup['id'])["parent-backup-id"])
# Clean after yourself
self.del_test_dir(module_name, fname)
@ -106,7 +102,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
self.backup_node(
backup_dir, 'node', node,
options=["-C"])
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
node.stop()
# Clean after yourself
@ -162,7 +158,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
repr(e.message), self.cmd))
self.assertEqual(
self.show_pb(backup_dir, 'node')[0]['Status'],
self.show_pb(backup_dir, 'node')[0]['status'],
"ERROR")
# Clean after yourself
@ -227,7 +223,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
self.assertEqual(
self.show_pb(backup_dir, 'node', backup_id)['status'], "CORRUPT")
self.assertEqual(
self.show_pb(backup_dir, 'node')[1]['Status'], "ERROR")
self.show_pb(backup_dir, 'node')[1]['status'], "ERROR")
# Clean after yourself
self.del_test_dir(module_name, fname)
@ -250,12 +246,12 @@ class BackupTest(ProbackupTest, unittest.TestCase):
self.backup_node(
backup_dir, 'node', node,
backup_type="full", options=["-j", "4"])
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
self.backup_node(
backup_dir, 'node', node,
backup_type="ptrack", options=["-j", "4"])
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
# Clean after yourself
self.del_test_dir(module_name, fname)
@ -282,11 +278,11 @@ class BackupTest(ProbackupTest, unittest.TestCase):
backup_dir, 'node', node, backup_type="full",
options=["-j", "4", "--stream"])
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
self.backup_node(
backup_dir, 'node', node,
backup_type="ptrack", options=["-j", "4", "--stream"])
self.assertEqual(self.show_pb(backup_dir, 'node')[1]['Status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node')[1]['status'], "OK")
# Clean after yourself
self.del_test_dir(module_name, fname)
@ -342,7 +338,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
f.close
self.assertTrue(
self.show_pb(backup_dir, 'node')[1]['Status'] == 'OK',
self.show_pb(backup_dir, 'node')[1]['status'] == 'OK',
"Backup Status should be OK")
# Clean after yourself
@ -415,7 +411,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
repr(e.message), self.cmd))
self.assertTrue(
self.show_pb(backup_dir, 'node')[1]['Status'] == 'ERROR',
self.show_pb(backup_dir, 'node')[1]['status'] == 'ERROR',
"Backup Status should be ERROR")
# Clean after yourself

View File

@ -4,7 +4,7 @@ restore
pg_probackup restore -B backupdir --instance instance_name
[-D datadir]
[ -i backup_id | [{--time=time | --xid=xid } [--inclusive=boolean]]][--timeline=timeline] [-T OLDDIR=NEWDIR]
[ -i backup_id | [{--time=time | --xid=xid | --lsn=lsn } [--inclusive=boolean]]][--timeline=timeline] [-T OLDDIR=NEWDIR]
[-j num_threads] [--progress] [-q] [-v]
"""

View File

@ -83,7 +83,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()
@ -98,7 +99,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(page_result, page_result_new)
node.cleanup()
@ -113,7 +115,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
ptrack_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(ptrack_result, ptrack_result_new)
node.cleanup()
@ -187,7 +190,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()
@ -202,7 +206,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(page_result, page_result_new)
node.cleanup()
@ -217,7 +222,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
ptrack_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(ptrack_result, ptrack_result_new)
node.cleanup()
@ -294,7 +300,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()
@ -309,7 +316,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(page_result, page_result_new)
node.cleanup()
@ -324,7 +332,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
ptrack_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(ptrack_result, ptrack_result_new)
node.cleanup()
@ -401,7 +410,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()
@ -416,7 +426,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(page_result, page_result_new)
node.cleanup()
@ -431,7 +442,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
ptrack_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(ptrack_result, ptrack_result_new)
node.cleanup()

View File

@ -44,13 +44,13 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
self.backup_node(backup_dir, 'node', node)
show_backups = self.show_pb(backup_dir, 'node')
id_1 = show_backups[0]['ID']
id_2 = show_backups[1]['ID']
id_3 = show_backups[2]['ID']
id_1 = show_backups[0]['id']
id_2 = show_backups[1]['id']
id_3 = show_backups[2]['id']
self.delete_pb(backup_dir, 'node', id_2)
show_backups = self.show_pb(backup_dir, 'node')
self.assertEqual(show_backups[0]['ID'], id_1)
self.assertEqual(show_backups[1]['ID'], id_3)
self.assertEqual(show_backups[0]['id'], id_1)
self.assertEqual(show_backups[1]['id'], id_3)
# Clean after yourself
self.del_test_dir(module_name, fname)
@ -82,15 +82,15 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
self.assertEqual(len(show_backups), 4)
# delete first page backup
self.delete_pb(backup_dir, 'node', show_backups[1]['ID'])
self.delete_pb(backup_dir, 'node', show_backups[1]['id'])
show_backups = self.show_pb(backup_dir, 'node')
self.assertEqual(len(show_backups), 2)
self.assertEqual(show_backups[0]['Mode'], "FULL")
self.assertEqual(show_backups[0]['Status'], "OK")
self.assertEqual(show_backups[1]['Mode'], "FULL")
self.assertEqual(show_backups[1]['Status'], "OK")
self.assertEqual(show_backups[0]['backup-mode'], "FULL")
self.assertEqual(show_backups[0]['status'], "OK")
self.assertEqual(show_backups[1]['backup-mode'], "FULL")
self.assertEqual(show_backups[1]['status'], "OK")
# Clean after yourself
self.del_test_dir(module_name, fname)
@ -122,15 +122,15 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
self.assertEqual(len(show_backups), 4)
# delete first page backup
self.delete_pb(backup_dir, 'node', show_backups[1]['ID'])
self.delete_pb(backup_dir, 'node', show_backups[1]['id'])
show_backups = self.show_pb(backup_dir, 'node')
self.assertEqual(len(show_backups), 2)
self.assertEqual(show_backups[0]['Mode'], "FULL")
self.assertEqual(show_backups[0]['Status'], "OK")
self.assertEqual(show_backups[1]['Mode'], "FULL")
self.assertEqual(show_backups[1]['Status'], "OK")
self.assertEqual(show_backups[0]['backup-mode'], "FULL")
self.assertEqual(show_backups[0]['status'], "OK")
self.assertEqual(show_backups[1]['backup-mode'], "FULL")
self.assertEqual(show_backups[1]['status'], "OK")
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@ -508,10 +508,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
# START RESTORED NODE
restored_node.append_conf(
"postgresql.auto.conf", "port = {0}".format(restored_node.port))
restored_node.start()
while restored_node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
restored_node.slow_start()
result_new = restored_node.safe_psql(
"postgres", "select * from pgbench_accounts")
@ -946,11 +943,8 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
# START RESTORED NODE
node_restored.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node_restored.port))
node_restored.start()
node_restored.slow_start()
while node_restored.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
result_new = node_restored.safe_psql(
"postgres", "select * from t_heap")
@ -1191,7 +1185,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
f.close
self.assertTrue(
self.show_pb(backup_dir, 'node')[1]['Status'] == 'OK',
self.show_pb(backup_dir, 'node')[1]['status'] == 'OK',
"Backup Status should be OK")
# Clean after yourself
@ -1264,7 +1258,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
repr(e.message), self.cmd))
self.assertTrue(
self.show_pb(backup_dir, 'node')[1]['Status'] == 'ERROR',
self.show_pb(backup_dir, 'node')[1]['status'] == 'ERROR',
"Backup Status should be ERROR")
# Clean after yourself

View File

@ -43,25 +43,33 @@ class ExcludeTest(ProbackupTest, unittest.TestCase):
"WHERE oid = pg_my_temp_schema()")[0][0]
conn.commit()
temp_toast_schema_name = "pg_toast_" + temp_schema_name.replace("pg_", "")
temp_toast_schema_name = "pg_toast_" + temp_schema_name.replace(
"pg_", "")
conn.commit()
conn.execute("create index test_idx on test (generate_series)")
conn.commit()
heap_path = conn.execute("select pg_relation_filepath('test')")[0][0]
heap_path = conn.execute(
"select pg_relation_filepath('test')")[0][0]
conn.commit()
index_path = conn.execute("select pg_relation_filepath('test_idx')")[0][0]
index_path = conn.execute(
"select pg_relation_filepath('test_idx')")[0][0]
conn.commit()
heap_oid = conn.execute("select 'test'::regclass::oid")[0][0]
conn.commit()
toast_path = conn.execute("select pg_relation_filepath('{0}.{1}')".format(temp_toast_schema_name, "pg_toast_" + str(heap_oid)))[0][0]
toast_path = conn.execute(
"select pg_relation_filepath('{0}.{1}')".format(
temp_toast_schema_name, "pg_toast_" + str(heap_oid)))[0][0]
conn.commit()
toast_idx_path = conn.execute("select pg_relation_filepath('{0}.{1}')".format(temp_toast_schema_name, "pg_toast_" + str(heap_oid) + "_index"))[0][0]
toast_idx_path = conn.execute(
"select pg_relation_filepath('{0}.{1}')".format(
temp_toast_schema_name,
"pg_toast_" + str(heap_oid) + "_index"))[0][0]
conn.commit()
temp_table_filename = os.path.basename(heap_path)

View File

@ -25,6 +25,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
[--replica-timeout=timeout]
pg_probackup show-config -B backup-dir --instance=instance_name
[--format=format]
pg_probackup backup -B backup-path -b backup-mode --instance=instance_name
[-C] [--stream [-S slot-name]] [--backup-pg-log]
@ -49,26 +50,31 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
[--master-port=port] [--master-user=user_name]
[--replica-timeout=timeout]
pg_probackup restore -B backup-dir --instance=instance_name
[-D pgdata-dir] [-i backup-id] [--progress]
[--time=time|--xid=xid [--inclusive=boolean]]
[--time=time|--xid=xid|--lsn=lsn [--inclusive=boolean]]
[--timeline=timeline] [-T OLDDIR=NEWDIR]
[--immediate] [--recovery-target-name=target-name]
[--recovery-target-action=pause|promote|shutdown]
[--restore-as-replica]
[--no-validate]
pg_probackup validate -B backup-dir [--instance=instance_name]
[-i backup-id] [--progress]
[--time=time|--xid=xid [--inclusive=boolean]]
[--time=time|--xid=xid|--lsn=lsn [--inclusive=boolean]]
[--recovery-target-name=target-name]
[--timeline=timeline]
pg_probackup show -B backup-dir
[--instance=instance_name [-i backup-id]]
[--format=format]
pg_probackup delete -B backup-dir --instance=instance_name
[--wal] [-i backup-id | --expired]
pg_probackup merge -B backup-dir --instance=instance_name
-i backup-id
pg_probackup add-instance -B backup-dir -D pgdata-dir
--instance=instance_name
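The new merge entry above folds an incremental backup into its parent chain; in the tests below, merging the newest PAGE backup leaves a single FULL backup behind. A hypothetical invocation (directory, instance, and backup id are placeholders):

import subprocess

# Hypothetical: fold incremental backup PXS92O into its parent FULL backup.
subprocess.check_call([
    "pg_probackup", "merge",
    "-B", "/tmp/backup_dir",
    "--instance=node",
    "-i", "PXS92O",
])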

View File

@ -1 +1 @@
pg_probackup 2.0.17
pg_probackup 2.0.18

View File

@ -191,7 +191,7 @@ class FalsePositive(ProbackupTest, unittest.TestCase):
node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
node.start()
node.slow_start()
# Logical comparison
self.assertEqual(
result,
@ -290,7 +290,7 @@ class FalsePositive(ProbackupTest, unittest.TestCase):
node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
node.start()
node.slow_start()
# Logical comparison
self.assertEqual(
result,

View File

@ -12,6 +12,7 @@ import select
import psycopg2
from time import sleep
import re
import json
idx_ptrack = {
't_heap': {
@ -111,6 +112,39 @@ class ProbackupException(Exception):
return '\n ERROR: {0}\n CMD: {1}'.format(repr(self.message), self.cmd)
def slow_start(self, replica=False):
# wait for https://github.com/postgrespro/testgres/pull/50
# self.poll_query_until(
# "postgres",
# "SELECT not pg_is_in_recovery()",
# raise_operational_error=False)
self.start()
if not replica:
while True:
try:
self.poll_query_until(
"postgres",
"SELECT not pg_is_in_recovery()")
break
except Exception as e:
continue
else:
self.poll_query_until(
"postgres",
"SELECT pg_is_in_recovery()")
# while True:
# try:
# self.poll_query_until(
# "postgres",
# "SELECT pg_is_in_recovery()")
# break
# except ProbackupException as e:
# continue
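For reference, the same wait-until-promoted idiom as a self-contained sketch; the DSN and timeout here are illustrative, and the helper above delegates to testgres' poll_query_until instead:

import time
import psycopg2

def wait_until_writable(dsn, timeout=60):
    # Poll pg_is_in_recovery() until the node accepts writes (illustrative).
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with psycopg2.connect(dsn) as conn:
                with conn.cursor() as cur:
                    cur.execute("SELECT NOT pg_is_in_recovery()")
                    if cur.fetchone()[0]:
                        return
        except psycopg2.OperationalError:
            pass  # server still starting up
        time.sleep(1)
    raise TimeoutError("node did not leave recovery within timeout")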
class ProbackupTest(object):
# Class attributes
enterprise = is_enterprise()
@ -204,6 +238,8 @@ class ProbackupTest(object):
os.makedirs(real_base_dir)
node = testgres.get_new_node('test', base_dir=real_base_dir)
# bind slow_start() to the 'node' instance as a method
node.slow_start = slow_start.__get__(node)
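# (slow_start is a plain function here; function.__get__(obj) returns a
# bound method, so node.slow_start() then behaves like an instance method.)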
node.should_rm_dirs = True
node.init(
initdb_params=initdb_params, allow_streaming=set_replication)
@ -578,6 +614,16 @@ class ProbackupTest(object):
return self.run_pb(cmd_list + options, async, gdb)
def merge_backup(self, backup_dir, instance, backup_id):
cmd_list = [
"merge",
"-B", backup_dir,
"--instance={0}".format(instance),
"-i", backup_id
]
return self.run_pb(cmd_list)
def restore_node(
self, backup_dir, instance, node=False,
data_dir=None, backup_id=None, options=[]
@ -598,7 +644,7 @@ class ProbackupTest(object):
def show_pb(
self, backup_dir, instance=None, backup_id=None,
options=[], as_text=False
options=[], as_text=False, as_json=True
):
backup_list = []
@ -613,63 +659,83 @@ class ProbackupTest(object):
if backup_id:
cmd_list += ["-i", backup_id]
if as_json:
cmd_list += ["--format=json"]
if as_text:
# the caller is expected to print the result when as_text=True
return self.run_pb(cmd_list + options)
# get show result as list of lines
show_splitted = self.run_pb(cmd_list + options).splitlines()
if instance is not None and backup_id is None:
# cut header(ID, Mode, etc) from show as single string
header = show_splitted[1:2][0]
# cut backup records from show as single list
# with string for every backup record
body = show_splitted[3:]
# inverse list so oldest record come first
body = body[::-1]
# split string in list with string for every header element
header_split = re.split(" +", header)
# Remove empty items
for i in header_split:
if i == '':
header_split.remove(i)
if as_json:
data = json.loads(self.run_pb(cmd_list + options))
# print(data)
for instance_data in data:
# find specific instance if requested
if instance and instance_data['instance'] != instance:
continue
header_split = [
header_element.rstrip() for header_element in header_split
]
for backup_record in body:
backup_record = backup_record.rstrip()
# split list with str for every backup record element
backup_record_split = re.split(" +", backup_record)
# Remove empty items
for i in backup_record_split:
if i == '':
backup_record_split.remove(i)
if len(header_split) != len(backup_record_split):
print(warning.format(
header=header, body=body,
header_split=header_split,
body_split=backup_record_split)
)
exit(1)
new_dict = dict(zip(header_split, backup_record_split))
backup_list.append(new_dict)
for backup in reversed(instance_data['backups']):
# find specific backup if requested
if backup_id:
if backup['id'] == backup_id:
return backup
else:
backup_list.append(backup)
return backup_list
else:
# cut out empty lines and lines started with #
# and other garbage then reconstruct it as dictionary
# print show_splitted
sanitized_show = [item for item in show_splitted if item]
sanitized_show = [
item for item in sanitized_show if not item.startswith('#')
]
# print sanitized_show
for line in sanitized_show:
name, var = line.partition(" = ")[::2]
var = var.strip('"')
var = var.strip("'")
specific_record[name.strip()] = var
return specific_record
show_splitted = self.run_pb(cmd_list + options).splitlines()
if instance is not None and backup_id is None:
# cut the header (ID, Mode, etc.) from show output as a single string
header = show_splitted[1:2][0]
# cut backup records from show output as a list
# with one string per backup record
body = show_splitted[3:]
# reverse the list so the oldest record comes first
body = body[::-1]
# split the header string into a list of header elements
header_split = re.split(" +", header)
# Remove empty items
for i in header_split:
if i == '':
header_split.remove(i)
continue
header_split = [
header_element.rstrip() for header_element in header_split
]
for backup_record in body:
backup_record = backup_record.rstrip()
# split the backup record string into a list of its elements
backup_record_split = re.split(" +", backup_record)
# Remove empty items
for i in backup_record_split:
if i == '':
backup_record_split.remove(i)
if len(header_split) != len(backup_record_split):
print(warning.format(
header=header, body=body,
header_split=header_split,
body_split=backup_record_split)
)
exit(1)
new_dict = dict(zip(header_split, backup_record_split))
backup_list.append(new_dict)
return backup_list
else:
# cut out empty lines, lines starting with '#', and other
# garbage, then reconstruct the rest as a dictionary
# print show_splitted
sanitized_show = [item for item in show_splitted if item]
sanitized_show = [
item for item in sanitized_show if not item.startswith('#')
]
# print sanitized_show
for line in sanitized_show:
name, var = line.partition(" = ")[::2]
var = var.strip('"')
var = var.strip("'")
specific_record[name.strip()] = var
return specific_record
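Since show now emits JSON, callers can skip the text scraping in the fallback branch entirely. A minimal standalone sketch, with the backup directory path as a placeholder:

import json
import subprocess

# Hypothetical path; list every backup of every instance from JSON output.
out = subprocess.check_output(
    ["pg_probackup", "show", "-B", "/tmp/backup_dir", "--format=json"])
for instance_data in json.loads(out):
    for backup in instance_data["backups"]:
        print(instance_data["instance"], backup["id"],
              backup["status"], backup["backup-mode"])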
def validate_pb(
self, backup_dir, instance=None,
@ -851,13 +917,26 @@ class ProbackupTest(object):
return num
def switch_wal_segment(self, node):
""" Execute pg_switch_wal/xlog() in given node"""
if self.version_to_num(
node.safe_psql("postgres", "show server_version")
) >= self.version_to_num('10.0'):
node.safe_psql("postgres", "select pg_switch_wal()")
"""
Execute pg_switch_wal/xlog() in the given node
Args:
node: an instance of PostgresNode or NodeConnection class
"""
if isinstance(node, testgres.PostgresNode):
if self.version_to_num(
node.safe_psql("postgres", "show server_version")
) >= self.version_to_num('10.0'):
node.safe_psql("postgres", "select pg_switch_wal()")
else:
node.safe_psql("postgres", "select pg_switch_xlog()")
else:
node.safe_psql("postgres", "select pg_switch_xlog()")
if self.version_to_num(
node.execute("show server_version")[0][0]
) >= self.version_to_num('10.0'):
node.execute("select pg_switch_wal()")
else:
node.execute("select pg_switch_xlog()")
sleep(1)
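The branching above reduces to one fact: pg_switch_xlog() was renamed to pg_switch_wal() in PostgreSQL 10. A condensed sketch of the same gate (the cursor argument is illustrative):

def switch_wal(cur, server_version_num):
    # pg_switch_wal() replaced pg_switch_xlog() in PostgreSQL 10 (100000).
    if server_version_num >= 100000:
        cur.execute("SELECT pg_switch_wal()")
    else:
        cur.execute("SELECT pg_switch_xlog()")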
def get_version(self, node):

454
tests/merge.py Normal file
View File

@ -0,0 +1,454 @@
# coding: utf-8
import unittest
import os
from .helpers.ptrack_helpers import ProbackupTest
module_name = "merge"
class MergeTest(ProbackupTest, unittest.TestCase):
def test_merge_full_page(self):
"""
Test the MERGE command: it merges a FULL backup with the target PAGE backups
"""
fname = self.id().split(".")[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, "backup")
# Initialize instance and backup directory
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=["--data-checksums"]
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, "node", node)
self.set_archiving(backup_dir, "node", node)
node.start()
# Do full backup
self.backup_node(backup_dir, "node", node)
show_backup = self.show_pb(backup_dir, "node")[0]
self.assertEqual(show_backup["status"], "OK")
self.assertEqual(show_backup["backup-mode"], "FULL")
# Fill with data
with node.connect() as conn:
conn.execute("create table test (id int)")
conn.execute(
"insert into test select i from generate_series(1,10) s(i)")
conn.commit()
# Do first page backup
self.backup_node(backup_dir, "node", node, backup_type="page")
show_backup = self.show_pb(backup_dir, "node")[1]
# sanity check
self.assertEqual(show_backup["status"], "OK")
self.assertEqual(show_backup["backup-mode"], "PAGE")
# Fill with data
with node.connect() as conn:
conn.execute(
"insert into test select i from generate_series(1,10) s(i)")
count1 = conn.execute("select count(*) from test")
conn.commit()
# Do second page backup
self.backup_node(backup_dir, "node", node, backup_type="page")
show_backup = self.show_pb(backup_dir, "node")[2]
page_id = show_backup["id"]
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)
# sanity check
self.assertEqual(show_backup["status"], "OK")
self.assertEqual(show_backup["backup-mode"], "PAGE")
# Merge all backups
self.merge_backup(backup_dir, "node", page_id)
show_backups = self.show_pb(backup_dir, "node")
# sanity check
self.assertEqual(len(show_backups), 1)
self.assertEqual(show_backups[0]["status"], "OK")
self.assertEqual(show_backups[0]["backup-mode"], "FULL")
# Drop node and restore it
node.cleanup()
self.restore_node(backup_dir, 'node', node)
# Check physical correctness
if self.paranoia:
pgdata_restored = self.pgdata_content(
node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
node.slow_start()
# Check restored node
count2 = node.execute("postgres", "select count(*) from test")
self.assertEqual(count1, count2)
# Clean after yourself
node.cleanup()
self.del_test_dir(module_name, fname)
def test_merge_compressed_backups(self):
"""
Test MERGE command with compressed backups
"""
fname = self.id().split(".")[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, "backup")
# Initialize instance and backup directory
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=["--data-checksums"]
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, "node", node)
self.set_archiving(backup_dir, "node", node)
node.start()
# Do full compressed backup
self.backup_node(backup_dir, "node", node, options=[
'--compress-algorithm=zlib'])
show_backup = self.show_pb(backup_dir, "node")[0]
self.assertEqual(show_backup["status"], "OK")
self.assertEqual(show_backup["backup-mode"], "FULL")
# Fill with data
with node.connect() as conn:
conn.execute("create table test (id int)")
conn.execute(
"insert into test select i from generate_series(1,10) s(i)")
count1 = conn.execute("select count(*) from test")
conn.commit()
# Do compressed page backup
self.backup_node(
backup_dir, "node", node, backup_type="page",
options=['--compress-algorithm=zlib'])
show_backup = self.show_pb(backup_dir, "node")[1]
page_id = show_backup["id"]
self.assertEqual(show_backup["status"], "OK")
self.assertEqual(show_backup["backup-mode"], "PAGE")
# Merge all backups
self.merge_backup(backup_dir, "node", page_id)
show_backups = self.show_pb(backup_dir, "node")
self.assertEqual(len(show_backups), 1)
self.assertEqual(show_backups[0]["status"], "OK")
self.assertEqual(show_backups[0]["backup-mode"], "FULL")
# Drop node and restore it
node.cleanup()
self.restore_node(backup_dir, 'node', node)
node.slow_start()
# Check restored node
count2 = node.execute("postgres", "select count(*) from test")
self.assertEqual(count1, count2)
# Clean after yourself
node.cleanup()
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_merge_tablespaces(self):
"""
Some test here
"""
def test_merge_page_truncate(self):
"""
make node, create table, take full backup,
delete last 3 pages, vacuum relation,
take page backup, merge full and page,
restore the merged backup and check data correctness
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2',
'checkpoint_timeout': '300s',
'autovacuum': 'off'
}
)
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname))
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node_restored.cleanup()
node.start()
self.create_tblspace_in_node(node, 'somedata')
node.safe_psql(
"postgres",
"create sequence t_seq; "
"create table t_heap tablespace somedata as select i as id, "
"md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,1024) i;")
node.safe_psql(
"postgres",
"vacuum t_heap")
self.backup_node(backup_dir, 'node', node)
node.safe_psql(
"postgres",
"delete from t_heap where ctid >= '(11,0)'")
node.safe_psql(
"postgres",
"vacuum t_heap")
self.backup_node(
backup_dir, 'node', node, backup_type='page')
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)
page_id = self.show_pb(backup_dir, "node")[1]["id"]
self.merge_backup(backup_dir, "node", page_id)
self.validate_pb(backup_dir)
old_tablespace = self.get_tblspace_path(node, 'somedata')
new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new')
self.restore_node(
backup_dir, 'node', node_restored,
options=[
"-j", "4",
"-T", "{0}={1}".format(old_tablespace, new_tablespace),
"--recovery-target-action=promote"])
# Physical comparison
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.slow_start()
# Logical comparison
result1 = node.safe_psql(
"postgres",
"select * from t_heap")
result2 = node_restored.safe_psql(
"postgres",
"select * from t_heap")
self.assertEqual(result1, result2)
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_merge_delta_truncate(self):
"""
make node, create table, take full backup,
delete last 3 pages, vacuum relation,
take delta backup, merge full and delta,
restore the merged backup and check data correctness
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2',
'checkpoint_timeout': '300s',
'autovacuum': 'off'
}
)
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname))
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node_restored.cleanup()
node.start()
self.create_tblspace_in_node(node, 'somedata')
node.safe_psql(
"postgres",
"create sequence t_seq; "
"create table t_heap tablespace somedata as select i as id, "
"md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,1024) i;")
node.safe_psql(
"postgres",
"vacuum t_heap")
self.backup_node(backup_dir, 'node', node)
node.safe_psql(
"postgres",
"delete from t_heap where ctid >= '(11,0)'")
node.safe_psql(
"postgres",
"vacuum t_heap")
self.backup_node(
backup_dir, 'node', node, backup_type='delta')
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)
page_id = self.show_pb(backup_dir, "node")[1]["id"]
self.merge_backup(backup_dir, "node", page_id)
self.validate_pb(backup_dir)
old_tablespace = self.get_tblspace_path(node, 'somedata')
new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new')
self.restore_node(
backup_dir, 'node', node_restored,
options=[
"-j", "4",
"-T", "{0}={1}".format(old_tablespace, new_tablespace),
"--recovery-target-action=promote"])
# Physical comparison
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.slow_start()
# Logical comparison
result1 = node.safe_psql(
"postgres",
"select * from t_heap")
result2 = node_restored.safe_psql(
"postgres",
"select * from t_heap")
self.assertEqual(result1, result2)
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_merge_ptrack_truncate(self):
"""
make node, create table, take full backup,
delete last 3 pages, vacuum relation,
take ptrack backup, merge full and ptrack,
restore the merged backup and check data correctness
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2',
'checkpoint_timeout': '300s',
'ptrack_enable': 'on',
'autovacuum': 'off'
}
)
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname))
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node_restored.cleanup()
node.start()
self.create_tblspace_in_node(node, 'somedata')
node.safe_psql(
"postgres",
"create sequence t_seq; "
"create table t_heap tablespace somedata as select i as id, "
"md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,1024) i;")
node.safe_psql(
"postgres",
"vacuum t_heap")
self.backup_node(backup_dir, 'node', node)
node.safe_psql(
"postgres",
"delete from t_heap where ctid >= '(11,0)'")
node.safe_psql(
"postgres",
"vacuum t_heap")
self.backup_node(
backup_dir, 'node', node, backup_type='ptrack')
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)
page_id = self.show_pb(backup_dir, "node")[1]["id"]
self.merge_backup(backup_dir, "node", page_id)
self.validate_pb(backup_dir)
old_tablespace = self.get_tblspace_path(node, 'somedata')
new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new')
self.restore_node(
backup_dir, 'node', node_restored,
options=[
"-j", "4",
"-T", "{0}={1}".format(old_tablespace, new_tablespace),
"--recovery-target-action=promote"])
# Physical comparison
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.slow_start()
# Logical comparison
result1 = node.safe_psql(
"postgres",
"select * from t_heap")
result2 = node_restored.safe_psql(
"postgres",
"select * from t_heap")
self.assertEqual(result1, result2)
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@ -12,6 +12,7 @@ class OptionTest(ProbackupTest, unittest.TestCase):
# @unittest.expectedFailure
def test_help_1(self):
"""help options"""
self.maxDiff = None
fname = self.id().split(".")[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
with open(os.path.join(self.dir_path, "expected/option_help.out"), "rb") as help_out:
@ -199,7 +200,8 @@ class OptionTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'node', node)
# invalid option in pg_probackup.conf
with open(os.path.join(backup_dir, "backups", "node", "pg_probackup.conf"), "a") as conf:
pbconf_path = os.path.join(backup_dir, "backups", "node", "pg_probackup.conf")
with open(pbconf_path, "a") as conf:
conf.write("TIMELINEID=1\n")
try:
@ -209,7 +211,7 @@ class OptionTest(ProbackupTest, unittest.TestCase):
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertEqual(e.message,
'ERROR: invalid option "TIMELINEID"\n',
'ERROR: invalid option "TIMELINEID" in file "{0}"\n'.format(pbconf_path),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
# Clean after yourself

View File

@ -3,8 +3,6 @@ import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
from datetime import datetime, timedelta
import subprocess
import time
module_name = 'page'
@ -33,8 +31,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
}
)
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname),
)
base_dir="{0}/{1}/node_restored".format(module_name, fname))
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
@ -49,32 +46,27 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
"create table t_heap tablespace somedata as select i as id, "
"md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,1024) i;"
)
"from generate_series(0,1024) i;")
node.safe_psql(
"postgres",
"vacuum t_heap"
)
"vacuum t_heap")
self.backup_node(backup_dir, 'node', node)
node.safe_psql(
"postgres",
"delete from t_heap where ctid >= '(11,0)'"
)
"delete from t_heap where ctid >= '(11,0)'")
node.safe_psql(
"postgres",
"vacuum t_heap"
)
"vacuum t_heap")
self.backup_node(
backup_dir, 'node', node, backup_type='page',
options=['--log-level-file=verbose']
)
options=['--log-level-file=verbose'])
self.backup_node(
backup_dir, 'node', node, backup_type='page'
)
backup_dir, 'node', node, backup_type='page')
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)
@ -87,8 +79,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
options=[
"-j", "4",
"-T", "{0}={1}".format(old_tablespace, new_tablespace),
"--recovery-target-action=promote"]
)
"--recovery-target-action=promote"])
# Physical comparison
if self.paranoia:
@ -97,21 +88,17 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.start()
while node_restored.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node_restored.slow_start()
# Logical comparison
result1 = node.safe_psql(
"postgres",
"select * from t_heap"
)
"select * from t_heap")
result2 = node_restored.safe_psql(
"postgres",
"select * from t_heap"
)
"select * from t_heap")
self.assertEqual(result1, result2)
# Clean after yourself
@ -175,7 +162,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
backup_id=full_backup_id, options=["-j", "4"]),
'\n Unexpected Error Message: {0}\n'
' CMD: {1}'.format(repr(self.output), self.cmd))
node.start()
node.slow_start()
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()
@ -188,7 +175,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
backup_id=page_backup_id, options=["-j", "4"]),
'\n Unexpected Error Message: {0}\n'
' CMD: {1}'.format(repr(self.output), self.cmd))
node.start()
node.slow_start()
page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(page_result, page_result_new)
node.cleanup()
@ -254,7 +241,8 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()
@ -271,7 +259,8 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(page_result, page_result_new)
node.cleanup()
@ -349,10 +338,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
# START RESTORED NODE
restored_node.append_conf(
"postgresql.auto.conf", "port = {0}".format(restored_node.port))
restored_node.start()
while restored_node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
restored_node.slow_start()
result_new = restored_node.safe_psql(
"postgres", "select * from pgbench_accounts")
@ -526,3 +512,130 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_parallel_pagemap(self):
"""
Test parallel WAL segment reading, during which the pagemap is built
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
# Initialize instance and backup directory
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={
"hot_standby": "on"
}
)
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname),
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node_restored.cleanup()
self.set_archiving(backup_dir, 'node', node)
node.start()
# Do full backup
self.backup_node(backup_dir, 'node', node)
show_backup = self.show_pb(backup_dir, 'node')[0]
self.assertEqual(show_backup['status'], "OK")
self.assertEqual(show_backup['backup-mode'], "FULL")
# Fill instance with data and make several WAL segments ...
with node.connect() as conn:
conn.execute("create table test (id int)")
for x in range(0, 8):
conn.execute(
"insert into test select i from generate_series(1,100) s(i)")
conn.commit()
self.switch_wal_segment(conn)
count1 = conn.execute("select count(*) from test")
# ... and do page backup with parallel pagemap
self.backup_node(
backup_dir, 'node', node, backup_type="page", options=["-j", "4"])
show_backup = self.show_pb(backup_dir, 'node')[1]
self.assertEqual(show_backup['status'], "OK")
self.assertEqual(show_backup['backup-mode'], "PAGE")
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)
# Restore it
self.restore_node(backup_dir, 'node', node_restored)
# Physical comparison
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.start()
# Check restored node
count2 = node_restored.execute("postgres", "select count(*) from test")
self.assertEqual(count1, count2)
# Clean after yourself
node.cleanup()
node_restored.cleanup()
self.del_test_dir(module_name, fname)
def test_parallel_pagemap_1(self):
"""
Test parallel WAL segment reading, during which the pagemap is built
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
# Initialize instance and backup directory
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
# Do full backup
self.backup_node(backup_dir, 'node', node)
show_backup = self.show_pb(backup_dir, 'node')[0]
self.assertEqual(show_backup['status'], "OK")
self.assertEqual(show_backup['backup-mode'], "FULL")
# Fill instance with data and make several WAL segments ...
node.pgbench_init(scale=10)
# do page backup in single thread
page_id = self.backup_node(
backup_dir, 'node', node, backup_type="page")
self.delete_pb(backup_dir, 'node', page_id)
# ... and do page backup with parallel pagemap
self.backup_node(
backup_dir, 'node', node, backup_type="page", options=["-j", "4"])
show_backup = self.show_pb(backup_dir, 'node')[1]
self.assertEqual(show_backup['status'], "OK")
self.assertEqual(show_backup['backup-mode'], "PAGE")
# Drop node and restore it
node.cleanup()
self.restore_node(backup_dir, 'node', node)
node.start()
# Clean after yourself
node.cleanup()
self.del_test_dir(module_name, fname)

View File

@ -63,7 +63,7 @@ class ArchiveCheck(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
backup_id = self.show_pb(backup_dir, 'node')[0]['ID']
backup_id = self.show_pb(backup_dir, 'node')[0]['id']
self.assertEqual(
'ERROR', self.show_pb(backup_dir, 'node', backup_id)['status'],
'Backup should have ERROR status')

View File

@ -268,7 +268,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# Physical comparison
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node_restored.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
node_restored.append_conf(
@ -430,7 +431,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# Physical comparison
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node_restored.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
node_restored.append_conf(
@ -503,7 +505,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# Physical comparison
if self.paranoia:
pgdata_restored = self.pgdata_content(node.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
node.start()
@ -583,10 +586,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd)
)
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
full_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()
@ -604,13 +604,11 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
)
if self.paranoia:
pgdata_restored = self.pgdata_content(node.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
ptrack_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(ptrack_result, ptrack_result_new)
@ -687,10 +685,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd)
)
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
full_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()
@ -711,13 +707,11 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
)
if self.paranoia:
pgdata_restored = self.pgdata_content(node.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
ptrack_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(ptrack_result, ptrack_result_new)
@ -811,7 +805,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_page_pgpro417(self):
"""Make archive node, take full backup, take page backup, delete page backup. Try to take ptrack backup, which should fail"""
"""
Make archive node, take full backup, take page backup,
delete page backup. Try to take ptrack backup, which should fail
"""
self.maxDiff = None
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@ -880,7 +877,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_full_pgpro417(self):
"""Make node, take two full backups, delete full second backup. Try to take ptrack backup, which should fail"""
"""
Make node, take two full backups, delete the second full backup.
Try to take ptrack backup, which should fail
"""
self.maxDiff = None
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@ -954,7 +954,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_create_db(self):
"""Make node, take full backup, create database db1, take ptrack backup, restore database and check it presense"""
"""
Make node, take full backup, create database db1, take ptrack backup,
restore the database and check its presence
"""
self.maxDiff = None
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@ -1017,7 +1020,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# COMPARE PHYSICAL CONTENT
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node_restored.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
# START RESTORED NODE
@ -1046,7 +1050,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# COMPARE PHYSICAL CONTENT
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node_restored.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
# START RESTORED NODE
@ -1151,17 +1156,15 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# GET RESTORED PGDATA AND COMPARE
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node_restored.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
# START RESTORED NODE
node_restored.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node_restored.port))
node_restored.start()
node_restored.slow_start()
while node_restored.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
result_new = node_restored.safe_psql(
"postgres", "select * from t_heap")
@ -1229,7 +1232,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# GET PHYSICAL CONTENT and COMPARE PHYSICAL CONTENT
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node_restored.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
# START RESTORED NODE
@ -1240,7 +1244,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_drop_tablespace(self):
"""Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup"""
"""
Make node, create table, alter table tablespace, take ptrack backup,
move table from tablespace, take ptrack backup
"""
self.maxDiff = None
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@ -1321,7 +1328,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_ptrack_alter_tablespace(self):
"""Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup"""
"""
Make node, create table, alter table tablespace, take ptrack backup,
move table from tablespace, take ptrack backup
"""
self.maxDiff = None
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@ -1379,16 +1389,14 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# GET PHYSICAL CONTENT FROM RESTORED NODE and COMPARE PHYSICAL CONTENT
if self.paranoia:
pgdata_restored = self.pgdata_content(restored_node.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
restored_node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
# START RESTORED NODE
restored_node.append_conf(
"postgresql.auto.conf", "port = {0}".format(restored_node.port))
restored_node.start()
while restored_node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
restored_node.slow_start()
# COMPARE LOGICAL CONTENT
result_new = restored_node.safe_psql(
@ -1416,17 +1424,14 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# GET PHYSICAL CONTENT FROM RESTORED NODE and COMPARE PHYSICAL CONTENT
if self.paranoia:
pgdata_restored = self.pgdata_content(restored_node.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
restored_node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
# START RESTORED NODE
restored_node.append_conf(
"postgresql.auto.conf", "port = {0}".format(restored_node.port))
restored_node.start()
while restored_node.safe_psql(
"postgres",
"select pg_is_in_recovery()") == 't\n':
time.sleep(1)
restored_node.slow_start()
result_new = restored_node.safe_psql(
"postgres", "select * from t_heap")
@ -1437,7 +1442,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_ptrack_multiple_segments(self):
"""Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup"""
"""
Make node, create table, alter table tablespace,
take ptrack backup, move table from tablespace, take ptrack backup
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -1446,9 +1454,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica', 'max_wal_senders': '2',
'ptrack_enable': 'on', 'fsync': 'off', 'shared_buffers': '128MB',
'maintenance_work_mem': '1GB', 'autovacuum': 'off',
'full_page_writes': 'off'}
'ptrack_enable': 'on', 'fsync': 'off',
'autovacuum': 'off',
'full_page_writes': 'off'
}
)
self.init_pb(backup_dir)
@ -1514,17 +1523,14 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# GET PHYSICAL CONTENT FROM NODE_RESTORED
if self.paranoia:
pgdata_restored = self.pgdata_content(restored_node.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
restored_node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
# START RESTORED NODE
restored_node.append_conf(
"postgresql.auto.conf", "port = {0}".format(restored_node.port))
restored_node.start()
while restored_node.safe_psql(
"postgres",
"select pg_is_in_recovery()") == 't\n':
time.sleep(1)
restored_node.slow_start()
result_new = restored_node.safe_psql(
"postgres",

View File

@ -46,7 +46,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.backup_node(backup_dir, 'node', node, options=['-j100', '--stream'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
node.safe_psql('postgres', 'cluster t_heap using t_btree')
@ -103,7 +103,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.backup_node(backup_dir, 'node', node, options=['-j100', '--stream'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
node.safe_psql('postgres', 'cluster t_heap using t_gist')
@ -172,7 +172,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.backup_node(backup_dir, 'replica', replica, options=['-j100', '--stream',
self.backup_node(backup_dir, 'replica', replica, options=['-j10', '--stream',
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
@ -242,7 +242,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.backup_node(backup_dir, 'replica', replica, options=['-j100', '--stream',
self.backup_node(backup_dir, 'replica', replica, options=['-j10', '--stream',
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')

View File

@ -45,7 +45,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
# Make full backup to clean every ptrack
self.backup_node(backup_dir, 'node', node, options=['-j100', '--stream'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
for i in idx_ptrack:
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
@ -100,7 +100,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
# Make full backup to clean every ptrack
self.backup_node(backup_dir, 'replica', replica, options=['-j100', '--stream'])
self.backup_node(backup_dir, 'replica', replica, options=['-j10', '--stream'])
for i in idx_ptrack:
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])


@ -45,7 +45,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
# Make full backup to clean every ptrack
self.backup_node(backup_dir, 'node', node, options=['-j100', '--stream'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
for i in idx_ptrack:
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])


@ -43,7 +43,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.backup_node(backup_dir, 'node', node, options=['-j100', '--stream'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
node.safe_psql('postgres', 'vacuum freeze t_heap')
node.safe_psql('postgres', 'checkpoint')
@ -111,7 +111,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
# Take PTRACK backup to clean every ptrack
self.backup_node(backup_dir, 'replica', replica, options=['-j100',
self.backup_node(backup_dir, 'replica', replica, options=['-j10',
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
master.safe_psql('postgres', 'vacuum freeze t_heap')


@ -44,7 +44,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.backup_node(backup_dir, 'node', node, options=['-j100', '--stream'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
node.safe_psql('postgres', 'vacuum full t_heap')


@ -44,7 +44,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.backup_node(backup_dir, 'node', node, options=['-j100', '--stream'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
node.safe_psql('postgres', 'delete from t_heap where id > 128;')
node.safe_psql('postgres', 'vacuum t_heap')
@ -116,7 +116,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
# Take PTRACK backup to clean every ptrack
self.backup_node(backup_dir, 'replica', replica, options=['-j100',
self.backup_node(backup_dir, 'replica', replica, options=['-j10',
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
master.safe_psql('postgres', 'delete from t_heap where id > 128;')


@ -50,7 +50,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
self.set_replica(master, replica)
# Check data correctness on replica
replica.start(["-t", "600"])
replica.slow_start(replica=True)
after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
@ -82,7 +82,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
self.restore_node(backup_dir, 'replica', data_dir=node.data_dir)
node.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node.port))
node.start()
node.slow_start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
@ -113,7 +113,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
backup_dir, 'replica', data_dir=node.data_dir, backup_id=backup_id)
node.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node.port))
node.start()
node.slow_start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
@ -143,7 +143,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
self.set_archiving(backup_dir, 'master', master)
# force more frequent wal switch
master.append_conf('postgresql.auto.conf', 'archive_timeout = 10')
master.start()
master.slow_start()
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
@ -166,7 +166,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
# Settings for Replica
self.set_replica(master, replica)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start(["-t", "600"])
replica.slow_start(replica=True)
# Check data correctness on replica
after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
@ -200,7 +200,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
self.restore_node(backup_dir, 'replica', data_dir=node.data_dir)
node.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node.port))
node.start()
node.slow_start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
@ -231,7 +231,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
backup_dir, 'replica', data_dir=node.data_dir, backup_id=backup_id)
node.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node.port))
node.start()
node.slow_start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
@ -260,7 +260,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
self.set_archiving(backup_dir, 'master', master)
# force more frequent wal switch
master.append_conf('postgresql.auto.conf', 'archive_timeout = 10')
master.start()
master.slow_start()
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
@ -287,15 +287,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(replica.port))
replica.start(["-t", "600"])
time.sleep(1)
self.assertEqual(
master.safe_psql(
"postgres",
"select exists(select * from pg_stat_replication)"
).rstrip(),
't')
replica.start()
# Clean after yourself
self.del_test_dir(module_name, fname)


@ -53,10 +53,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
recovery_conf = os.path.join(node.data_dir, "recovery.conf")
self.assertEqual(os.path.isfile(recovery_conf), True)
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
@ -104,10 +101,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
@ -149,11 +143,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start(params=['-t', '10'])
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
pgbench = node.pgbench(
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
options=['-T', '10', '-c', '2', '--no-vacuum'])
@ -181,11 +171,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
node)["recovery_target_timeline"]
self.assertEqual(int(recovery_target_timeline), target_tli)
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
@ -234,11 +220,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
@ -297,11 +279,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
after = node.safe_psql("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
self.assertEqual(
@ -366,11 +344,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
self.assertEqual(
@ -379,6 +353,153 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_restore_to_lsn_inclusive(self):
"""recovery to target lsn"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
if self.get_version(node) < self.version_to_num('10.0'):
self.del_test_dir(module_name, fname)
return
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.pgbench_init(scale=2)
with node.connect("postgres") as con:
con.execute("CREATE TABLE tbl0005 (a int)")
con.commit()
backup_id = self.backup_node(backup_dir, 'node', node)
pgbench = node.pgbench(
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pgbench.wait()
pgbench.stdout.close()
before = node.safe_psql("postgres", "SELECT * FROM pgbench_branches")
with node.connect("postgres") as con:
con.execute("INSERT INTO tbl0005 VALUES (1)")
con.commit()
res = con.execute("SELECT pg_current_wal_lsn()")
con.commit()
con.execute("INSERT INTO tbl0005 VALUES (2)")
con.commit()
xlogid, xrecoff = res[0][0].split('/')
xrecoff = hex(int(xrecoff, 16) + 1)[2:]
target_lsn = "{0}/{1}".format(xlogid, xrecoff)
pgbench = node.pgbench(
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pgbench.wait()
pgbench.stdout.close()
node.stop()
node.cleanup()
self.assertIn(
"INFO: Restore of backup {0} completed.".format(backup_id),
self.restore_node(
backup_dir, 'node', node,
options=[
"-j", "4", '--lsn={0}'.format(target_lsn),
"--recovery-target-action=promote"]
),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.slow_start()
after = node.safe_psql("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
self.assertEqual(
len(node.execute("postgres", "SELECT * FROM tbl0005")), 2)
# Clean after yourself
self.del_test_dir(module_name, fname)
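The target LSN above is derived by splitting the textual form of pg_current_wal_lsn(), which prints as two slash-separated hex halves (the high 32 bits and the low 32 bits), and bumping the low half by one so that recovery stops just past the captured position. The arithmetic in isolation, as a sketch (like the test itself, it ignores carry out of the low 32 bits):

def next_lsn(lsn):
    # 'xlogid/xrecoff', e.g. '0/3000060' -> '0/3000061'
    xlogid, xrecoff = lsn.split('/')
    xrecoff = hex(int(xrecoff, 16) + 1)[2:]
    return "{0}/{1}".format(xlogid, xrecoff)

assert next_lsn('0/3000060') == '0/3000061'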
# @unittest.skip("skip")
def test_restore_to_lsn_not_inclusive(self):
"""recovery to target lsn"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
if self.get_version(node) < self.version_to_num('10.0'):
self.del_test_dir(module_name, fname)
return
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.pgbench_init(scale=2)
with node.connect("postgres") as con:
con.execute("CREATE TABLE tbl0005 (a int)")
con.commit()
backup_id = self.backup_node(backup_dir, 'node', node)
pgbench = node.pgbench(
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pgbench.wait()
pgbench.stdout.close()
before = node.safe_psql("postgres", "SELECT * FROM pgbench_branches")
with node.connect("postgres") as con:
con.execute("INSERT INTO tbl0005 VALUES (1)")
con.commit()
res = con.execute("SELECT pg_current_wal_lsn()")
con.commit()
con.execute("INSERT INTO tbl0005 VALUES (2)")
con.commit()
xlogid, xrecoff = res[0][0].split('/')
xrecoff = hex(int(xrecoff, 16) + 1)[2:]
target_lsn = "{0}/{1}".format(xlogid, xrecoff)
pgbench = node.pgbench(
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pgbench.wait()
pgbench.stdout.close()
node.stop()
node.cleanup()
self.assertIn(
"INFO: Restore of backup {0} completed.".format(backup_id),
self.restore_node(
backup_dir, 'node', node,
options=[
"--inclusive=false",
"-j", "4", '--lsn={0}'.format(target_lsn),
"--recovery-target-action=promote"]
),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.slow_start()
after = node.safe_psql("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
self.assertEqual(
len(node.execute("postgres", "SELECT * FROM tbl0005")), 1)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_restore_full_ptrack_archive(self):
"""recovery to latest from archive full+ptrack backups"""
@ -420,11 +541,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
@ -479,11 +596,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
@ -535,11 +648,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
@ -602,11 +711,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
bbalance = node.execute(
"postgres", "SELECT sum(bbalance) FROM pgbench_branches")
delta = node.execute(
@ -674,11 +779,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
options=["-j", "4", "--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
bbalance = node.execute(
"postgres", "SELECT sum(bbalance) FROM pgbench_branches")
delta = node.execute(
@ -718,7 +819,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
con.commit()
backup_id = self.backup_node(backup_dir, 'node', node)
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
# 1 - Try to restore to existing directory
node.stop()
@ -769,10 +870,8 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
result = node.execute("postgres", "SELECT id FROM test")
self.assertEqual(result[0][0], 1)
@ -785,8 +884,8 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
backup_dir, 'node', node, backup_type="page")
show_pb = self.show_pb(backup_dir, 'node')
self.assertEqual(show_pb[1]['Status'], "OK")
self.assertEqual(show_pb[2]['Status'], "OK")
self.assertEqual(show_pb[1]['status'], "OK")
self.assertEqual(show_pb[2]['status'], "OK")
node.stop()
node.cleanup()
@ -802,10 +901,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
result = node.execute("postgres", "SELECT id FROM test OFFSET 1")
self.assertEqual(result[0][0], 2)
@ -829,7 +925,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# Full backup
self.backup_node(backup_dir, 'node', node)
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
# Create tablespace
tblspc_path = os.path.join(node.base_dir, "tblspc")
@ -845,8 +941,9 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# First page backup
self.backup_node(backup_dir, 'node', node, backup_type="page")
self.assertEqual(self.show_pb(backup_dir, 'node')[1]['Status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node')[1]['Mode'], "PAGE")
self.assertEqual(self.show_pb(backup_dir, 'node')[1]['status'], "OK")
self.assertEqual(
self.show_pb(backup_dir, 'node')[1]['backup-mode'], "PAGE")
# Create tablespace table
with node.connect("postgres") as con:
@ -862,8 +959,9 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# Second page backup
backup_id = self.backup_node(
backup_dir, 'node', node, backup_type="page")
self.assertEqual(self.show_pb(backup_dir, 'node')[2]['Status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node')[2]['Mode'], "PAGE")
self.assertEqual(self.show_pb(backup_dir, 'node')[2]['status'], "OK")
self.assertEqual(
self.show_pb(backup_dir, 'node')[2]['backup-mode'], "PAGE")
node.stop()
node.cleanup()
@ -879,10 +977,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
count = node.execute("postgres", "SELECT count(*) FROM tbl")
self.assertEqual(count[0][0], 4)
@ -933,10 +1028,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
result = node.psql("postgres", 'select * from t_heap')
self.assertTrue('does not exist' in result[2].decode("utf-8"))
@ -985,11 +1077,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
result = node.psql("postgres", 'select * from t_heap')
self.assertTrue('does not exist' in result[2].decode("utf-8"))
@ -1037,10 +1125,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
result = node.psql("postgres", 'select * from t_heap')
self.assertEqual(True, 'does not exist' in result[2].decode("utf-8"))
@ -1095,10 +1180,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
pgdata_restored = self.pgdata_content(node.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
result = node.psql("postgres", 'select * from t_heap')
self.assertTrue('does not exist' in result[2].decode("utf-8"))
@ -1147,10 +1229,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
"--recovery-target-name=savepoint",
"--recovery-target-action=promote"])
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
result_new = node.safe_psql("postgres", "select * from t_heap")
res = node.psql("postgres", "select * from t_heap_1")


@ -14,7 +14,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
def test_retention_redundancy_1(self):
"""purge backups using redundancy-based retention policy"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
@ -24,7 +25,9 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
self.set_archiving(backup_dir, 'node', node)
node.start()
with open(os.path.join(backup_dir, 'backups', 'node', "pg_probackup.conf"), "a") as conf:
with open(os.path.join(
backup_dir, 'backups', 'node',
"pg_probackup.conf"), "a") as conf:
conf.write("retention-redundancy = 1\n")
# Make backups to be purged
@ -57,7 +60,7 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
for wal_name in os.listdir(os.path.join(backup_dir, 'wal', 'node')):
if not wal_name.endswith(".backup"):
#wal_name_b = wal_name.encode('ascii')
# wal_name_b = wal_name.encode('ascii')
self.assertEqual(wal_name[8:] > min_wal[8:], True)
self.assertEqual(wal_name[8:] > max_wal[8:], True)
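The [8:] slice in these assertions works because a WAL segment file name is 24 hex digits: 8 for the timeline followed by 16 for the position (log id plus segment number). Dropping the timeline leaves fixed-width hex strings, which order lexicographically the same way as the positions they encode. For example:

wal_name = '000000010000000000000003'
timeline, position = wal_name[:8], wal_name[8:]
# timeline == '00000001', position == '0000000000000003';
# equal-length hex strings compare like the underlying numbers.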
@ -68,7 +71,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
def test_retention_window_2(self):
"""purge backups using window-based retention policy"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)


@ -36,6 +36,35 @@ class OptionTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_show_json(self):
"""Status DONE and OK"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
self.assertEqual(
self.backup_node(
backup_dir, 'node', node,
options=["--log-level-console=panic"]),
None
)
self.backup_node(backup_dir, 'node', node)
self.assertIn("OK", self.show_pb(backup_dir, 'node', as_text=True))
# Clean after yourself
self.del_test_dir(module_name, fname)
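This release also adds a JSON writer (src/utils/json.o in the Makefile), and the lowercase keys such as 'status' and 'backup-mode' seen in the updated assertions come from that machine-readable form of the show command. A hedged sketch of consuming it directly, assuming the --format=json flag introduced around this release and an instances-with-backups layout of the output:

import json
import subprocess

backup_dir = '/path/to/backup'  # placeholder path
out = subprocess.check_output(
    ['pg_probackup', 'show', '-B', backup_dir, '--format=json'])
for instance in json.loads(out.decode('utf-8')):
    for backup in instance['backups']:
        print(backup['id'], backup['status'], backup['backup-mode'])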
# @unittest.skip("skip")
def test_corrupt_2(self):
"""Status CORRUPT"""
@ -81,3 +110,94 @@ class OptionTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_no_control_file(self):
"""backup.control doesn't exist"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
backup_id = self.backup_node(backup_dir, 'node', node)
# delete backup.control file
file = os.path.join(
backup_dir, "backups", "node",
backup_id, "backup.control")
os.remove(file)
self.assertIn('control file "{0}" doesn\'t exist'.format(file), self.show_pb(backup_dir, 'node', as_text=True))
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_empty_control_file(self):
"""backup.control is empty"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
backup_id = self.backup_node(backup_dir, 'node', node)
# truncate backup.control file
file = os.path.join(
backup_dir, "backups", "node",
backup_id, "backup.control")
fd = open(file, 'w')
fd.close()
self.assertIn('control file "{0}" is empty'.format(file), self.show_pb(backup_dir, 'node', as_text=True))
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_corrupt_control_file(self):
"""backup.control contains invalid option"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
backup_id = self.backup_node(backup_dir, 'node', node)
# corrupt backup.control file
file = os.path.join(
backup_dir, "backups", "node",
backup_id, "backup.control")
fd = open(file, 'a')
fd.write("statuss = OK")
fd.close()
self.assertIn('invalid option "statuss" in file "{0}"'.format(file), self.show_pb(backup_dir, 'node', as_text=True))
# Clean after yourself
self.del_test_dir(module_name, fname)
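All three tests above lean on backup.control being a plain key = value file: deleting it, truncating it, or appending an unknown key each trips a distinct error path. A minimal sketch of that parse-and-validate logic (a hypothetical Python helper mirroring the checks, not pg_probackup's actual C parser):

import os

KNOWN_KEYS = {'status', 'backup-mode', 'start-time'}  # illustrative subset

def check_control_file(path):
    if not os.path.exists(path):
        raise IOError('control file "{0}" doesn\'t exist'.format(path))
    with open(path) as f:
        text = f.read()
    if not text.strip():
        raise ValueError('control file "{0}" is empty'.format(path))
    for line in text.splitlines():
        if not line.strip():
            continue
        key = line.split('=', 1)[0].strip()
        if key not in KNOWN_KEYS:
            raise ValueError(
                'invalid option "{0}" in file "{1}"'.format(key, path))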


@ -748,6 +748,93 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_validate_instance_with_corrupted_full_and_try_restore(self):
"""make archive node, take FULL, PAGE1, PAGE2, FULL2, PAGE3 backups,
corrupt file in FULL backup and run validate on instance,
expect FULL to gain status CORRUPT, PAGE1 and PAGE2 to gain status ORPHAN,
try to restore the backup with the --no-validate option"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.safe_psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,10000) i")
file_path_t_heap = node.safe_psql(
"postgres",
"select pg_relation_filepath('t_heap')").rstrip()
# FULL1
backup_id_1 = self.backup_node(backup_dir, 'node', node)
node.safe_psql(
"postgres",
"insert into t_heap select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,10000) i")
# PAGE1
backup_id_2 = self.backup_node(backup_dir, 'node', node, backup_type='page')
# PAGE2
node.safe_psql(
"postgres",
"insert into t_heap select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(20000,30000) i")
backup_id_3 = self.backup_node(backup_dir, 'node', node, backup_type='page')
# FULL1
backup_id_4 = self.backup_node(backup_dir, 'node', node)
# PAGE3
node.safe_psql(
"postgres",
"insert into t_heap select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(30000,40000) i")
backup_id_5 = self.backup_node(backup_dir, 'node', node, backup_type='page')
# Corrupt some file in FULL backup
file_full = os.path.join(backup_dir, 'backups/node', backup_id_1, 'database', file_path_t_heap)
with open(file_full, "rb+", 0) as f:
f.seek(84)
f.write(b"blah")
f.flush()
f.close()
# Validate Instance
try:
self.validate_pb(backup_dir, 'node', options=['--log-level-file=verbose'])
self.assertEqual(1, 0, "Expecting Error because of data files corruption.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
'INFO: Validating backup {0}'.format(backup_id_1) in e.message
and "INFO: Validate backups of the instance 'node'" in e.message
and 'WARNING: Invalid CRC of backup file "{0}"'.format(file_full) in e.message
and 'WARNING: Backup {0} data files are corrupted'.format(backup_id_1) in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
self.assertEqual('CORRUPT', self.show_pb(backup_dir, 'node', backup_id_1)['status'], 'Backup STATUS should be "CORRUPT"')
self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_2)['status'], 'Backup STATUS should be "ORPHAN"')
self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_3)['status'], 'Backup STATUS should be "ORPHAN"')
self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_4)['status'], 'Backup STATUS should be "OK"')
self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_5)['status'], 'Backup STATUS should be "OK"')
node.cleanup()
restore_out = self.restore_node(
backup_dir, 'node', node,
options=["--no-validate"])
self.assertIn(
"INFO: Restore of backup {0} completed.".format(backup_id_5),
restore_out,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_validate_instance_with_corrupted_full(self):
"""make archive node, take FULL, PAGE1, PAGE2, FULL2, PAGE3 backups,
@ -908,7 +995,8 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
backup_id = self.backup_node(backup_dir, 'node', node)
target_xid = None
with node.connect("postgres") as con:
res = con.execute("INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)")
res = con.execute(
"INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)")
con.commit()
target_xid = res[0][0]
@ -1041,7 +1129,10 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_validate_corrupt_wal_between_backups(self):
"""make archive node, make full backup, corrupt all wal files, run validate to real xid, expect errors"""
"""
make archive node, make full backup, corrupt all wal files,
run validate to real xid, expect errors
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
@ -1083,7 +1174,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
else:
walfile = node.safe_psql(
'postgres',
'select pg_walfile_name(pg_current_wal_location())').rstrip()
'select pg_walfile_name(pg_current_wal_lsn())').rstrip()
if self.archive_compress:
walfile = walfile + '.gz'
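The switch from pg_current_wal_location() to pg_current_wal_lsn() tracks PostgreSQL 10's renaming of the WAL functions ('xlog'/'location' became 'wal'/'lsn'). The surrounding branch picks the right spelling per server version, roughly:

if self.get_version(node) < self.version_to_num('10.0'):
    walfile = node.safe_psql(
        'postgres',
        'select pg_xlogfile_name(pg_current_xlog_location())').rstrip()
else:
    walfile = node.safe_psql(
        'postgres',
        'select pg_walfile_name(pg_current_wal_lsn())').rstrip()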
@ -1134,12 +1225,12 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
self.assertEqual(
'OK',
self.show_pb(backup_dir, 'node')[0]['Status'],
self.show_pb(backup_dir, 'node')[0]['status'],
'Backup STATUS should be "OK"')
self.assertEqual(
'OK',
self.show_pb(backup_dir, 'node')[1]['Status'],
self.show_pb(backup_dir, 'node')[1]['status'],
'Backup STATUS should be "OK"')
# Clean after yourself
@ -1208,7 +1299,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
self.assertEqual(
'ERROR',
self.show_pb(backup_dir, 'node')[1]['Status'],
self.show_pb(backup_dir, 'node')[1]['status'],
'Backup {0} should have STATUS "ERROR"')
# Clean after yourself
@ -1316,7 +1407,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
self.restore_node(backup_dir, 'node1', data_dir=node2.data_dir)
node2.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node2.port))
node2.start()
node2.slow_start()
timeline_node1 = node1.get_control_data()["Latest checkpoint's TimeLineID"]
timeline_node2 = node2.get_control_data()["Latest checkpoint's TimeLineID"]
@ -1405,7 +1496,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
except ProbackupException as e:
pass
self.assertTrue(
self.show_pb(backup_dir, 'node')[6]['Status'] == 'ERROR')
self.show_pb(backup_dir, 'node')[6]['status'] == 'ERROR')
self.set_archiving(backup_dir, 'node', node)
node.reload()
self.backup_node(backup_dir, 'node', node, backup_type='page')
@ -1440,14 +1531,19 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
self.assertTrue(self.show_pb(backup_dir, 'node')[0]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[1]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[3]['Status'] == 'CORRUPT')
self.assertTrue(self.show_pb(backup_dir, 'node')[4]['Status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[5]['Status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[6]['Status'] == 'ERROR')
self.assertTrue(self.show_pb(backup_dir, 'node')[7]['Status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK')
self.assertTrue(
self.show_pb(backup_dir, 'node')[3]['status'] == 'CORRUPT')
self.assertTrue(
self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN')
self.assertTrue(
self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN')
self.assertTrue(
self.show_pb(backup_dir, 'node')[6]['status'] == 'ERROR')
self.assertTrue(
self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN')
os.rename(file_new, file)
try:
@ -1459,14 +1555,15 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
self.assertTrue(self.show_pb(backup_dir, 'node')[0]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[1]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[3]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[4]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[5]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[6]['Status'] == 'ERROR')
self.assertTrue(self.show_pb(backup_dir, 'node')[7]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK')
self.assertTrue(
self.show_pb(backup_dir, 'node')[6]['status'] == 'ERROR')
self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK')
# Clean after yourself
self.del_test_dir(module_name, fname)
@ -1537,13 +1634,13 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
self.assertTrue(self.show_pb(backup_dir, 'node')[0]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[1]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[3]['Status'] == 'CORRUPT')
self.assertTrue(self.show_pb(backup_dir, 'node')[4]['Status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[5]['Status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[6]['Status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'CORRUPT')
self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN')
os.rename(file_new, file)
file = os.path.join(
@ -1562,13 +1659,72 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
self.assertTrue(self.show_pb(backup_dir, 'node')[0]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[1]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[3]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[4]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[5]['Status'] == 'CORRUPT')
self.assertTrue(self.show_pb(backup_dir, 'node')[6]['Status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'CORRUPT')
self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN')
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_file_size_corruption_no_validate(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
# initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.safe_psql(
"postgres",
"create table t_heap as select 1 as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,1000) i")
node.safe_psql(
"postgres",
"CHECKPOINT;")
heap_path = node.safe_psql(
"postgres",
"select pg_relation_filepath('t_heap')").rstrip()
heap_size = node.safe_psql(
"postgres",
"select pg_relation_size('t_heap')")
backup_id = self.backup_node(
backup_dir, 'node', node, backup_type="full",
options=["-j", "4"], async=False, gdb=False)
node.stop()
node.cleanup()
# Let`s do file corruption
with open(os.path.join(backup_dir, "backups", 'node', backup_id, "database", heap_path), "rb+", 0) as f:
f.truncate(int(heap_size) - 4096)
f.flush()
f.close()
node.cleanup()
try:
self.restore_node(
backup_dir, 'node', node,
options=["--no-validate"])
except ProbackupException as e:
self.assertTrue("ERROR: Data files restoring failed" in e.message, repr(e.message))
print "\nExpected error: \n" + e.message
# Clean after yourself
self.del_test_dir(module_name, fname)

win32build.pl (new file, 240 lines)

@ -0,0 +1,240 @@
#!/usr/bin/perl
use JSON;
our $repack_version;
our $pgdir;
our $pgsrc;
if (@ARGV!=2) {
print STDERR "Usage $0 postgress-instalation-root pg-source-dir \n";
exit 1;
}
our $liblist="";
$pgdir = shift @ARGV;
$pgsrc = shift @ARGV if @ARGV;
our $arch = $ENV{'ARCH'} || "x64";
$arch='Win32' if ($arch eq 'x86' || $arch eq 'X86');
$arch='x64' if $arch eq 'X64';
$conffile = $pgsrc."/tools/msvc/config.pl";
die 'Could not find config.pl'
unless (-f $conffile);
our $config;
do $conffile;
if (! -d "$pgdir/bin" || !-d "$pgdir/include" || !-d "$pgdir/lib") {
print STDERR "Directory $pgdir doesn't look like root of postgresql installation\n";
exit 1;
}
our $includepath="";
our $libpath="";
our $libpath32="";
AddProject();
print "\n\n";
print $libpath."\n";
print $includepath."\n";
# open F,"<","META.json" or die "Cannot open META.json: $!\n";
# {
# local $/ = undef;
# $decoded = decode_json(<F>);
# $repack_version= $decoded->{'version'};
# }
# substitute new path in the project files
preprocess_project("./msvs/template.pg_probackup.vcxproj","./msvs/pg_probackup.vcxproj");
exit 0;
sub preprocess_project {
my $in = shift;
my $out = shift;
our $pgdir;
our $adddir;
my $libs;
if (defined $adddir) {
$libs ="$adddir;";
} else{
$libs ="";
}
open IN,"<",$in or die "Cannot open $in: $!\n";
open OUT,">",$out or die "Cannot open $out: $!\n";
# $includepath .= ";";
# $libpath .= ";";
while (<IN>) {
s/\@PGROOT\@/$pgdir/g;
s/\@ADDLIBS\@/$libpath/g;
s/\@ADDLIBS32\@/$libpath32/g;
s/\@PGSRC\@/$pgsrc/g;
s/\@ADDINCLUDE\@/$includepath/g;
print OUT $_;
}
close IN;
close OUT;
}
# my sub
sub AddLibrary
{
$inc = shift;
if ($libpath ne '')
{
$libpath .= ';';
}
$libpath .= $inc;
if ($libpath32 ne '')
{
$libpath32 .= ';';
}
$libpath32 .= $inc;
}
sub AddLibrary32
{
$inc = shift;
if ($libpath32 ne '')
{
$libpath32 .= ';';
}
$libpath32 .= $inc;
}
sub AddLibrary64
{
$inc = shift;
if ($libpath ne '')
{
$libpath .= ';';
}
$libpath .= $inc;
}
sub AddIncludeDir
{
# my ($self, $inc) = @_;
$inc = shift;
if ($includepath ne '')
{
$includepath .= ';';
}
$includepath .= $inc;
}
sub AddProject
{
# my ($self, $name, $type, $folder, $initialdir) = @_;
if ($config->{zlib})
{
AddIncludeDir($config->{zlib} . '\include');
AddLibrary($config->{zlib} . '\lib\zdll.lib');
}
if ($config->{openssl})
{
AddIncludeDir($config->{openssl} . '\include');
if (-e "$config->{openssl}/lib/VC/ssleay32MD.lib")
{
AddLibrary(
$config->{openssl} . '\lib\VC\ssleay32.lib', 1);
AddLibrary(
$config->{openssl} . '\lib\VC\libeay32.lib', 1);
}
else
{
# We don't expect the config-specific library to be here,
# so don't ask for it in last parameter
AddLibrary(
$config->{openssl} . '\lib\ssleay32.lib', 0);
AddLibrary(
$config->{openssl} . '\lib\libeay32.lib', 0);
}
}
if ($config->{nls})
{
AddIncludeDir($config->{nls} . '\include');
AddLibrary($config->{nls} . '\lib\libintl.lib');
}
if ($config->{gss})
{
AddIncludeDir($config->{gss} . '\inc\krb5');
AddLibrary($config->{gss} . '\lib\i386\krb5_32.lib');
AddLibrary($config->{gss} . '\lib\i386\comerr32.lib');
AddLibrary($config->{gss} . '\lib\i386\gssapi32.lib');
}
if ($config->{iconv})
{
AddIncludeDir($config->{iconv} . '\include');
AddLibrary($config->{iconv} . '\lib\iconv.lib');
}
if ($config->{icu})
{
AddIncludeDir($config->{icu} . '\include');
AddLibrary32($config->{icu} . '\lib\icuin.lib');
AddLibrary32($config->{icu} . '\lib\icuuc.lib');
AddLibrary32($config->{icu} . '\lib\icudt.lib');
AddLibrary64($config->{icu} . '\lib64\icuin.lib');
AddLibrary64($config->{icu} . '\lib64\icuuc.lib');
AddLibrary64($config->{icu} . '\lib64\icudt.lib');
}
if ($config->{xml})
{
AddIncludeDir($config->{xml} . '\include');
AddIncludeDir($config->{xml} . '\include\libxml2');
AddLibrary($config->{xml} . '\lib\libxml2.lib');
}
if ($config->{xslt})
{
AddIncludeDir($config->{xslt} . '\include');
AddLibrary($config->{xslt} . '\lib\libxslt.lib');
}
if ($config->{libedit})
{
AddIncludeDir($config->{libedit} . '\include');
# AddLibrary($config->{libedit} . "\\" .
# ($arch eq 'x64'? 'lib64': 'lib32').'\edit.lib');
AddLibrary32($config->{libedit} . '\\lib32\edit.lib');
AddLibrary64($config->{libedit} . '\\lib64\edit.lib');
}
if ($config->{uuid})
{
AddIncludeDir($config->{uuid} . '\include');
AddLibrary($config->{uuid} . '\lib\uuid.lib');
}
if ($config->{zstd})
{
AddIncludeDir($config->{zstd});
# AddLibrary($config->{zstd}. "\\".($arch eq 'x64'? "zstdlib_x64.lib" : "zstdlib_x86.lib"));
AddLibrary32($config->{zstd}. "\\zstdlib_x86.lib");
AddLibrary64($config->{zstd}. "\\zstdlib_x64.lib") ;
}
# return $proj;
}
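preprocess_project above is a plain placeholder-substitution pass over the Visual Studio project template: each @NAME@ token is replaced by a path computed from config.pl. The same data flow in a few lines of Python, with hypothetical file names, just to make the mechanism explicit:

def preprocess_project(src, dst, substitutions):
    # substitutions maps placeholder names to computed values,
    # e.g. {'PGROOT': pgdir, 'ADDLIBS': libpath, 'ADDINCLUDE': includepath}
    with open(src) as fin, open(dst, 'w') as fout:
        text = fin.read()
        for name, value in substitutions.items():
            text = text.replace('@{0}@'.format(name), value)
        fout.write(text)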

win32build96.pl (new file, 240 lines)

@ -0,0 +1,240 @@
#!/usr/bin/perl
use JSON;
our $repack_version;
our $pgdir;
our $pgsrc;
if (@ARGV!=2) {
print STDERR "Usage $0 postgress-instalation-root pg-source-dir \n";
exit 1;
}
our $liblist="";
$pgdir = shift @ARGV;
$pgsrc = shift @ARGV if @ARGV;
our $arch = $ENV{'ARCH'} || "x64";
$arch='Win32' if ($arch eq 'x86' || $arch eq 'X86');
$arch='x64' if $arch eq 'X64';
$conffile = $pgsrc."/tools/msvc/config.pl";
die 'Could not find config.pl'
unless (-f $conffile);
our $config;
do $conffile;
if (! -d "$pgdir/bin" || !-d "$pgdir/include" || !-d "$pgdir/lib") {
print STDERR "Directory $pgdir doesn't look like root of postgresql installation\n";
exit 1;
}
our $includepath="";
our $libpath="";
our $libpath32="";
AddProject();
print "\n\n";
print $libpath."\n";
print $includepath."\n";
# open F,"<","META.json" or die "Cannot open META.json: $!\n";
# {
# local $/ = undef;
# $decoded = decode_json(<F>);
# $repack_version= $decoded->{'version'};
# }
# substitute new path in the project files
preprocess_project("./msvs/template.pg_probackup96.vcxproj","./msvs/pg_probackup.vcxproj");
exit 0;
sub preprocess_project {
my $in = shift;
my $out = shift;
our $pgdir;
our $adddir;
my $libs;
if (defined $adddir) {
$libs ="$adddir;";
} else{
$libs ="";
}
open IN,"<",$in or die "Cannot open $in: $!\n";
open OUT,">",$out or die "Cannot open $out: $!\n";
# $includepath .= ";";
# $libpath .= ";";
while (<IN>) {
s/\@PGROOT\@/$pgdir/g;
s/\@ADDLIBS\@/$libpath/g;
s/\@ADDLIBS32\@/$libpath32/g;
s/\@PGSRC\@/$pgsrc/g;
s/\@ADDINCLUDE\@/$includepath/g;
print OUT $_;
}
close IN;
close OUT;
}
# my sub
sub AddLibrary
{
$inc = shift;
if ($libpath ne '')
{
$libpath .= ';';
}
$libpath .= $inc;
if ($libpath32 ne '')
{
$libpath32 .= ';';
}
$libpath32 .= $inc;
}
sub AddLibrary32
{
$inc = shift;
if ($libpath32 ne '')
{
$libpath32 .= ';';
}
$libpath32 .= $inc;
}
sub AddLibrary64
{
$inc = shift;
if ($libpath ne '')
{
$libpath .= ';';
}
$libpath .= $inc;
}
sub AddIncludeDir
{
# my ($self, $inc) = @_;
$inc = shift;
if ($includepath ne '')
{
$includepath .= ';';
}
$includepath .= $inc;
}
sub AddProject
{
# my ($self, $name, $type, $folder, $initialdir) = @_;
if ($config->{zlib})
{
AddIncludeDir($config->{zlib} . '\include');
AddLibrary($config->{zlib} . '\lib\zdll.lib');
}
if ($config->{openssl})
{
AddIncludeDir($config->{openssl} . '\include');
if (-e "$config->{openssl}/lib/VC/ssleay32MD.lib")
{
AddLibrary(
$config->{openssl} . '\lib\VC\ssleay32.lib', 1);
AddLibrary(
$config->{openssl} . '\lib\VC\libeay32.lib', 1);
}
else
{
# We don't expect the config-specific library to be here,
# so don't ask for it in last parameter
AddLibrary(
$config->{openssl} . '\lib\ssleay32.lib', 0);
AddLibrary(
$config->{openssl} . '\lib\libeay32.lib', 0);
}
}
if ($config->{nls})
{
AddIncludeDir($config->{nls} . '\include');
AddLibrary($config->{nls} . '\lib\libintl.lib');
}
if ($config->{gss})
{
AddIncludeDir($config->{gss} . '\inc\krb5');
AddLibrary($config->{gss} . '\lib\i386\krb5_32.lib');
AddLibrary($config->{gss} . '\lib\i386\comerr32.lib');
AddLibrary($config->{gss} . '\lib\i386\gssapi32.lib');
}
if ($config->{iconv})
{
AddIncludeDir($config->{iconv} . '\include');
AddLibrary($config->{iconv} . '\lib\iconv.lib');
}
if ($config->{icu})
{
AddIncludeDir($config->{icu} . '\include');
AddLibrary32($config->{icu} . '\lib\icuin.lib');
AddLibrary32($config->{icu} . '\lib\icuuc.lib');
AddLibrary32($config->{icu} . '\lib\icudt.lib');
AddLibrary64($config->{icu} . '\lib64\icuin.lib');
AddLibrary64($config->{icu} . '\lib64\icuuc.lib');
AddLibrary64($config->{icu} . '\lib64\icudt.lib');
}
if ($config->{xml})
{
AddIncludeDir($config->{xml} . '\include');
AddIncludeDir($config->{xml} . '\include\libxml2');
AddLibrary($config->{xml} . '\lib\libxml2.lib');
}
if ($config->{xslt})
{
AddIncludeDir($config->{xslt} . '\include');
AddLibrary($config->{xslt} . '\lib\libxslt.lib');
}
if ($config->{libedit})
{
AddIncludeDir($config->{libedit} . '\include');
# AddLibrary($config->{libedit} . "\\" .
# ($arch eq 'x64'? 'lib64': 'lib32').'\edit.lib');
AddLibrary32($config->{libedit} . '\\lib32\edit.lib');
AddLibrary64($config->{libedit} . '\\lib64\edit.lib');
}
if ($config->{uuid})
{
AddIncludeDir($config->{uuid} . '\include');
AddLibrary($config->{uuid} . '\lib\uuid.lib');
}
if ($config->{zstd})
{
AddIncludeDir($config->{zstd});
# AddLibrary($config->{zstd}. "\\".($arch eq 'x64'? "zstdlib_x64.lib" : "zstdlib_x86.lib"));
AddLibrary32($config->{zstd}. "\\zstdlib_x86.lib");
AddLibrary64($config->{zstd}. "\\zstdlib_x64.lib") ;
}
# return $proj;
}

win32build_2.pl (new file, 219 lines)

@ -0,0 +1,219 @@
#!/usr/bin/perl
use JSON;
our $repack_version;
our $pgdir;
our $pgsrc;
if (@ARGV!=2) {
print STDERR "Usage $0 postgress-instalation-root pg-source-dir \n";
exit 1;
}
our $liblist="";
$pgdir = shift @ARGV;
$pgsrc = shift @ARGV if @ARGV;
our $arch = $ENV{'ARCH'} || "x64";
$arch='Win32' if ($arch eq 'x86' || $arch eq 'X86');
$arch='x64' if $arch eq 'X64';
$conffile = $pgsrc."/tools/msvc/config.pl";
die 'Could not find config.pl'
unless (-f $conffile);
our $config;
do $conffile;
if (! -d "$pgdir/bin" || !-d "$pgdir/include" || !-d "$pgdir/lib") {
print STDERR "Directory $pgdir doesn't look like root of postgresql installation\n";
exit 1;
}
our $includepath="";
our $libpath="";
AddProject();
print "\n\n";
print $libpath."\n";
print $includepath."\n";
# open F,"<","META.json" or die "Cannot open META.json: $!\n";
# {
# local $/ = undef;
# $decoded = decode_json(<F>);
# $repack_version= $decoded->{'version'};
# }
# substitute new path in the project files
preprocess_project("./msvs/template.pg_probackup_2.vcxproj","./msvs/pg_probackup.vcxproj");
exit 0;
sub preprocess_project {
my $in = shift;
my $out = shift;
our $pgdir;
our $adddir;
my $libs;
if (defined $adddir) {
$libs ="$adddir;";
} else{
$libs ="";
}
open IN,"<",$in or die "Cannot open $in: $!\n";
open OUT,">",$out or die "Cannot open $out: $!\n";
# $includepath .= ";";
# $libpath .= ";";
while (<IN>) {
s/\@PGROOT\@/$pgdir/g;
s/\@ADDLIBS\@/$libpath/g;
s/\@PGSRC\@/$pgsrc/g;
s/\@ADDINCLUDE\@/$includepath/g;
print OUT $_;
}
close IN;
close OUT;
}
# my sub
sub AddLibrary
{
$inc = shift;
if ($libpath ne '')
{
$libpath .= ';';
}
$libpath .= $inc;
}
sub AddIncludeDir
{
# my ($self, $inc) = @_;
$inc = shift;
if ($includepath ne '')
{
$includepath .= ';';
}
$includepath .= $inc;
}
sub AddProject
{
# my ($self, $name, $type, $folder, $initialdir) = @_;
if ($config->{zlib})
{
AddIncludeDir($config->{zlib} . '\include');
AddLibrary($config->{zlib} . '\lib\zdll.lib');
}
if ($config->{openssl})
{
AddIncludeDir($config->{openssl} . '\include');
if (-e "$config->{openssl}/lib/VC/ssleay32MD.lib")
{
AddLibrary(
$config->{openssl} . '\lib\VC\ssleay32.lib', 1);
AddLibrary(
$config->{openssl} . '\lib\VC\libeay32.lib', 1);
}
else
{
# We don't expect the config-specific library to be here,
# so don't ask for it in last parameter
AddLibrary(
$config->{openssl} . '\lib\ssleay32.lib', 0);
AddLibrary(
$config->{openssl} . '\lib\libeay32.lib', 0);
}
}
if ($config->{nls})
{
AddIncludeDir($config->{nls} . '\include');
AddLibrary($config->{nls} . '\lib\libintl.lib');
}
if ($config->{gss})
{
AddIncludeDir($config->{gss} . '\inc\krb5');
AddLibrary($config->{gss} . '\lib\i386\krb5_32.lib');
AddLibrary($config->{gss} . '\lib\i386\comerr32.lib');
AddLibrary($config->{gss} . '\lib\i386\gssapi32.lib');
}
if ($config->{iconv})
{
AddIncludeDir($config->{iconv} . '\include');
AddLibrary($config->{iconv} . '\lib\iconv.lib');
}
if ($config->{icu})
{
AddIncludeDir($config->{icu} . '\include');
if ($arch eq 'Win32')
{
AddLibrary($config->{icu} . '\lib\icuin.lib');
AddLibrary($config->{icu} . '\lib\icuuc.lib');
AddLibrary($config->{icu} . '\lib\icudt.lib');
}
else
{
AddLibrary($config->{icu} . '\lib64\icuin.lib');
AddLibrary($config->{icu} . '\lib64\icuuc.lib');
AddLibrary($config->{icu} . '\lib64\icudt.lib');
}
}
if ($config->{xml})
{
AddIncludeDir($config->{xml} . '\include');
AddIncludeDir($config->{xml} . '\include\libxml2');
AddLibrary($config->{xml} . '\lib\libxml2.lib');
}
if ($config->{xslt})
{
AddIncludeDir($config->{xslt} . '\include');
AddLibrary($config->{xslt} . '\lib\libxslt.lib');
}
if ($config->{libedit})
{
AddIncludeDir($config->{libedit} . '\include');
AddLibrary($config->{libedit} . "\\" .
($arch eq 'x64'? 'lib64': 'lib32').'\edit.lib');
}
if ($config->{uuid})
{
AddIncludeDir($config->{uuid} . '\include');
AddLibrary($config->{uuid} . '\lib\uuid.lib');
}
if ($config->{zstd})
{
AddIncludeDir($config->{zstd});
AddLibrary($config->{zstd}. "\\".
($arch eq 'x64'? "zstdlib_x64.lib" : "zstdlib_x86.lib")
);
}
# return $proj;
}