1
0
mirror of https://github.com/postgrespro/pg_probackup.git synced 2025-03-17 21:18:00 +02:00

Merge branch 'master' into pgpro-1286-no-validation-restore

This commit is contained in:
Grigory Smolkin 2018-07-18 08:41:30 +03:00
commit 5bed18c378
54 changed files with 3188 additions and 998 deletions

View File

@ -4,7 +4,8 @@ OBJS = src/backup.o src/catalog.o src/configure.o src/data.o \
src/pg_probackup.o src/restore.o src/show.o src/status.o \
src/util.o src/validate.o src/datapagemap.o src/parsexlog.o \
src/xlogreader.o src/streamutil.o src/receivelog.o \
src/archive.o src/utils/parray.o src/utils/pgut.o src/utils/logger.o
src/archive.o src/utils/parray.o src/utils/pgut.o src/utils/logger.o \
src/utils/json.o src/utils/thread.o
EXTRA_CLEAN = src/datapagemap.c src/datapagemap.h src/xlogreader.c \
src/receivelog.c src/receivelog.h src/streamutil.c src/streamutil.h src/logging.h
@ -62,6 +63,7 @@ src/streamutil.c: $(top_srcdir)/src/bin/pg_basebackup/streamutil.c
src/streamutil.h: $(top_srcdir)/src/bin/pg_basebackup/streamutil.h
rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_basebackup/streamutil.h $@
ifeq ($(MAJORVERSION),10)
src/walmethods.c: $(top_srcdir)/src/bin/pg_basebackup/walmethods.c
rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_basebackup/walmethods.c $@

1
doit.cmd Normal file
View File

@ -0,0 +1 @@
perl win32build.pl "C:\PgProject\pgwininstall-ee\builddir\distr_X64_10.4.1\postgresql" "C:\PgProject\pgwininstall-ee\builddir\postgresql\postgrespro-enterprise-10.4.1\src"

1
doit96.cmd Normal file
View File

@ -0,0 +1 @@
perl win32build96.pl "C:\PgPro96" "C:\PgProject\pg96ee\postgrespro\src"

28
msvs/pg_probackup.sln Normal file
View File

@ -0,0 +1,28 @@

Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio Express 2013 for Windows Desktop
VisualStudioVersion = 12.0.31101.0
MinimumVisualStudioVersion = 10.0.40219.1
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "pg_probackup", "pg_probackup.vcxproj", "{4886B21A-D8CA-4A03-BADF-743B24C88327}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Win32 = Debug|Win32
Debug|x64 = Debug|x64
Release|Win32 = Release|Win32
Release|x64 = Release|x64
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{4886B21A-D8CA-4A03-BADF-743B24C88327}.Debug|Win32.ActiveCfg = Debug|Win32
{4886B21A-D8CA-4A03-BADF-743B24C88327}.Debug|Win32.Build.0 = Debug|Win32
{4886B21A-D8CA-4A03-BADF-743B24C88327}.Debug|x64.ActiveCfg = Debug|x64
{4886B21A-D8CA-4A03-BADF-743B24C88327}.Debug|x64.Build.0 = Debug|x64
{4886B21A-D8CA-4A03-BADF-743B24C88327}.Release|Win32.ActiveCfg = Release|Win32
{4886B21A-D8CA-4A03-BADF-743B24C88327}.Release|Win32.Build.0 = Release|Win32
{4886B21A-D8CA-4A03-BADF-743B24C88327}.Release|x64.ActiveCfg = Release|x64
{4886B21A-D8CA-4A03-BADF-743B24C88327}.Release|x64.Build.0 = Release|x64
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
EndGlobal

View File

@ -0,0 +1,212 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{4886B21A-D8CA-4A03-BADF-743B24C88327}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>pg_probackup</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;@PGSRC@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
<ExcludePath />
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;@PGSRC@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
<ExcludePath />
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;@PGSRC@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
<ExcludePath />
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;@PGSRC@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
<ExcludePath />
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_CRT_NONSTDC_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>@ADDLIBS32@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_CRT_NONSTDC_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>@ADDLIBS@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;_CRT_NONSTDC_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>@ADDLIBS32@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
<IgnoreSpecificDefaultLibraries>libc;%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;_CRT_NONSTDC_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>@ADDLIBS@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
<IgnoreSpecificDefaultLibraries>libc;%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="@PGSRC@\backend\access\transam\xlogreader.c" />
<ClCompile Include="@PGSRC@\bin\pg_basebackup\receivelog.c" />
<ClCompile Include="@PGSRC@\bin\pg_basebackup\streamutil.c" />
<ClCompile Include="@PGSRC@\bin\pg_basebackup\walmethods.c" />
<ClCompile Include="@PGSRC@\bin\pg_rewind\datapagemap.c" />
<ClCompile Include="@PGSRC@\interfaces\libpq\pthread-win32.c" />
<ClCompile Include="..\src\archive.c" />
<ClCompile Include="..\src\backup.c" />
<ClCompile Include="..\src\catalog.c" />
<ClCompile Include="..\src\configure.c" />
<ClCompile Include="..\src\data.c" />
<ClCompile Include="..\src\delete.c" />
<ClCompile Include="..\src\dir.c" />
<ClCompile Include="..\src\fetch.c" />
<ClCompile Include="..\src\help.c" />
<ClCompile Include="..\src\init.c" />
<ClCompile Include="..\src\parsexlog.c" />
<ClCompile Include="..\src\pg_probackup.c" />
<ClCompile Include="..\src\restore.c" />
<ClCompile Include="..\src\show.c" />
<ClCompile Include="..\src\status.c" />
<ClCompile Include="..\src\util.c" />
<ClCompile Include="..\src\utils\logger.c" />
<ClCompile Include="..\src\utils\parray.c" />
<ClCompile Include="..\src\utils\pgut.c" />
<ClCompile Include="..\src\validate.c" />
<ClCompile Include="..\src\utils\json.c" />
<ClCompile Include="..\src\utils\thread.c" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="@PGSRC@\bin\pg_basebackup\receivelog.h" />
<ClInclude Include="@PGSRC@\bin\pg_basebackup\streamutil.h" />
<ClInclude Include="@PGSRC@\bin\pg_basebackup\walmethods.h" />
<ClInclude Include="@PGSRC@\bin\pg_rewind\datapagemap.h" />
<ClInclude Include="@PGSRC@\bin\pg_rewind\logging.h" />
<ClInclude Include="..\src\pg_probackup.h" />
<ClInclude Include="..\src\utils\logger.h" />
<ClInclude Include="..\src\utils\parray.h" />
<ClInclude Include="..\src\utils\pgut.h" />
<ClInclude Include="..\src\utils\json.h" />
<ClInclude Include="..\src\utils\thread.h" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>

View File

@ -0,0 +1,210 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{4886B21A-D8CA-4A03-BADF-743B24C88327}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>pg_probackup</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;@PGSRC@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
<ExcludePath />
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;@PGSRC@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
<ExcludePath />
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;@PGSRC@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
<ExcludePath />
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;@PGSRC@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;$(LibraryPath)</LibraryPath>
<ExcludePath />
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_CRT_NONSTDC_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>@ADDLIBS32@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_CRT_NONSTDC_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>@ADDLIBS@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;_CRT_NONSTDC_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>@ADDLIBS32@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
<IgnoreSpecificDefaultLibraries>libc;%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;_CRT_NONSTDC_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>@ADDLIBS@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
<IgnoreSpecificDefaultLibraries>libc;%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="@PGSRC@\backend\access\transam\xlogreader.c" />
<ClCompile Include="@PGSRC@\bin\pg_basebackup\receivelog.c" />
<ClCompile Include="@PGSRC@\bin\pg_basebackup\streamutil.c" />
<ClCompile Include="@PGSRC@\bin\pg_rewind\datapagemap.c" />
<ClCompile Include="@PGSRC@\interfaces\libpq\pthread-win32.c" />
<ClCompile Include="..\src\archive.c" />
<ClCompile Include="..\src\backup.c" />
<ClCompile Include="..\src\catalog.c" />
<ClCompile Include="..\src\configure.c" />
<ClCompile Include="..\src\data.c" />
<ClCompile Include="..\src\delete.c" />
<ClCompile Include="..\src\dir.c" />
<ClCompile Include="..\src\fetch.c" />
<ClCompile Include="..\src\help.c" />
<ClCompile Include="..\src\init.c" />
<ClCompile Include="..\src\parsexlog.c" />
<ClCompile Include="..\src\pg_probackup.c" />
<ClCompile Include="..\src\restore.c" />
<ClCompile Include="..\src\show.c" />
<ClCompile Include="..\src\status.c" />
<ClCompile Include="..\src\util.c" />
<ClCompile Include="..\src\utils\logger.c" />
<ClCompile Include="..\src\utils\parray.c" />
<ClCompile Include="..\src\utils\pgut.c" />
<ClCompile Include="..\src\validate.c" />
<ClCompile Include="..\src\utils\json.c" />
<ClCompile Include="..\src\utils\thread.c" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="@PGSRC@\bin\pg_basebackup\receivelog.h" />
<ClInclude Include="@PGSRC@\bin\pg_basebackup\streamutil.h" />
<ClInclude Include="@PGSRC@\bin\pg_rewind\datapagemap.h" />
<ClInclude Include="@PGSRC@\bin\pg_rewind\logging.h" />
<ClInclude Include="..\src\pg_probackup.h" />
<ClInclude Include="..\src\utils\logger.h" />
<ClInclude Include="..\src\utils\parray.h" />
<ClInclude Include="..\src\utils\pgut.h" />
<ClInclude Include="..\src\utils\json.h" />
<ClInclude Include="..\src\utils\thread.h" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>

View File

@ -0,0 +1,203 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{4886B21A-D8CA-4A03-BADF-743B24C88327}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>pg_probackup</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v120</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;@$(LibraryPath)</LibraryPath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;@$(LibraryPath)</LibraryPath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;@$(LibraryPath)</LibraryPath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>../;@PGSRC@\include;@PGSRC@\bin\pg_basebackup;@PGSRC@\bin\pg_rewind;@PGSRC@\include\port\win32_msvc;@PGSRC@\interfaces\libpq;@PGSRC@\include\port\win32;@PGSRC@\port;@ADDINCLUDE@;$(IncludePath)</IncludePath>
<LibraryPath>@PGROOT@\lib;@$(LibraryPath)</LibraryPath>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_CRT_NONSTDC_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>@ADDLIBS@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_CRT_NONSTDC_NO_DEPRECATE;_CRT_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>@ADDLIBS@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;_CRT_NONSTDC_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>@ADDLIBS@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<IgnoreSpecificDefaultLibraries>libc;%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;_CRT_NONSTDC_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>@ADDLIBS@;libpgfeutils.lib;libpgcommon.lib;libpgport.lib;libpq.lib;ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<IgnoreSpecificDefaultLibraries>libc;%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
</Link>
</ItemDefinitionGroup>
<!-- @PGROOT@\lib;@ADDLIBS@ @PGSRC@ @ADDINCLUDE@ -->
<ItemGroup>
<ClCompile Include="@PGSRC@\backend\access\transam\xlogreader.c" />
<ClCompile Include="@PGSRC@\bin\pg_basebackup\receivelog.c" />
<ClCompile Include="@PGSRC@\bin\pg_basebackup\streamutil.c" />
<ClCompile Include="@PGSRC@\bin\pg_basebackup\walmethods.c" />
<ClCompile Include="@PGSRC@\bin\pg_rewind\datapagemap.c" />
<ClCompile Include="@PGSRC@\interfaces\libpq\pthread-win32.c" />
<ClCompile Include="..\src\archive.c" />
<ClCompile Include="..\src\backup.c" />
<ClCompile Include="..\src\catalog.c" />
<ClCompile Include="..\src\configure.c" />
<ClCompile Include="..\src\data.c" />
<ClCompile Include="..\src\delete.c" />
<ClCompile Include="..\src\dir.c" />
<ClCompile Include="..\src\fetch.c" />
<ClCompile Include="..\src\help.c" />
<ClCompile Include="..\src\init.c" />
<ClCompile Include="..\src\parsexlog.c" />
<ClCompile Include="..\src\pg_probackup.c" />
<ClCompile Include="..\src\restore.c" />
<ClCompile Include="..\src\show.c" />
<ClCompile Include="..\src\status.c" />
<ClCompile Include="..\src\util.c" />
<ClCompile Include="..\src\utils\logger.c" />
<ClCompile Include="..\src\utils\parray.c" />
<ClCompile Include="..\src\utils\pgut.c" />
<ClCompile Include="..\src\validate.c" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="@PGSRC@\bin\pg_basebackup\receivelog.h" />
<ClInclude Include="@PGSRC@\bin\pg_basebackup\streamutil.h" />
<ClInclude Include="@PGSRC@\bin\pg_basebackup\walmethods.h" />
<ClInclude Include="@PGSRC@\bin\pg_rewind\datapagemap.h" />
<ClInclude Include="@PGSRC@\bin\pg_rewind\logging.h" />
<ClInclude Include="..\src\pg_probackup.h" />
<ClInclude Include="..\src\utils\logger.h" />
<ClInclude Include="..\src\utils\parray.h" />
<ClInclude Include="..\src\utils\pgut.h" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>

View File

@ -18,16 +18,16 @@
#include <unistd.h>
#include <dirent.h>
#include <time.h>
#include <pthread.h>
#include "libpq/pqsignal.h"
#include "storage/bufpage.h"
#include "catalog/catalog.h"
#include "catalog/pg_tablespace.h"
#include "datapagemap.h"
#include "receivelog.h"
#include "streamutil.h"
#include "libpq/pqsignal.h"
#include "pgtar.h"
#include "receivelog.h"
#include "storage/bufpage.h"
#include "streamutil.h"
#include "utils/thread.h"
static int standby_message_timeout = 10 * 1000; /* 10 sec = default */
static XLogRecPtr stop_backup_lsn = InvalidXLogRecPtr;
@ -89,8 +89,8 @@ static bool pg_stop_backup_is_sent = false;
static void backup_cleanup(bool fatal, void *userdata);
static void backup_disconnect(bool fatal, void *userdata);
static void backup_files(void *arg);
static void remote_backup_files(void *arg);
static void *backup_files(void *arg);
static void *remote_backup_files(void *arg);
static void do_backup_instance(void);
@ -105,7 +105,7 @@ static void write_backup_file_list(parray *files, const char *root);
static void wait_wal_lsn(XLogRecPtr lsn, bool wait_prev_segment);
static void wait_replica_wal_lsn(XLogRecPtr lsn, bool is_start_backup);
static void make_pagemap_from_ptrack(parray *files);
static void StreamLog(void *arg);
static void *StreamLog(void *arg);
static void get_remote_pgdata_filelist(parray *files);
static void ReceiveFileList(parray* files, PGconn *conn, PGresult *res, int rownum);
@ -253,7 +253,11 @@ ReceiveFileList(parray* files, PGconn *conn, PGresult *res, int rownum)
else if (copybuf[156] == '2')
{
/* Symlink */
#ifndef WIN32
pgfile->mode |= S_IFLNK;
#else
pgfile->mode |= S_IFDIR;
#endif
}
else
elog(ERROR, "Unrecognized link indicator \"%c\"\n",
@ -289,7 +293,7 @@ remote_copy_file(PGconn *conn, pgFile* file)
DATABASE_DIR);
join_path_components(to_path, database_path, file->path);
out = fopen(to_path, "w");
out = fopen(to_path, PG_BINARY_W);
if (out == NULL)
{
int errno_tmp = errno;
@ -353,7 +357,7 @@ remote_copy_file(PGconn *conn, pgFile* file)
elog(ERROR, "final receive failed: status %d ; %s",PQresultStatus(res), PQerrorMessage(conn));
}
file->write_size = file->read_size;
file->write_size = (int64) file->read_size;
FIN_CRC32C(file->crc);
fclose(out);
@ -363,7 +367,7 @@ remote_copy_file(PGconn *conn, pgFile* file)
* Take a remote backup of the PGDATA at a file level.
* Copy all directories and files listed in backup_files_list.
*/
static void
static void *
remote_backup_files(void *arg)
{
int i;
@ -385,7 +389,7 @@ remote_backup_files(void *arg)
if (S_ISDIR(file->mode))
continue;
if (__sync_lock_test_and_set(&file->lock, 1) != 0)
if (!pg_atomic_test_set_flag(&file->lock))
continue;
file_backup_conn = pgut_connect_replication(pgut_dbname);
@ -434,13 +438,15 @@ remote_backup_files(void *arg)
/* receive the data from stream and write to backup file */
remote_copy_file(file_backup_conn, file);
elog(VERBOSE, "File \"%s\". Copied %lu bytes",
file->path, (unsigned long) file->write_size);
elog(VERBOSE, "File \"%s\". Copied " INT64_FORMAT " bytes",
file->path, file->write_size);
PQfinish(file_backup_conn);
}
/* Data files transferring is successful */
arguments->ret = 0;
return NULL;
}
/*
@ -456,8 +462,9 @@ do_backup_instance(void)
char label[1024];
XLogRecPtr prev_backup_start_lsn = InvalidXLogRecPtr;
pthread_t backup_threads[num_threads];
backup_files_args *backup_threads_args[num_threads];
/* arrays with meta info for multi threaded backup */
pthread_t *backup_threads;
backup_files_args *backup_threads_args;
bool backup_isok = true;
pgBackup *prev_backup = NULL;
@ -592,8 +599,7 @@ do_backup_instance(void)
/* By default there are some error */
stream_thread_arg.ret = 1;
pthread_create(&stream_thread, NULL, (void *(*)(void *)) StreamLog,
&stream_thread_arg);
pthread_create(&stream_thread, NULL, StreamLog, &stream_thread_arg);
}
/* initialize backup list */
@ -633,7 +639,7 @@ do_backup_instance(void)
* For backup from master wait for previous segment.
* For backup from replica wait for current segment.
*/
!from_replica, backup_files_list);
!current.from_replica, backup_files_list);
}
if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
@ -681,16 +687,19 @@ do_backup_instance(void)
}
/* setup threads */
__sync_lock_release(&file->lock);
pg_atomic_clear_flag(&file->lock);
}
/* sort by size for load balancing */
parray_qsort(backup_files_list, pgFileCompareSize);
/* init thread args with own file lists */
backup_threads = (pthread_t *) palloc(sizeof(pthread_t)*num_threads);
backup_threads_args = (backup_files_args *) palloc(sizeof(backup_files_args)*num_threads);
for (i = 0; i < num_threads; i++)
{
backup_files_args *arg = pg_malloc(sizeof(backup_files_args));
backup_files_args *arg = &(backup_threads_args[i]);
arg->from_root = pgdata;
arg->to_root = database_path;
@ -701,33 +710,27 @@ do_backup_instance(void)
arg->thread_cancel_conn = NULL;
/* By default there are some error */
arg->ret = 1;
backup_threads_args[i] = arg;
}
/* Run threads */
elog(LOG, "Start transfering data files");
for (i = 0; i < num_threads; i++)
{
backup_files_args *arg = &(backup_threads_args[i]);
elog(VERBOSE, "Start thread num: %i", i);
if (!is_remote_backup)
pthread_create(&backup_threads[i], NULL,
(void *(*)(void *)) backup_files,
backup_threads_args[i]);
pthread_create(&backup_threads[i], NULL, backup_files, arg);
else
pthread_create(&backup_threads[i], NULL,
(void *(*)(void *)) remote_backup_files,
backup_threads_args[i]);
pthread_create(&backup_threads[i], NULL, remote_backup_files, arg);
}
/* Wait threads */
for (i = 0; i < num_threads; i++)
{
pthread_join(backup_threads[i], NULL);
if (backup_threads_args[i]->ret == 1)
if (backup_threads_args[i].ret == 1)
backup_isok = false;
pg_free(backup_threads_args[i]);
}
if (backup_isok)
elog(LOG, "Data files are transfered");
@ -815,11 +818,15 @@ do_backup(time_t start_time)
pgut_atexit_push(backup_disconnect, NULL);
current.primary_conninfo = pgut_get_conninfo_string(backup_conn);
current.compress_alg = compress_alg;
current.compress_level = compress_level;
/* Confirm data block size and xlog block size are compatible */
confirm_block_size("block_size", BLCKSZ);
confirm_block_size("wal_block_size", XLOG_BLCKSZ);
from_replica = pg_is_in_recovery();
current.from_replica = pg_is_in_recovery();
/* Confirm that this server version is supported */
check_server_version();
@ -859,7 +866,7 @@ do_backup(time_t start_time)
}
}
if (from_replica)
if (current.from_replica)
{
/* Check master connection options */
if (master_host == NULL)
@ -956,7 +963,7 @@ check_server_version(void)
"server version is %s, must be %s or higher",
server_version_str, "9.5");
if (from_replica && server_version < 90600)
if (current.from_replica && server_version < 90600)
elog(ERROR,
"server version is %s, must be %s or higher for backup from replica",
server_version_str, "9.6");
@ -1013,7 +1020,7 @@ check_system_identifiers(void)
system_id_pgdata = get_system_identifier(pgdata);
system_id_conn = get_remote_system_identifier(backup_conn);
if (system_id_conn != system_identifier)
elog(ERROR, "Backup data directory was initialized for system id %ld, but connected instance system id is %ld",
system_identifier, system_id_conn);
@ -1036,14 +1043,14 @@ confirm_block_size(const char *name, int blcksz)
res = pgut_execute(backup_conn, "SELECT pg_catalog.current_setting($1)", 1, &name);
if (PQntuples(res) != 1 || PQnfields(res) != 1)
elog(ERROR, "cannot get %s: %s", name, PQerrorMessage(backup_conn));
block_size = strtol(PQgetvalue(res, 0, 0), &endp, 10);
PQclear(res);
if ((endp && *endp) || block_size != blcksz)
elog(ERROR,
"%s(%d) is not compatible(%d expected)",
name, block_size, blcksz);
PQclear(res);
}
/*
@ -1061,7 +1068,7 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup)
params[0] = label;
/* For replica we call pg_start_backup() on master */
conn = (from_replica) ? master_conn : backup_conn;
conn = (backup->from_replica) ? master_conn : backup_conn;
/* 2nd argument is 'fast'*/
params[1] = smooth ? "false" : "true";
@ -1076,6 +1083,12 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup)
2,
params);
/*
* Set flag that pg_start_backup() was called. If an error will happen it
* is necessary to call pg_stop_backup() in backup_cleanup().
*/
backup_in_progress = true;
/* Extract timeline and LSN from results of pg_start_backup() */
XLogDataFromLSN(PQgetvalue(res, 0, 0), &xlogid, &xrecoff);
/* Calculate LSN */
@ -1106,14 +1119,8 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup)
}
/* Wait for start_lsn to be replayed by replica */
if (from_replica)
if (backup->from_replica)
wait_replica_wal_lsn(backup->start_lsn, true);
/*
* Set flag that pg_start_backup() was called. If an error will happen it
* is necessary to call pg_stop_backup() in backup_cleanup().
*/
backup_in_progress = true;
}
/*
@ -1555,8 +1562,6 @@ wait_replica_wal_lsn(XLogRecPtr lsn, bool is_start_backup)
{
uint32 try_count = 0;
Assert(from_replica);
while (true)
{
PGresult *res;
@ -1651,7 +1656,7 @@ pg_stop_backup(pgBackup *backup)
elog(FATAL, "backup is not in progress");
/* For replica we call pg_stop_backup() on master */
conn = (from_replica) ? master_conn : backup_conn;
conn = (current.from_replica) ? master_conn : backup_conn;
/* Remove annoying NOTICE messages generated by backend */
res = pgut_execute(conn, "SET client_min_messages = warning;",
@ -1664,7 +1669,7 @@ pg_stop_backup(pgBackup *backup)
const char *params[1];
char name[1024];
if (!from_replica)
if (!current.from_replica)
snprintf(name, lengthof(name), "pg_probackup, backup_id %s",
base36enc(backup->start_time));
else
@ -1800,7 +1805,7 @@ pg_stop_backup(pgBackup *backup)
/* Write backup_label */
join_path_components(backup_label, path, PG_BACKUP_LABEL_FILE);
fp = fopen(backup_label, "w");
fp = fopen(backup_label, PG_BINARY_W);
if (fp == NULL)
elog(ERROR, "can't open backup label file \"%s\": %s",
backup_label, strerror(errno));
@ -1831,7 +1836,7 @@ pg_stop_backup(pgBackup *backup)
elog(ERROR,
"result of txid_snapshot_xmax() is invalid: %s",
PQgetvalue(res, 0, 0));
if (!parse_time(PQgetvalue(res, 0, 1), &recovery_time))
if (!parse_time(PQgetvalue(res, 0, 1), &recovery_time, true))
elog(ERROR,
"result of current_timestamp is invalid: %s",
PQgetvalue(res, 0, 1));
@ -1848,7 +1853,7 @@ pg_stop_backup(pgBackup *backup)
char tablespace_map[MAXPGPATH];
join_path_components(tablespace_map, path, PG_TABLESPACE_MAP_FILE);
fp = fopen(tablespace_map, "w");
fp = fopen(tablespace_map, PG_BINARY_W);
if (fp == NULL)
elog(ERROR, "can't open tablespace map file \"%s\": %s",
tablespace_map, strerror(errno));
@ -1892,7 +1897,7 @@ pg_stop_backup(pgBackup *backup)
stream_xlog_path[MAXPGPATH];
/* Wait for stop_lsn to be received by replica */
if (from_replica)
if (backup->from_replica)
wait_replica_wal_lsn(stop_backup_lsn, false);
/*
* Wait for stop_lsn to be archived or streamed.
@ -2004,7 +2009,7 @@ backup_disconnect(bool fatal, void *userdata)
* In incremental backup mode, copy only files or datafiles' pages changed after
* previous backup.
*/
static void
static void *
backup_files(void *arg)
{
int i;
@ -2019,7 +2024,7 @@ backup_files(void *arg)
pgFile *file = (pgFile *) parray_get(arguments->backup_files_list, i);
elog(VERBOSE, "Copying file: \"%s\" ", file->path);
if (__sync_lock_test_and_set(&file->lock, 1) != 0)
if (!pg_atomic_test_set_flag(&file->lock))
continue;
/* check for interrupt */
@ -2092,27 +2097,24 @@ backup_files(void *arg)
continue;
}
}
else
/* TODO:
* Check if file exists in previous backup
* If exists:
* if mtime > start_backup_time of parent backup,
* copy file to backup
* if mtime < start_backup_time
* calculate crc, compare crc to old file
* if crc is the same -> skip file
*/
if (!copy_file(arguments->from_root,
arguments->to_root,
file))
/* TODO:
* Check if file exists in previous backup
* If exists:
* if mtime > start_backup_time of parent backup,
* copy file to backup
* if mtime < start_backup_time
* calculate crc, compare crc to old file
* if crc is the same -> skip file
*/
else if (!copy_file(arguments->from_root, arguments->to_root, file))
{
file->write_size = BYTES_INVALID;
elog(VERBOSE, "File \"%s\" was not copied to backup", file->path);
continue;
}
elog(VERBOSE, "File \"%s\". Copied %lu bytes",
file->path, (unsigned long) file->write_size);
elog(VERBOSE, "File \"%s\". Copied "INT64_FORMAT " bytes",
file->path, file->write_size);
}
else
elog(LOG, "unexpected file type %d", buf.st_mode);
@ -2124,6 +2126,8 @@ backup_files(void *arg)
/* Data files transferring is successful */
arguments->ret = 0;
return NULL;
}
/*
@ -2399,12 +2403,12 @@ make_pagemap_from_ptrack(parray *files)
if (file->is_datafile)
{
if (file->tblspcOid == tblspcOid_with_ptrack_init
&& file->dbOid == dbOid_with_ptrack_init)
if (file->tblspcOid == tblspcOid_with_ptrack_init &&
file->dbOid == dbOid_with_ptrack_init)
{
/* ignore ptrack if ptrack_init exists */
elog(VERBOSE, "Ignoring ptrack because of ptrack_init for file: %s", file->path);
file->pagemap.bitmapsize = PageBitmapIsAbsent;
file->pagemap_isabsent = true;
continue;
}
@ -2457,7 +2461,7 @@ make_pagemap_from_ptrack(parray *files)
* - target relation was deleted.
*/
elog(VERBOSE, "Ptrack is missing for file: %s", file->path);
file->pagemap.bitmapsize = PageBitmapIsAbsent;
file->pagemap_isabsent = true;
}
}
}
@ -2533,7 +2537,7 @@ stop_streaming(XLogRecPtr xlogpos, uint32 timeline, bool segment_finished)
/*
* Start the log streaming
*/
static void
static void *
StreamLog(void *arg)
{
XLogRecPtr startpos;
@ -2607,6 +2611,8 @@ StreamLog(void *arg)
PQfinish(stream_arg->conn);
stream_arg->conn = NULL;
return NULL;
}
/*

View File

@ -12,7 +12,6 @@
#include <dirent.h>
#include <fcntl.h>
#include <libgen.h>
#include <signal.h>
#include <sys/file.h>
#include <sys/stat.h>
@ -251,14 +250,14 @@ IsDir(const char *dirpath, const char *entry)
parray *
catalog_get_backup_list(time_t requested_backup_id)
{
DIR *date_dir = NULL;
struct dirent *date_ent = NULL;
DIR *data_dir = NULL;
struct dirent *data_ent = NULL;
parray *backups = NULL;
pgBackup *backup = NULL;
/* open backup instance backups directory */
date_dir = opendir(backup_instance_path);
if (date_dir == NULL)
data_dir = opendir(backup_instance_path);
if (data_dir == NULL)
{
elog(WARNING, "cannot open directory \"%s\": %s", backup_instance_path,
strerror(errno));
@ -267,21 +266,21 @@ catalog_get_backup_list(time_t requested_backup_id)
/* scan the directory and list backups */
backups = parray_new();
for (; (date_ent = readdir(date_dir)) != NULL; errno = 0)
for (; (data_ent = readdir(data_dir)) != NULL; errno = 0)
{
char backup_conf_path[MAXPGPATH];
char date_path[MAXPGPATH];
char data_path[MAXPGPATH];
/* skip not-directory entries and hidden entries */
if (!IsDir(backup_instance_path, date_ent->d_name)
|| date_ent->d_name[0] == '.')
if (!IsDir(backup_instance_path, data_ent->d_name)
|| data_ent->d_name[0] == '.')
continue;
/* open subdirectory of specific backup */
join_path_components(date_path, backup_instance_path, date_ent->d_name);
join_path_components(data_path, backup_instance_path, data_ent->d_name);
/* read backup information from BACKUP_CONTROL_FILE */
snprintf(backup_conf_path, MAXPGPATH, "%s/%s", date_path, BACKUP_CONTROL_FILE);
snprintf(backup_conf_path, MAXPGPATH, "%s/%s", data_path, BACKUP_CONTROL_FILE);
backup = readBackupControlFile(backup_conf_path);
/* ignore corrupted backups */
@ -299,8 +298,8 @@ catalog_get_backup_list(time_t requested_backup_id)
if (errno && errno != ENOENT)
{
elog(WARNING, "cannot read date directory \"%s\": %s",
date_ent->d_name, strerror(errno));
elog(WARNING, "cannot read data directory \"%s\": %s",
data_ent->d_name, strerror(errno));
goto err_proc;
}
}
@ -311,16 +310,16 @@ catalog_get_backup_list(time_t requested_backup_id)
goto err_proc;
}
closedir(date_dir);
date_dir = NULL;
closedir(data_dir);
data_dir = NULL;
parray_qsort(backups, pgBackupCompareIdDesc);
return backups;
err_proc:
if (date_dir)
closedir(date_dir);
if (data_dir)
closedir(data_dir);
if (backup)
pgBackupFree(backup);
if (backups)
@ -385,15 +384,17 @@ pgBackupWriteControl(FILE *out, pgBackup *backup)
fprintf(out, "#Configuration\n");
fprintf(out, "backup-mode = %s\n", pgBackupGetBackupMode(backup));
fprintf(out, "stream = %s\n", backup->stream?"true":"false");
fprintf(out, "compress-alg = %s\n", deparse_compress_alg(compress_alg));
fprintf(out, "compress-level = %d\n", compress_level);
fprintf(out, "from-replica = %s\n", from_replica?"true":"false");
fprintf(out, "stream = %s\n", backup->stream ? "true" : "false");
fprintf(out, "compress-alg = %s\n",
deparse_compress_alg(backup->compress_alg));
fprintf(out, "compress-level = %d\n", backup->compress_level);
fprintf(out, "from-replica = %s\n", backup->from_replica ? "true" : "false");
fprintf(out, "\n#Compatibility\n");
fprintf(out, "block-size = %u\n", backup->block_size);
fprintf(out, "xlog-block-size = %u\n", backup->wal_block_size);
fprintf(out, "checksum-version = %u\n", backup->checksum_version);
fprintf(out, "program-version = %s\n", PROGRAM_VERSION);
if (backup->server_version[0] != '\0')
fprintf(out, "server-version = %s\n", backup->server_version);
@ -429,7 +430,7 @@ pgBackupWriteControl(FILE *out, pgBackup *backup)
if (backup->data_bytes != BYTES_INVALID)
fprintf(out, "data-bytes = " INT64_FORMAT "\n", backup->data_bytes);
if (backup->data_bytes != BYTES_INVALID)
if (backup->wal_bytes != BYTES_INVALID)
fprintf(out, "wal-bytes = " INT64_FORMAT "\n", backup->wal_bytes);
fprintf(out, "status = %s\n", status2str(backup->status));
@ -475,10 +476,9 @@ readBackupControlFile(const char *path)
char *stop_lsn = NULL;
char *status = NULL;
char *parent_backup = NULL;
char *compress_alg = NULL;
char *program_version = NULL;
char *server_version = NULL;
int *compress_level;
bool *from_replica;
char *compress_alg = NULL;
pgut_option options[] =
{
@ -495,13 +495,14 @@ readBackupControlFile(const char *path)
{'u', 0, "block-size", &backup->block_size, SOURCE_FILE_STRICT},
{'u', 0, "xlog-block-size", &backup->wal_block_size, SOURCE_FILE_STRICT},
{'u', 0, "checksum-version", &backup->checksum_version, SOURCE_FILE_STRICT},
{'s', 0, "program-version", &program_version, SOURCE_FILE_STRICT},
{'s', 0, "server-version", &server_version, SOURCE_FILE_STRICT},
{'b', 0, "stream", &backup->stream, SOURCE_FILE_STRICT},
{'s', 0, "status", &status, SOURCE_FILE_STRICT},
{'s', 0, "parent-backup-id", &parent_backup, SOURCE_FILE_STRICT},
{'s', 0, "compress-alg", &compress_alg, SOURCE_FILE_STRICT},
{'u', 0, "compress-level", &compress_level, SOURCE_FILE_STRICT},
{'b', 0, "from-replica", &from_replica, SOURCE_FILE_STRICT},
{'u', 0, "compress-level", &backup->compress_level, SOURCE_FILE_STRICT},
{'b', 0, "from-replica", &backup->from_replica, SOURCE_FILE_STRICT},
{'s', 0, "primary-conninfo", &backup->primary_conninfo, SOURCE_FILE_STRICT},
{0}
};
@ -571,6 +572,13 @@ readBackupControlFile(const char *path)
free(parent_backup);
}
if (program_version)
{
StrNCpy(backup->program_version, program_version,
sizeof(backup->program_version));
pfree(program_version);
}
if (server_version)
{
StrNCpy(backup->server_version, server_version,
@ -578,6 +586,9 @@ readBackupControlFile(const char *path)
pfree(server_version);
}
if (compress_alg)
backup->compress_alg = parse_compress_alg(compress_alg);
return backup;
}
@ -626,6 +637,48 @@ deparse_backup_mode(BackupMode mode)
return NULL;
}
CompressAlg
parse_compress_alg(const char *arg)
{
size_t len;
/* Skip all spaces detected */
while (isspace((unsigned char)*arg))
arg++;
len = strlen(arg);
if (len == 0)
elog(ERROR, "compress algrorithm is empty");
if (pg_strncasecmp("zlib", arg, len) == 0)
return ZLIB_COMPRESS;
else if (pg_strncasecmp("pglz", arg, len) == 0)
return PGLZ_COMPRESS;
else if (pg_strncasecmp("none", arg, len) == 0)
return NONE_COMPRESS;
else
elog(ERROR, "invalid compress algorithm value \"%s\"", arg);
return NOT_DEFINED_COMPRESS;
}
const char*
deparse_compress_alg(int alg)
{
switch (alg)
{
case NONE_COMPRESS:
case NOT_DEFINED_COMPRESS:
return "none";
case ZLIB_COMPRESS:
return "zlib";
case PGLZ_COMPRESS:
return "pglz";
}
return NULL;
}
/* free pgBackup object */
void
pgBackupFree(void *backup)

View File

@ -2,19 +2,37 @@
*
* configure.c: - manage backup catalog.
*
* Copyright (c) 2017-2017, Postgres Professional
* Copyright (c) 2017-2018, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#include "pg_probackup.h"
#include "pqexpbuffer.h"
#include "utils/json.h"
static void opt_log_level_console(pgut_option *opt, const char *arg);
static void opt_log_level_file(pgut_option *opt, const char *arg);
static void opt_compress_alg(pgut_option *opt, const char *arg);
static void show_configure_start(void);
static void show_configure_end(void);
static void show_configure(pgBackupConfig *config);
static void show_configure_json(pgBackupConfig *config);
static pgBackupConfig *cur_config = NULL;
static PQExpBufferData show_buf;
static int32 json_level = 0;
/*
* All this code needs refactoring.
*/
/* Set configure options */
int
do_configure(bool show_only)
@ -68,7 +86,7 @@ do_configure(bool show_only)
config->compress_level = compress_level;
if (show_only)
writeBackupCatalogConfig(stderr, config);
show_configure(config);
else
writeBackupCatalogConfigFile(config);
@ -114,7 +132,7 @@ writeBackupCatalogConfig(FILE *out, pgBackupConfig *config)
fprintf(out, "#Backup instance info\n");
fprintf(out, "PGDATA = %s\n", config->pgdata);
fprintf(out, "system-identifier = %li\n", config->system_identifier);
fprintf(out, "system-identifier = " UINT64_FORMAT "\n", config->system_identifier);
fprintf(out, "#Connection parameters:\n");
if (config->pgdatabase)
@ -251,7 +269,6 @@ readBackupCatalogConfigFile(void)
pgut_readopt(path, options, ERROR);
return config;
}
static void
@ -271,3 +288,146 @@ opt_compress_alg(pgut_option *opt, const char *arg)
{
cur_config->compress_alg = parse_compress_alg(arg);
}
/*
* Initialize configure visualization.
*/
static void
show_configure_start(void)
{
if (show_format == SHOW_PLAIN)
return;
/* For now we need buffer only for JSON format */
json_level = 0;
initPQExpBuffer(&show_buf);
}
/*
* Finalize configure visualization.
*/
static void
show_configure_end(void)
{
if (show_format == SHOW_PLAIN)
return;
else
appendPQExpBufferChar(&show_buf, '\n');
fputs(show_buf.data, stdout);
termPQExpBuffer(&show_buf);
}
/*
* Show configure information of pg_probackup.
*/
static void
show_configure(pgBackupConfig *config)
{
show_configure_start();
if (show_format == SHOW_PLAIN)
writeBackupCatalogConfig(stdout, config);
else
show_configure_json(config);
show_configure_end();
}
/*
* Json output.
*/
static void
show_configure_json(pgBackupConfig *config)
{
PQExpBuffer buf = &show_buf;
json_add(buf, JT_BEGIN_OBJECT, &json_level);
json_add_value(buf, "pgdata", config->pgdata, json_level, false);
json_add_key(buf, "system-identifier", json_level, true);
appendPQExpBuffer(buf, UINT64_FORMAT, config->system_identifier);
/* Connection parameters */
if (config->pgdatabase)
json_add_value(buf, "pgdatabase", config->pgdatabase, json_level, true);
if (config->pghost)
json_add_value(buf, "pghost", config->pghost, json_level, true);
if (config->pgport)
json_add_value(buf, "pgport", config->pgport, json_level, true);
if (config->pguser)
json_add_value(buf, "pguser", config->pguser, json_level, true);
/* Replica parameters */
if (config->master_host)
json_add_value(buf, "master-host", config->master_host, json_level,
true);
if (config->master_port)
json_add_value(buf, "master-port", config->master_port, json_level,
true);
if (config->master_db)
json_add_value(buf, "master-db", config->master_db, json_level, true);
if (config->master_user)
json_add_value(buf, "master-user", config->master_user, json_level,
true);
if (config->replica_timeout != INT_MIN)
{
json_add_key(buf, "replica-timeout", json_level, true);
appendPQExpBuffer(buf, "%d", config->replica_timeout);
}
/* Logging parameters */
if (config->log_level_console != INT_MIN)
json_add_value(buf, "log-level-console",
deparse_log_level(config->log_level_console), json_level,
true);
if (config->log_level_file != INT_MIN)
json_add_value(buf, "log-level-file",
deparse_log_level(config->log_level_file), json_level,
true);
if (config->log_filename)
json_add_value(buf, "log-filename", config->log_filename, json_level,
true);
if (config->error_log_filename)
json_add_value(buf, "error-log-filename", config->error_log_filename,
json_level, true);
if (config->log_directory)
json_add_value(buf, "log-directory", config->log_directory, json_level,
true);
if (config->log_rotation_size)
{
json_add_key(buf, "log-rotation-size", json_level, true);
appendPQExpBuffer(buf, "%d", config->log_rotation_size);
}
if (config->log_rotation_age)
{
json_add_key(buf, "log-rotation-age", json_level, true);
appendPQExpBuffer(buf, "%d", config->log_rotation_age);
}
/* Retention parameters */
if (config->retention_redundancy)
{
json_add_key(buf, "retention-redundancy", json_level, true);
appendPQExpBuffer(buf, "%u", config->retention_redundancy);
}
if (config->retention_window)
{
json_add_key(buf, "retention-window", json_level, true);
appendPQExpBuffer(buf, "%u", config->retention_window);
}
/* Compression parameters */
json_add_value(buf, "compress-algorithm",
deparse_compress_alg(config->compress_alg), json_level,
true);
json_add_key(buf, "compress-level", json_level, true);
appendPQExpBuffer(buf, "%d", config->compress_level);
json_add(buf, JT_END_OBJECT, &json_level);
}

View File

@ -27,7 +27,7 @@
#ifdef HAVE_LIBZ
/* Implementation of zlib compression method */
static size_t zlib_compress(void* dst, size_t dst_size, void const* src, size_t src_size)
static int32 zlib_compress(void* dst, size_t dst_size, void const* src, size_t src_size)
{
uLongf compressed_size = dst_size;
int rc = compress2(dst, &compressed_size, src, src_size, compress_level);
@ -35,7 +35,7 @@ static size_t zlib_compress(void* dst, size_t dst_size, void const* src, size_t
}
/* Implementation of zlib compression method */
static size_t zlib_decompress(void* dst, size_t dst_size, void const* src, size_t src_size)
static int32 zlib_decompress(void* dst, size_t dst_size, void const* src, size_t src_size)
{
uLongf dest_len = dst_size;
int rc = uncompress(dst, &dest_len, src, src_size);
@ -47,7 +47,7 @@ static size_t zlib_decompress(void* dst, size_t dst_size, void const* src, size_
* Compresses source into dest using algorithm. Returns the number of bytes
* written in the destination buffer, or -1 if compression fails.
*/
static size_t
static int32
do_compress(void* dst, size_t dst_size, void const* src, size_t src_size, CompressAlg alg)
{
switch (alg)
@ -70,7 +70,7 @@ do_compress(void* dst, size_t dst_size, void const* src, size_t src_size, Compre
* Decompresses source into dest using algorithm. Returns the number of bytes
* decompressed in the destination buffer, or -1 if decompression fails.
*/
static size_t
static int32
do_decompress(void* dst, size_t dst_size, void const* src, size_t src_size, CompressAlg alg)
{
switch (alg)
@ -101,6 +101,7 @@ typedef struct BackupPageHeader
/* Special value for compressed_size field */
#define PageIsTruncated -2
#define SkipCurrentPage -3
/* Verify page's header */
static bool
@ -134,8 +135,8 @@ static int
read_page_from_file(pgFile *file, BlockNumber blknum,
FILE *in, Page page, XLogRecPtr *page_lsn)
{
off_t offset = blknum*BLCKSZ;
size_t read_len = 0;
off_t offset = blknum * BLCKSZ;
size_t read_len = 0;
/* read the block */
if (fseek(in, offset, SEEK_SET) != 0)
@ -216,31 +217,32 @@ read_page_from_file(pgFile *file, BlockNumber blknum,
}
/*
* Backup the specified block from a file of a relation.
* Verify page header and checksum of the page and write it
* to the backup file.
* Retrieves a page taking the backup mode into account
* and writes it into argument "page". Argument "page"
* should be a pointer to allocated BLCKSZ of bytes.
*
* Prints appropriate warnings/errors/etc into log.
* Returns 0 if page was successfully retrieved
* SkipCurrentPage(-3) if we need to skip this page
* PageIsTruncated(-2) if the page was truncated
*/
static void
backup_data_page(backup_files_args *arguments,
static int32
prepare_page(backup_files_args *arguments,
pgFile *file, XLogRecPtr prev_backup_start_lsn,
BlockNumber blknum, BlockNumber nblocks,
FILE *in, FILE *out,
pg_crc32 *crc, int *n_skipped,
BackupMode backup_mode)
FILE *in, int *n_skipped,
BackupMode backup_mode,
Page page)
{
BackupPageHeader header;
Page page = malloc(BLCKSZ);
Page compressed_page = NULL;
XLogRecPtr page_lsn = 0;
size_t write_buffer_size;
char write_buffer[BLCKSZ+sizeof(header)];
int try_again = 100;
bool page_is_valid = false;
XLogRecPtr page_lsn = 0;
int try_again = 100;
bool page_is_valid = false;
bool page_is_truncated = false;
BlockNumber absolute_blknum = file->segno * RELSEG_SIZE + blknum;
header.block = blknum;
header.compressed_size = 0;
/* check for interrupt */
if (interrupted)
elog(ERROR, "Interrupted during backup");
/*
* Read the page and verify its header and checksum.
@ -258,7 +260,7 @@ backup_data_page(backup_files_args *arguments,
if (result == 0)
{
/* This block was truncated.*/
header.compressed_size = PageIsTruncated;
page_is_truncated = true;
/* Page is not actually valid, but it is absent
* and we're not going to reread it or validate */
page_is_valid = true;
@ -291,35 +293,38 @@ backup_data_page(backup_files_args *arguments,
if (backup_mode == BACKUP_MODE_DIFF_PTRACK || (!page_is_valid && is_ptrack_support))
{
size_t page_size = 0;
free(page);
page = NULL;
page = (Page) pg_ptrack_get_block(arguments, file->dbOid, file->tblspcOid,
Page ptrack_page = NULL;
ptrack_page = (Page) pg_ptrack_get_block(arguments, file->dbOid, file->tblspcOid,
file->relOid, absolute_blknum, &page_size);
if (page == NULL)
if (ptrack_page == NULL)
{
/* This block was truncated.*/
header.compressed_size = PageIsTruncated;
page_is_truncated = true;
}
else if (page_size != BLCKSZ)
{
free(ptrack_page);
elog(ERROR, "File: %s, block %u, expected block size %d, but read %lu",
file->path, absolute_blknum, BLCKSZ, page_size);
}
else
{
/*
* We need to copy the page that was successfully
* retreieved from ptrack into our output "page" parameter.
* We must set checksum here, because it is outdated
* in the block recieved from shared buffers.
*/
memcpy(page, ptrack_page, BLCKSZ);
free(ptrack_page);
if (is_checksum_enabled)
((PageHeader) page)->pd_checksum = pg_checksum_page(page, absolute_blknum);
}
/* get lsn from page, provided by pg_ptrack_get_block() */
if (backup_mode == BACKUP_MODE_DIFF_DELTA &&
file->exists_in_prev &&
header.compressed_size != PageIsTruncated &&
!page_is_truncated &&
!parse_page(page, &page_lsn))
elog(ERROR, "Cannot parse page after pg_ptrack_get_block. "
"Possible risk of a memory corruption");
@ -328,52 +333,70 @@ backup_data_page(backup_files_args *arguments,
if (backup_mode == BACKUP_MODE_DIFF_DELTA &&
file->exists_in_prev &&
header.compressed_size != PageIsTruncated &&
!page_is_truncated &&
page_lsn < prev_backup_start_lsn)
{
elog(VERBOSE, "Skipping blknum: %u in file: %s", blknum, file->path);
(*n_skipped)++;
free(page);
return;
return SkipCurrentPage;
}
if (header.compressed_size != PageIsTruncated)
{
file->read_size += BLCKSZ;
if (page_is_truncated)
return PageIsTruncated;
compressed_page = malloc(BLCKSZ);
return 0;
}
static void
compress_and_backup_page(pgFile *file, BlockNumber blknum,
FILE *in, FILE *out, pg_crc32 *crc,
int page_state, Page page)
{
BackupPageHeader header;
size_t write_buffer_size = sizeof(header);
char write_buffer[BLCKSZ+sizeof(header)];
char compressed_page[BLCKSZ];
if(page_state == SkipCurrentPage)
return;
header.block = blknum;
header.compressed_size = page_state;
if(page_state == PageIsTruncated)
{
/*
* The page was truncated. Write only header
* to know that we must truncate restored file
*/
memcpy(write_buffer, &header, sizeof(header));
}
else
{
/* The page was not truncated, so we need to compress it */
header.compressed_size = do_compress(compressed_page, BLCKSZ,
page, BLCKSZ, compress_alg);
page, BLCKSZ, compress_alg);
file->compress_alg = compress_alg;
file->read_size += BLCKSZ;
Assert (header.compressed_size <= BLCKSZ);
}
write_buffer_size = sizeof(header);
/*
* The page was truncated. Write only header
* to know that we must truncate restored file
*/
if (header.compressed_size == PageIsTruncated)
{
memcpy(write_buffer, &header, sizeof(header));
}
/* The page compression failed. Write it as is. */
else if (header.compressed_size == -1)
{
header.compressed_size = BLCKSZ;
memcpy(write_buffer, &header, sizeof(header));
memcpy(write_buffer + sizeof(header), page, BLCKSZ);
write_buffer_size += header.compressed_size;
}
/* The page was successfully compressed */
else if (header.compressed_size > 0)
{
memcpy(write_buffer, &header, sizeof(header));
memcpy(write_buffer + sizeof(header), compressed_page, header.compressed_size);
write_buffer_size += MAXALIGN(header.compressed_size);
/* The page was successfully compressed. */
if (header.compressed_size > 0)
{
memcpy(write_buffer, &header, sizeof(header));
memcpy(write_buffer + sizeof(header),
compressed_page, header.compressed_size);
write_buffer_size += MAXALIGN(header.compressed_size);
}
/* Nonpositive value means that compression failed. Write it as is. */
else
{
header.compressed_size = BLCKSZ;
memcpy(write_buffer, &header, sizeof(header));
memcpy(write_buffer + sizeof(header), page, BLCKSZ);
write_buffer_size += header.compressed_size;
}
}
/* elog(VERBOSE, "backup blkno %u, compressed_size %d write_buffer_size %ld",
@ -393,11 +416,6 @@ backup_data_page(backup_files_args *arguments,
}
file->write_size += write_buffer_size;
if (page != NULL)
free(page);
if (compressed_page != NULL)
free(compressed_page);
}
/*
@ -414,13 +432,15 @@ backup_data_file(backup_files_args* arguments,
pgFile *file, XLogRecPtr prev_backup_start_lsn,
BackupMode backup_mode)
{
char to_path[MAXPGPATH];
FILE *in;
FILE *out;
BlockNumber blknum = 0;
BlockNumber nblocks = 0;
int n_blocks_skipped = 0;
int n_blocks_read = 0;
char to_path[MAXPGPATH];
FILE *in;
FILE *out;
BlockNumber blknum = 0;
BlockNumber nblocks = 0;
int n_blocks_skipped = 0;
int n_blocks_read = 0;
int page_state;
char curr_page[BLCKSZ];
/*
* Skip unchanged file only if it exists in previous backup.
@ -430,7 +450,7 @@ backup_data_file(backup_files_args* arguments,
if ((backup_mode == BACKUP_MODE_DIFF_PAGE ||
backup_mode == BACKUP_MODE_DIFF_PTRACK) &&
file->pagemap.bitmapsize == PageBitmapIsEmpty &&
file->exists_in_prev)
file->exists_in_prev && !file->pagemap_isabsent)
{
/*
* There are no changed blocks since last backup. We want make
@ -446,7 +466,7 @@ backup_data_file(backup_files_args* arguments,
INIT_CRC32C(file->crc);
/* open backup mode file for read */
in = fopen(file->path, "r");
in = fopen(file->path, PG_BINARY_R);
if (in == NULL)
{
FIN_CRC32C(file->crc);
@ -480,7 +500,7 @@ backup_data_file(backup_files_args* arguments,
/* open backup file for write */
join_path_components(to_path, to_root, file->path + strlen(from_root) + 1);
out = fopen(to_path, "w");
out = fopen(to_path, PG_BINARY_W);
if (out == NULL)
{
int errno_tmp = errno;
@ -494,15 +514,16 @@ backup_data_file(backup_files_args* arguments,
* If page map is empty or file is not present in previous backup
* backup all pages of the relation.
*/
if (file->pagemap.bitmapsize == PageBitmapIsEmpty
|| file->pagemap.bitmapsize == PageBitmapIsAbsent
|| !file->exists_in_prev)
if (file->pagemap.bitmapsize == PageBitmapIsEmpty ||
file->pagemap_isabsent || !file->exists_in_prev)
{
for (blknum = 0; blknum < nblocks; blknum++)
{
backup_data_page(arguments, file, prev_backup_start_lsn, blknum,
nblocks, in, out, &(file->crc),
&n_blocks_skipped, backup_mode);
page_state = prepare_page(arguments, file, prev_backup_start_lsn,
blknum, nblocks, in, &n_blocks_skipped,
backup_mode, curr_page);
compress_and_backup_page(file, blknum, in, out, &(file->crc),
page_state, curr_page);
n_blocks_read++;
}
if (backup_mode == BACKUP_MODE_DIFF_DELTA)
@ -515,9 +536,11 @@ backup_data_file(backup_files_args* arguments,
iter = datapagemap_iterate(&file->pagemap);
while (datapagemap_next(iter, &blknum))
{
backup_data_page(arguments, file, prev_backup_start_lsn, blknum,
nblocks, in, out, &(file->crc),
&n_blocks_skipped, backup_mode);
page_state = prepare_page(arguments, file, prev_backup_start_lsn,
blknum, nblocks, in, &n_blocks_skipped,
backup_mode, curr_page);
compress_and_backup_page(file, blknum, in, out, &(file->crc),
page_state, curr_page);
n_blocks_read++;
}
@ -569,18 +592,18 @@ restore_data_file(const char *from_root,
pgFile *file,
pgBackup *backup)
{
char to_path[MAXPGPATH];
FILE *in = NULL;
FILE *out = NULL;
BackupPageHeader header;
BlockNumber blknum;
size_t file_size;
char to_path[MAXPGPATH];
FILE *in = NULL;
FILE *out = NULL;
BackupPageHeader header;
BlockNumber blknum;
size_t file_size;
/* BYTES_INVALID allowed only in case of restoring file from DELTA backup */
if (file->write_size != BYTES_INVALID)
{
/* open backup mode file for read */
in = fopen(file->path, "r");
in = fopen(file->path, PG_BINARY_R);
if (in == NULL)
{
elog(ERROR, "cannot open backup file \"%s\": %s", file->path,
@ -594,9 +617,9 @@ restore_data_file(const char *from_root,
* re-open it with "w" to create an empty file.
*/
join_path_components(to_path, to_root, file->path + strlen(from_root) + 1);
out = fopen(to_path, "r+");
out = fopen(to_path, PG_BINARY_R "+");
if (out == NULL && errno == ENOENT)
out = fopen(to_path, "w");
out = fopen(to_path, PG_BINARY_W);
if (out == NULL)
{
int errno_tmp = errno;
@ -632,7 +655,8 @@ restore_data_file(const char *from_root,
}
if (header.block < blknum)
elog(ERROR, "backup is broken at file->path %s block %u",file->path, blknum);
elog(ERROR, "backup is broken at file->path %s block %u",
file->path, blknum);
if (header.compressed_size == PageIsTruncated)
{
@ -643,7 +667,8 @@ restore_data_file(const char *from_root,
if (ftruncate(fileno(out), header.block * BLCKSZ) != 0)
elog(ERROR, "cannot truncate \"%s\": %s",
file->path, strerror(errno));
elog(VERBOSE, "truncate file %s to block %u", file->path, header.block);
elog(VERBOSE, "truncate file %s to block %u",
file->path, header.block);
break;
}
@ -661,10 +686,12 @@ restore_data_file(const char *from_root,
uncompressed_size = do_decompress(page.data, BLCKSZ,
compressed_page.data,
header.compressed_size, file->compress_alg);
header.compressed_size,
file->compress_alg);
if (uncompressed_size != BLCKSZ)
elog(ERROR, "page uncompressed to %ld bytes. != BLCKSZ", uncompressed_size);
elog(ERROR, "page uncompressed to %ld bytes. != BLCKSZ",
uncompressed_size);
}
/*
@ -690,13 +717,13 @@ restore_data_file(const char *from_root,
}
}
/*
* DELTA backup have no knowledge about truncated blocks as PAGE or PTRACK do
* But during DELTA backup we read every file in PGDATA and thus DELTA backup
* knows exact size of every file at the time of backup.
* So when restoring file from DELTA backup we, knowning it`s size at
* a time of a backup, can truncate file to this size.
*/
/*
* DELTA backup have no knowledge about truncated blocks as PAGE or PTRACK do
* But during DELTA backup we read every file in PGDATA and thus DELTA backup
* knows exact size of every file at the time of backup.
* So when restoring file from DELTA backup we, knowning it`s size at
* a time of a backup, can truncate file to this size.
*/
if (backup->backup_mode == BACKUP_MODE_DIFF_DELTA)
{
@ -711,7 +738,8 @@ restore_data_file(const char *from_root,
if (ftruncate(fileno(out), file->n_blocks * BLCKSZ) != 0)
elog(ERROR, "cannot truncate \"%s\": %s",
file->path, strerror(errno));
elog(INFO, "Delta truncate file %s to block %u", file->path, file->n_blocks);
elog(INFO, "Delta truncate file %s to block %u",
file->path, file->n_blocks);
}
}
@ -759,7 +787,7 @@ copy_file(const char *from_root, const char *to_root, pgFile *file)
file->write_size = 0;
/* open backup mode file for read */
in = fopen(file->path, "r");
in = fopen(file->path, PG_BINARY_R);
if (in == NULL)
{
FIN_CRC32C(crc);
@ -775,7 +803,7 @@ copy_file(const char *from_root, const char *to_root, pgFile *file)
/* open backup file for write */
join_path_components(to_path, to_root, file->path + strlen(from_root) + 1);
out = fopen(to_path, "w");
out = fopen(to_path, PG_BINARY_W);
if (out == NULL)
{
int errno_tmp = errno;
@ -843,7 +871,7 @@ copy_file(const char *from_root, const char *to_root, pgFile *file)
file->read_size += read_len;
}
file->write_size = file->read_size;
file->write_size = (int64) file->read_size;
/* finish CRC calculation and store into pgFile */
FIN_CRC32C(crc);
file->crc = crc;
@ -918,7 +946,7 @@ push_wal_file(const char *from_path, const char *to_path, bool is_compress,
bool overwrite)
{
FILE *in = NULL;
FILE *out;
FILE *out=NULL;
char buf[XLOG_BLCKSZ];
const char *to_path_p = to_path;
char to_path_temp[MAXPGPATH];
@ -930,7 +958,7 @@ push_wal_file(const char *from_path, const char *to_path, bool is_compress,
#endif
/* open file for read */
in = fopen(from_path, "r");
in = fopen(from_path, PG_BINARY_R);
if (in == NULL)
elog(ERROR, "Cannot open source WAL file \"%s\": %s", from_path,
strerror(errno));
@ -946,7 +974,7 @@ push_wal_file(const char *from_path, const char *to_path, bool is_compress,
snprintf(to_path_temp, sizeof(to_path_temp), "%s.partial", gz_to_path);
gz_out = gzopen(to_path_temp, "wb");
gz_out = gzopen(to_path_temp, PG_BINARY_W);
if (gzsetparams(gz_out, compress_level, Z_DEFAULT_STRATEGY) != Z_OK)
elog(ERROR, "Cannot set compression level %d to file \"%s\": %s",
compress_level, to_path_temp, get_gz_error(gz_out, errno));
@ -961,7 +989,7 @@ push_wal_file(const char *from_path, const char *to_path, bool is_compress,
snprintf(to_path_temp, sizeof(to_path_temp), "%s.partial", to_path);
out = fopen(to_path_temp, "w");
out = fopen(to_path_temp, PG_BINARY_W);
if (out == NULL)
elog(ERROR, "Cannot open destination WAL file \"%s\": %s",
to_path_temp, strerror(errno));
@ -1083,7 +1111,7 @@ get_wal_file(const char *from_path, const char *to_path)
#endif
/* open file for read */
in = fopen(from_path, "r");
in = fopen(from_path, PG_BINARY_R);
if (in == NULL)
{
#ifdef HAVE_LIBZ
@ -1092,7 +1120,7 @@ get_wal_file(const char *from_path, const char *to_path)
* extension.
*/
snprintf(gz_from_path, sizeof(gz_from_path), "%s.gz", from_path);
gz_in = gzopen(gz_from_path, "rb");
gz_in = gzopen(gz_from_path, PG_BINARY_R);
if (gz_in == NULL)
{
if (errno == ENOENT)
@ -1120,7 +1148,7 @@ get_wal_file(const char *from_path, const char *to_path)
/* open backup file for write */
snprintf(to_path_temp, sizeof(to_path_temp), "%s.partial", to_path);
out = fopen(to_path_temp, "w");
out = fopen(to_path_temp, PG_BINARY_W);
if (out == NULL)
elog(ERROR, "Cannot open destination WAL file \"%s\": %s",
to_path_temp, strerror(errno));
@ -1254,7 +1282,7 @@ calc_file_checksum(pgFile *file)
file->write_size = 0;
/* open backup mode file for read */
in = fopen(file->path, "r");
in = fopen(file->path, PG_BINARY_R);
if (in == NULL)
{
FIN_CRC32C(crc);

View File

@ -10,7 +10,6 @@
#include "pg_probackup.h"
#include <libgen.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>
@ -101,11 +100,10 @@ static void dir_list_file_internal(parray *files, const char *root,
int
dir_create_dir(const char *dir, mode_t mode)
{
char copy[MAXPGPATH];
char parent[MAXPGPATH];
strncpy(copy, dir, MAXPGPATH);
strncpy(parent, dirname(copy), MAXPGPATH);
strncpy(parent, dir, MAXPGPATH);
get_parent_directory(parent);
/* Create parent first */
if (access(parent, F_OK) == -1)
@ -153,6 +151,8 @@ pgFileInit(const char *path)
file = (pgFile *) pgut_malloc(sizeof(pgFile));
file->name = NULL;
file->size = 0;
file->mode = 0;
file->read_size = 0;
@ -161,7 +161,8 @@ pgFileInit(const char *path)
file->is_datafile = false;
file->linked = NULL;
file->pagemap.bitmap = NULL;
file->pagemap.bitmapsize = PageBitmapIsAbsent;
file->pagemap.bitmapsize = PageBitmapIsEmpty;
file->pagemap_isabsent = false;
file->tblspcOid = 0;
file->dbOid = 0;
file->relOid = 0;
@ -232,7 +233,7 @@ pgFileGetCRC(pgFile *file)
int errno_tmp;
/* open file in binary read mode */
fp = fopen(file->path, "r");
fp = fopen(file->path, PG_BINARY_R);
if (fp == NULL)
elog(ERROR, "cannot open file \"%s\": %s",
file->path, strerror(errno));
@ -350,7 +351,7 @@ dir_list_file(parray *files, const char *root, bool exclude, bool omit_symlink,
char black_item[MAXPGPATH * 2];
black_list = parray_new();
black_list_file = fopen(path, "r");
black_list_file = fopen(path, PG_BINARY_R);
if (black_list_file == NULL)
elog(ERROR, "cannot open black_list: %s", strerror(errno));
@ -817,17 +818,22 @@ print_file_list(FILE *out, const parray *files, const char *root)
if (root && strstr(path, root) == path)
path = GetRelativePath(path, root);
fprintf(out, "{\"path\":\"%s\", \"size\":\"%lu\",\"mode\":\"%u\","
"\"is_datafile\":\"%u\", \"is_cfs\":\"%u\", \"crc\":\"%u\","
fprintf(out, "{\"path\":\"%s\", \"size\":\"" INT64_FORMAT "\", "
"\"mode\":\"%u\", \"is_datafile\":\"%u\", "
"\"is_cfs\":\"%u\", \"crc\":\"%u\", "
"\"compress_alg\":\"%s\"",
path, (unsigned long) file->write_size, file->mode,
file->is_datafile?1:0, file->is_cfs?1:0, file->crc,
path, file->write_size, file->mode,
file->is_datafile ? 1 : 0, file->is_cfs ? 1 : 0, file->crc,
deparse_compress_alg(file->compress_alg));
if (file->is_datafile)
fprintf(out, ",\"segno\":\"%d\"", file->segno);
#ifndef WIN32
if (S_ISLNK(file->mode))
#else
if (pgwin32_is_junction(file->path))
#endif
fprintf(out, ",\"linked\":\"%s\"", file->linked);
if (file->n_blocks != -1)
@ -1024,7 +1030,7 @@ dir_read_file_list(const char *root, const char *file_txt)
file = pgFileInit(filepath);
file->write_size = (size_t) write_size;
file->write_size = (int64) write_size;
file->mode = (mode_t) mode;
file->is_datafile = is_datafile ? true : false;
file->is_cfs = is_cfs ? true : false;

View File

@ -56,7 +56,7 @@ help_command(char *command)
|| strcmp(command, "-V") == 0)
printf(_("No help page for \"%s\" command. Try pg_probackup help\n"), command);
else
printf(_("Unknown command. Try pg_probackup help\n"));
printf(_("Unknown command \"%s\". Try pg_probackup help\n"), command);
exit(0);
}
@ -89,6 +89,7 @@ help_pg_probackup(void)
printf(_(" [--replica-timeout=timeout]\n"));
printf(_("\n %s show-config -B backup-dir --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [--format=format]\n"));
printf(_("\n %s backup -B backup-path -b backup-mode --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [-C] [--stream [-S slot-name]] [--backup-pg-log]\n"));
@ -124,10 +125,12 @@ help_pg_probackup(void)
printf(_("\n %s validate -B backup-dir [--instance=instance_name]\n"), PROGRAM_NAME);
printf(_(" [-i backup-id] [--progress]\n"));
printf(_(" [--time=time|--xid=xid [--inclusive=boolean]]\n"));
printf(_(" [--recovery-target-name=target-name]\n"));
printf(_(" [--timeline=timeline]\n"));
printf(_("\n %s show -B backup-dir\n"), PROGRAM_NAME);
printf(_(" [--instance=instance_name [-i backup-id]]\n"));
printf(_(" [--format=format]\n"));
printf(_("\n %s delete -B backup-dir --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [--wal] [-i backup-id | --expired]\n"));
@ -331,6 +334,8 @@ help_validate(void)
printf(_(" --xid=xid transaction ID up to which recovery will proceed\n"));
printf(_(" --inclusive=boolean whether we stop just after the recovery target\n"));
printf(_(" --timeline=timeline recovering into a particular timeline\n"));
printf(_(" --recovery-target-name=target-name\n"));
printf(_(" the named restore point to which recovery will proceed\n"));
printf(_("\n Logging options:\n"));
printf(_(" --log-level-console=log-level-console\n"));
@ -358,11 +363,13 @@ static void
help_show(void)
{
printf(_("%s show -B backup-dir\n"), PROGRAM_NAME);
printf(_(" [--instance=instance_name [-i backup-id]]\n\n"));
printf(_(" [--instance=instance_name [-i backup-id]]\n"));
printf(_(" [--format=format]\n\n"));
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
printf(_(" --instance=instance_name show info about specific intstance\n"));
printf(_(" -i, --backup-id=backup-id show info about specific backups\n"));
printf(_(" --format=format show format=PLAIN|JSON\n"));
}
static void
@ -473,10 +480,12 @@ help_set_config(void)
static void
help_show_config(void)
{
printf(_("%s show-config -B backup-dir --instance=instance_name\n\n"), PROGRAM_NAME);
printf(_("%s show-config -B backup-dir --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [--format=format]\n\n"));
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
printf(_(" --instance=instance_name name of the instance\n"));
printf(_(" --format=format show format=PLAIN|JSON\n"));
}
static void

View File

@ -10,12 +10,14 @@
#include "pg_probackup.h"
#include "streamutil.h"
#include "utils/thread.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/stat.h>
#include <unistd.h>
#include "pg_getopt.h"
const char *PROGRAM_VERSION = "2.0.17";
const char *PROGRAM_URL = "https://github.com/postgrespro/pg_probackup";
@ -47,7 +49,6 @@ char *replication_slot = NULL;
/* backup options */
bool backup_logs = false;
bool smooth_checkpoint;
bool from_replica = false;
bool is_remote_backup = false;
/* Wait timeout for WAL segment archiving */
uint32 archive_timeout = 300; /* default is 300 seconds */
@ -64,7 +65,7 @@ static char *target_inclusive;
static TimeLineID target_tli;
static bool target_immediate;
static char *target_name = NULL;
static char *target_action = NULL;;
static char *target_action = NULL;
static pgRecoveryTarget *recovery_target_options = NULL;
@ -84,7 +85,7 @@ uint32 retention_window = 0;
/* compression options */
CompressAlg compress_alg = NOT_DEFINED_COMPRESS;
int compress_level = DEFAULT_COMPRESS_LEVEL;
bool compress_shortcut = false;
bool compress_shortcut = false;
/* other options */
char *instance_name;
@ -95,23 +96,27 @@ static char *wal_file_path;
static char *wal_file_name;
static bool file_overwrite = false;
/* show options */
ShowFormat show_format = SHOW_PLAIN;
/* current settings */
pgBackup current;
ProbackupSubcmd backup_subcmd;
ProbackupSubcmd backup_subcmd = NO_CMD;
bool help = false;
static bool help_opt = false;
static void opt_backup_mode(pgut_option *opt, const char *arg);
static void opt_log_level_console(pgut_option *opt, const char *arg);
static void opt_log_level_file(pgut_option *opt, const char *arg);
static void opt_compress_alg(pgut_option *opt, const char *arg);
static void opt_show_format(pgut_option *opt, const char *arg);
static void compress_init(void);
static pgut_option options[] =
{
/* directory options */
{ 'b', 1, "help", &help, SOURCE_CMDLINE },
{ 'b', 1, "help", &help_opt, SOURCE_CMDLINE },
{ 's', 'D', "pgdata", &pgdata, SOURCE_CMDLINE },
{ 's', 'B', "backup-path", &backup_path, SOURCE_CMDLINE },
/* common options */
@ -150,7 +155,7 @@ static pgut_option options[] =
{ 'b', 131, "expired", &delete_expired, SOURCE_CMDLINE },
{ 'b', 132, "all", &apply_to_all, SOURCE_CMDLINE },
/* TODO not implemented yet */
{ 'b', 133, "force", &force_delete, SOURCE_CMDLINE },
{ 'b', 133, "force", &force_delete, SOURCE_CMDLINE },
/* retention options */
{ 'u', 134, "retention-redundancy", &retention_redundancy, SOURCE_CMDLINE },
{ 'u', 135, "retention-window", &retention_window, SOURCE_CMDLINE },
@ -180,6 +185,8 @@ static pgut_option options[] =
{ 's', 160, "wal-file-path", &wal_file_path, SOURCE_CMDLINE },
{ 's', 161, "wal-file-name", &wal_file_name, SOURCE_CMDLINE },
{ 'b', 162, "overwrite", &file_overwrite, SOURCE_CMDLINE },
/* show options */
{ 'f', 170, "format", opt_show_format, SOURCE_CMDLINE },
{ 0 }
};
@ -189,8 +196,8 @@ static pgut_option options[] =
int
main(int argc, char *argv[])
{
char *command = NULL;
char path[MAXPGPATH];
char *command = NULL,
*command_name;
/* Check if backup_path is directory. */
struct stat stat_buf;
int rc;
@ -204,42 +211,38 @@ main(int argc, char *argv[])
/*
* Save main thread's tid. It is used call exit() in case of errors.
*/
#ifdef WIN32
main_tid = GetCurrentThreadId();
#else
main_tid = pthread_self();
#endif
/* Parse subcommands and non-subcommand options */
if (argc > 1)
{
if (strcmp(argv[1], "archive-push") == 0)
backup_subcmd = ARCHIVE_PUSH;
backup_subcmd = ARCHIVE_PUSH_CMD;
else if (strcmp(argv[1], "archive-get") == 0)
backup_subcmd = ARCHIVE_GET;
backup_subcmd = ARCHIVE_GET_CMD;
else if (strcmp(argv[1], "add-instance") == 0)
backup_subcmd = ADD_INSTANCE;
backup_subcmd = ADD_INSTANCE_CMD;
else if (strcmp(argv[1], "del-instance") == 0)
backup_subcmd = DELETE_INSTANCE;
backup_subcmd = DELETE_INSTANCE_CMD;
else if (strcmp(argv[1], "init") == 0)
backup_subcmd = INIT;
backup_subcmd = INIT_CMD;
else if (strcmp(argv[1], "backup") == 0)
backup_subcmd = BACKUP;
backup_subcmd = BACKUP_CMD;
else if (strcmp(argv[1], "restore") == 0)
backup_subcmd = RESTORE;
backup_subcmd = RESTORE_CMD;
else if (strcmp(argv[1], "validate") == 0)
backup_subcmd = VALIDATE;
backup_subcmd = VALIDATE_CMD;
else if (strcmp(argv[1], "show") == 0)
backup_subcmd = SHOW;
backup_subcmd = SHOW_CMD;
else if (strcmp(argv[1], "delete") == 0)
backup_subcmd = DELETE;
backup_subcmd = DELETE_CMD;
else if (strcmp(argv[1], "set-config") == 0)
backup_subcmd = SET_CONFIG;
backup_subcmd = SET_CONFIG_CMD;
else if (strcmp(argv[1], "show-config") == 0)
backup_subcmd = SHOW_CONFIG;
else if (strcmp(argv[1], "--help") == 0
|| strcmp(argv[1], "help") == 0
|| strcmp(argv[1], "-?") == 0)
backup_subcmd = SHOW_CONFIG_CMD;
else if (strcmp(argv[1], "--help") == 0 ||
strcmp(argv[1], "-?") == 0 ||
strcmp(argv[1], "help") == 0)
{
if (argc > 2)
help_command(argv[2]);
@ -250,35 +253,32 @@ main(int argc, char *argv[])
|| strcmp(argv[1], "version") == 0
|| strcmp(argv[1], "-V") == 0)
{
if (argc == 2)
{
#ifdef PGPRO_VERSION
fprintf(stderr, "%s %s (Postgres Pro %s %s)\n",
PROGRAM_NAME, PROGRAM_VERSION,
PGPRO_VERSION, PGPRO_EDITION);
fprintf(stderr, "%s %s (Postgres Pro %s %s)\n",
PROGRAM_NAME, PROGRAM_VERSION,
PGPRO_VERSION, PGPRO_EDITION);
#else
fprintf(stderr, "%s %s (PostgreSQL %s)\n",
PROGRAM_NAME, PROGRAM_VERSION, PG_VERSION);
fprintf(stderr, "%s %s (PostgreSQL %s)\n",
PROGRAM_NAME, PROGRAM_VERSION, PG_VERSION);
#endif
exit(0);
}
else if (strcmp(argv[2], "--help") == 0)
help_command(argv[1]);
else
elog(ERROR, "Invalid arguments for \"%s\" subcommand", argv[1]);
exit(0);
}
else
elog(ERROR, "Unknown subcommand");
elog(ERROR, "Unknown subcommand \"%s\"", argv[1]);
}
if (backup_subcmd == NO_CMD)
elog(ERROR, "No subcommand specified");
/*
* Make command string before getopt_long() will call. It permutes the
* content of argv.
*/
if (backup_subcmd == BACKUP ||
backup_subcmd == RESTORE ||
backup_subcmd == VALIDATE ||
backup_subcmd == DELETE)
command_name = pstrdup(argv[1]);
if (backup_subcmd == BACKUP_CMD ||
backup_subcmd == RESTORE_CMD ||
backup_subcmd == VALIDATE_CMD ||
backup_subcmd == DELETE_CMD)
{
int i,
len = 0,
@ -305,11 +305,12 @@ main(int argc, char *argv[])
command[len] = '\0';
}
optind += 1;
/* Parse command line arguments */
pgut_getopt(argc, argv, options);
if (help)
help_command(argv[2]);
if (help_opt)
help_command(command_name);
/* backup_path is required for all pg_probackup commands except help */
if (backup_path == NULL)
@ -322,6 +323,7 @@ main(int argc, char *argv[])
if (backup_path == NULL)
elog(ERROR, "required parameter not specified: BACKUP_PATH (-B, --backup-path)");
}
canonicalize_path(backup_path);
/* Ensure that backup_path is an absolute path */
if (!is_absolute_path(backup_path))
@ -342,7 +344,7 @@ main(int argc, char *argv[])
}
/* Option --instance is required for all commands except init and show */
if (backup_subcmd != INIT && backup_subcmd != SHOW && backup_subcmd != VALIDATE)
if (backup_subcmd != INIT_CMD && backup_subcmd != SHOW_CMD && backup_subcmd != VALIDATE_CMD)
{
if (instance_name == NULL)
elog(ERROR, "required parameter not specified: --instance");
@ -362,7 +364,7 @@ main(int argc, char *argv[])
* for all commands except init, which doesn't take this parameter
* and add-instance which creates new instance.
*/
if (backup_subcmd != INIT && backup_subcmd != ADD_INSTANCE)
if (backup_subcmd != INIT_CMD && backup_subcmd != ADD_INSTANCE_CMD)
{
if (access(backup_instance_path, F_OK) != 0)
elog(ERROR, "Instance '%s' does not exist in this backup catalog",
@ -374,8 +376,10 @@ main(int argc, char *argv[])
* Read options from env variables or from config file,
* unless we're going to set them via set-config.
*/
if (instance_name && backup_subcmd != SET_CONFIG)
if (instance_name && backup_subcmd != SET_CONFIG_CMD)
{
char path[MAXPGPATH];
/* Read environment variables */
pgut_getopt_env(options);
@ -397,10 +401,10 @@ main(int argc, char *argv[])
/* Sanity check of --backup-id option */
if (backup_id_string_param != NULL)
{
if (backup_subcmd != RESTORE
&& backup_subcmd != VALIDATE
&& backup_subcmd != DELETE
&& backup_subcmd != SHOW)
if (backup_subcmd != RESTORE_CMD
&& backup_subcmd != VALIDATE_CMD
&& backup_subcmd != DELETE_CMD
&& backup_subcmd != SHOW_CMD)
elog(ERROR, "Cannot use -i (--backup-id) option together with the '%s' command",
argv[1]);
@ -428,7 +432,7 @@ main(int argc, char *argv[])
pgdata_exclude_dir[i] = "pg_log";
}
if (backup_subcmd == VALIDATE || backup_subcmd == RESTORE)
if (backup_subcmd == VALIDATE_CMD || backup_subcmd == RESTORE_CMD)
{
/* parse all recovery target options into recovery_target_options structure */
recovery_target_options = parseRecoveryTargetOptions(target_time, target_xid,
@ -444,23 +448,24 @@ main(int argc, char *argv[])
/* do actual operation */
switch (backup_subcmd)
{
case ARCHIVE_PUSH:
case ARCHIVE_PUSH_CMD:
return do_archive_push(wal_file_path, wal_file_name, file_overwrite);
case ARCHIVE_GET:
case ARCHIVE_GET_CMD:
return do_archive_get(wal_file_path, wal_file_name);
case ADD_INSTANCE:
case ADD_INSTANCE_CMD:
return do_add_instance();
case DELETE_INSTANCE:
case DELETE_INSTANCE_CMD:
return do_delete_instance();
case INIT:
case INIT_CMD:
return do_init();
case BACKUP:
case BACKUP_CMD:
{
const char *backup_mode;
time_t start_time;
start_time = time(NULL);
backup_mode = deparse_backup_mode(current.backup_mode);
current.stream = stream_wal;
elog(INFO, "Backup start, pg_probackup version: %s, backup ID: %s, backup mode: %s, instance: %s, stream: %s, remote: %s",
PROGRAM_VERSION, base36enc(start_time), backup_mode, instance_name,
@ -468,20 +473,20 @@ main(int argc, char *argv[])
return do_backup(start_time);
}
case RESTORE:
case RESTORE_CMD:
return do_restore_or_validate(current.backup_id,
recovery_target_options,
true);
case VALIDATE:
case VALIDATE_CMD:
if (current.backup_id == 0 && target_time == 0 && target_xid == 0)
return do_validate_all();
else
return do_restore_or_validate(current.backup_id,
recovery_target_options,
false);
case SHOW:
case SHOW_CMD:
return do_show(current.backup_id);
case DELETE:
case DELETE_CMD:
if (delete_expired && backup_id_string_param)
elog(ERROR, "You cannot specify --delete-expired and --backup-id options together");
if (!delete_expired && !delete_wal && !backup_id_string_param)
@ -492,10 +497,13 @@ main(int argc, char *argv[])
return do_retention_purge();
else
return do_delete(current.backup_id);
case SHOW_CONFIG:
case SHOW_CONFIG_CMD:
return do_configure(true);
case SET_CONFIG:
case SET_CONFIG_CMD:
return do_configure(false);
case NO_CMD:
/* Should not happen */
elog(ERROR, "Unknown subcommand");
}
return 0;
@ -519,49 +527,31 @@ opt_log_level_file(pgut_option *opt, const char *arg)
log_level_file = parse_log_level(arg);
}
CompressAlg
parse_compress_alg(const char *arg)
static void
opt_show_format(pgut_option *opt, const char *arg)
{
const char *v = arg;
size_t len;
/* Skip all spaces detected */
while (isspace((unsigned char)*arg))
arg++;
len = strlen(arg);
while (IsSpace(*v))
v++;
len = strlen(v);
if (len == 0)
elog(ERROR, "compress algrorithm is empty");
if (pg_strncasecmp("zlib", arg, len) == 0)
return ZLIB_COMPRESS;
else if (pg_strncasecmp("pglz", arg, len) == 0)
return PGLZ_COMPRESS;
else if (pg_strncasecmp("none", arg, len) == 0)
return NONE_COMPRESS;
else
elog(ERROR, "invalid compress algorithm value \"%s\"", arg);
return NOT_DEFINED_COMPRESS;
}
const char*
deparse_compress_alg(int alg)
{
switch (alg)
if (len > 0)
{
case NONE_COMPRESS:
case NOT_DEFINED_COMPRESS:
return "none";
case ZLIB_COMPRESS:
return "zlib";
case PGLZ_COMPRESS:
return "pglz";
if (pg_strncasecmp("plain", v, len) == 0)
show_format = SHOW_PLAIN;
else if (pg_strncasecmp("json", v, len) == 0)
show_format = SHOW_JSON;
else
elog(ERROR, "Invalid show format \"%s\"", arg);
}
return NULL;
else
elog(ERROR, "Invalid show format \"%s\"", arg);
}
void
static void
opt_compress_alg(pgut_option *opt, const char *arg)
{
compress_alg = parse_compress_alg(arg);
@ -570,17 +560,17 @@ opt_compress_alg(pgut_option *opt, const char *arg)
/*
* Initialize compress and sanity checks for compress.
*/
static
void compress_init(void)
static void
compress_init(void)
{
/* Default algorithm is zlib */
if (compress_shortcut)
compress_alg = ZLIB_COMPRESS;
if (backup_subcmd != SET_CONFIG)
if (backup_subcmd != SET_CONFIG_CMD)
{
if (compress_level != DEFAULT_COMPRESS_LEVEL
&& compress_alg == NONE_COMPRESS)
&& compress_alg == NOT_DEFINED_COMPRESS)
elog(ERROR, "Cannot specify compress-level option without compress-alg option");
}
@ -590,7 +580,7 @@ void compress_init(void)
if (compress_level == 0)
compress_alg = NOT_DEFINED_COMPRESS;
if (backup_subcmd == BACKUP || backup_subcmd == ARCHIVE_PUSH)
if (backup_subcmd == BACKUP_CMD || backup_subcmd == ARCHIVE_PUSH_CMD)
{
#ifndef HAVE_LIBZ
if (compress_alg == ZLIB_COMPRESS)

View File

@ -15,10 +15,6 @@
#include <limits.h>
#include <libpq-fe.h>
#ifndef WIN32
#include <sys/mman.h>
#endif
#include "access/timeline.h"
#include "access/xlogdefs.h"
#include "access/xlog_internal.h"
@ -28,6 +24,13 @@
#include "storage/checksum.h"
#include "utils/pg_crc.h"
#include "common/relpath.h"
#include "port.h"
#ifdef FRONTEND
#undef FRONTEND
#include "port/atomics.h"
#define FRONTEND
#endif
#include "utils/parray.h"
#include "utils/pgut.h"
@ -85,9 +88,10 @@ typedef struct pgFile
size_t size; /* size of the file */
size_t read_size; /* size of the portion read (if only some pages are
backed up, it's different from size) */
size_t write_size; /* size of the backed-up file. BYTES_INVALID means
int64 write_size; /* size of the backed-up file. BYTES_INVALID means
that the file existed but was not backed up
because not modified since last backup. */
/* we need int64 here to store '-1' value */
pg_crc32 crc; /* CRC value of the file, regular file only */
char *linked; /* path of the linked file */
bool is_datafile; /* true if the file is PostgreSQL data file */
@ -102,13 +106,14 @@ typedef struct pgFile
bool is_database;
bool exists_in_prev; /* Mark files, both data and regular, that exists in previous backup */
CompressAlg compress_alg; /* compression algorithm applied to the file */
volatile uint32 lock; /* lock for synchronization of parallel threads */
volatile pg_atomic_flag lock; /* lock for synchronization of parallel threads */
datapagemap_t pagemap; /* bitmap of pages updated since previous backup */
bool pagemap_isabsent; /* Used to mark files with unknown state of pagemap,
* i.e. datafiles without _ptrack */
} pgFile;
/* Special values of datapagemap_t bitmapsize */
#define PageBitmapIsEmpty 0 /* Used to mark unchanged datafiles */
#define PageBitmapIsAbsent -1 /* Used to mark files with unknown state of pagemap, i.e. datafiles without _ptrack */
/* Current state of backup */
typedef enum BackupStatus
@ -135,23 +140,30 @@ typedef enum BackupMode
typedef enum ProbackupSubcmd
{
INIT = 0,
ARCHIVE_PUSH,
ARCHIVE_GET,
ADD_INSTANCE,
DELETE_INSTANCE,
BACKUP,
RESTORE,
VALIDATE,
SHOW,
DELETE,
SET_CONFIG,
SHOW_CONFIG
NO_CMD = 0,
INIT_CMD,
ARCHIVE_PUSH_CMD,
ARCHIVE_GET_CMD,
ADD_INSTANCE_CMD,
DELETE_INSTANCE_CMD,
BACKUP_CMD,
RESTORE_CMD,
VALIDATE_CMD,
SHOW_CMD,
DELETE_CMD,
SET_CONFIG_CMD,
SHOW_CONFIG_CMD
} ProbackupSubcmd;
typedef enum ShowFormat
{
SHOW_PLAIN,
SHOW_JSON
} ShowFormat;
/* special values of pgBackup fields */
#define INVALID_BACKUP_ID 0 /* backup ID is not provided by user */
#define INVALID_BACKUP_ID 0 /* backup ID is not provided by user */
#define BYTES_INVALID (-1)
typedef struct pgBackupConfig
@ -214,20 +226,25 @@ typedef struct pgBackup
/* Size of WAL files in archive needed to restore this backup */
int64 wal_bytes;
CompressAlg compress_alg;
int compress_level;
/* Fields needed for compatibility check */
uint32 block_size;
uint32 wal_block_size;
uint32 checksum_version;
char program_version[100];
char server_version[100];
bool stream; /* Was this backup taken in stream mode?
bool stream; /* Was this backup taken in stream mode?
* i.e. does it include all needed WAL files? */
bool from_replica; /* Was this backup taken from replica */
time_t parent_backup; /* Identifier of the previous backup.
* Which is basic backup for this
* incremental backup. */
char *primary_conninfo; /* Connection parameters of the backup
* in the format suitable for recovery.conf */
char *primary_conninfo; /* Connection parameters of the backup
* in the format suitable for recovery.conf */
} pgBackup;
/* Recovery target for restore and validate subcommands */
@ -311,7 +328,6 @@ extern char *replication_slot;
/* backup options */
extern bool smooth_checkpoint;
extern uint32 archive_timeout;
extern bool from_replica;
extern bool is_remote_backup;
extern const char *master_db;
extern const char *master_host;
@ -341,7 +357,7 @@ extern CompressAlg compress_alg;
extern int compress_level;
extern bool compress_shortcut;
#define DEFAULT_COMPRESS_LEVEL 6
#define DEFAULT_COMPRESS_LEVEL 1
extern CompressAlg parse_compress_alg(const char *arg);
extern const char* deparse_compress_alg(int alg);
@ -349,9 +365,12 @@ extern const char* deparse_compress_alg(int alg);
extern char *instance_name;
extern uint64 system_identifier;
/* show options */
extern ShowFormat show_format;
/* current settings */
extern pgBackup current;
extern ProbackupSubcmd backup_subcmd;
extern ProbackupSubcmd backup_subcmd;
/* in dir.c */
/* exclude directory list for $PGDATA file listing */
@ -519,4 +538,16 @@ extern void pgBackup_init(pgBackup *backup);
/* in status.c */
extern bool is_pg_running(void);
#ifdef WIN32
#ifdef _DEBUG
#define lseek _lseek
#define open _open
#define fstat _fstat
#define read _read
#define close _close
#define write _write
#define mkdir(dir,mode) _mkdir(dir)
#endif
#endif
#endif /* PG_PROBACKUP_H */

View File

@ -14,14 +14,14 @@
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <pthread.h>
#include "catalog/pg_control.h"
#include "utils/thread.h"
typedef struct
{
parray *files;
pgBackup *backup;
parray *files;
pgBackup *backup;
/*
* Return value from the thread.
@ -65,7 +65,7 @@ static void check_tablespace_mapping(pgBackup *backup);
static void create_recovery_conf(time_t backup_id,
pgRecoveryTarget *rt,
pgBackup *backup);
static void restore_files(void *arg);
static void *restore_files(void *arg);
static void remove_deleted_files(pgBackup *backup);
static const char *get_tablespace_mapping(const char *dir);
static void set_tablespace_created(const char *link, const char *dir);
@ -80,13 +80,11 @@ static TablespaceCreatedList tablespace_created_dirs = {NULL, NULL};
* Entry point of pg_probackup RESTORE and VALIDATE subcommands.
*/
int
do_restore_or_validate(time_t target_backup_id,
pgRecoveryTarget *rt,
bool is_restore)
do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
bool is_restore)
{
int i;
parray *backups;
parray *timelines;
pgBackup *current_backup = NULL;
pgBackup *dest_backup = NULL;
pgBackup *base_full_backup = NULL;
@ -94,7 +92,7 @@ do_restore_or_validate(time_t target_backup_id,
int dest_backup_index = 0;
int base_full_backup_index = 0;
int corrupted_backup_index = 0;
char *action = is_restore ? "Restore":"Validate";
char *action = is_restore ? "Restore":"Validate";
if (is_restore)
{
@ -169,6 +167,8 @@ do_restore_or_validate(time_t target_backup_id,
if (rt->recovery_target_tli)
{
parray *timelines;
elog(LOG, "target timeline ID = %u", rt->recovery_target_tli);
/* Read timeline history files from archives */
timelines = readTimeLineHistory_probackup(rt->recovery_target_tli);
@ -370,8 +370,9 @@ restore_backup(pgBackup *backup)
char list_path[MAXPGPATH];
parray *files;
int i;
pthread_t restore_threads[num_threads];
restore_files_args *restore_threads_args[num_threads];
/* arrays with meta info for multi threaded backup */
pthread_t *restore_threads;
restore_files_args *restore_threads_args;
bool restore_isok = true;
if (backup->status != BACKUP_STATUS_OK)
@ -405,17 +406,21 @@ restore_backup(pgBackup *backup)
pgBackupGetPath(backup, list_path, lengthof(list_path), DATABASE_FILE_LIST);
files = dir_read_file_list(database_path, list_path);
restore_threads = (pthread_t *) palloc(sizeof(pthread_t)*num_threads);
restore_threads_args = (restore_files_args *) palloc(sizeof(restore_files_args)*num_threads);
/* setup threads */
for (i = 0; i < parray_num(files); i++)
{
pgFile *file = (pgFile *) parray_get(files, i);
__sync_lock_release(&file->lock);
pg_atomic_clear_flag(&file->lock);
}
/* Restore files into target directory */
for (i = 0; i < num_threads; i++)
{
restore_files_args *arg = pg_malloc(sizeof(restore_files_args));
restore_files_args *arg = &(restore_threads_args[i]);
arg->files = files;
arg->backup = backup;
/* By default there are some error */
@ -423,23 +428,22 @@ restore_backup(pgBackup *backup)
elog(LOG, "Start thread for num:%li", parray_num(files));
restore_threads_args[i] = arg;
pthread_create(&restore_threads[i], NULL,
(void *(*)(void *)) restore_files, arg);
pthread_create(&restore_threads[i], NULL, restore_files, arg);
}
/* Wait theads */
for (i = 0; i < num_threads; i++)
{
pthread_join(restore_threads[i], NULL);
if (restore_threads_args[i]->ret == 1)
if (restore_threads_args[i].ret == 1)
restore_isok = false;
pg_free(restore_threads_args[i]);
}
if (!restore_isok)
elog(ERROR, "Data files restoring failed");
pfree(restore_threads);
pfree(restore_threads_args);
/* cleanup */
parray_walk(files, pgFileFree);
parray_free(files);
@ -460,7 +464,7 @@ remove_deleted_files(pgBackup *backup)
parray *files;
parray *files_restored;
char filelist_path[MAXPGPATH];
int i;
int i;
pgBackupGetPath(backup, filelist_path, lengthof(filelist_path), DATABASE_FILE_LIST);
/* Read backup's filelist using target database path as base path */
@ -585,14 +589,6 @@ restore_directories(const char *pg_data_dir, const char *backup_dir)
linked_path, dir_created, link_name);
}
/*
* This check was done in check_tablespace_mapping(). But do
* it again.
*/
if (!dir_is_empty(linked_path))
elog(ERROR, "restore tablespace destination is not empty: \"%s\"",
linked_path);
if (link_sep)
elog(LOG, "create directory \"%s\" and symbolic link \"%.*s\"",
linked_path,
@ -710,7 +706,7 @@ check_tablespace_mapping(pgBackup *backup)
/*
* Restore files into $PGDATA.
*/
static void
static void *
restore_files(void *arg)
{
int i;
@ -722,7 +718,7 @@ restore_files(void *arg)
char *rel_path;
pgFile *file = (pgFile *) parray_get(arguments->files, i);
if (__sync_lock_test_and_set(&file->lock, 1) != 0)
if (!pg_atomic_test_set_flag(&file->lock))
continue;
pgBackupGetPath(arguments->backup, from_root,
@ -781,12 +777,14 @@ restore_files(void *arg)
/* print size of restored file */
if (file->write_size != BYTES_INVALID)
elog(LOG, "Restored file %s : %lu bytes",
file->path, (unsigned long) file->write_size);
elog(LOG, "Restored file %s : " INT64_FORMAT " bytes",
file->path, file->write_size);
}
/* Data files restoring is successful */
arguments->ret = 0;
return NULL;
}
/* Create recovery.conf with given recovery target parameters */
@ -795,9 +793,9 @@ create_recovery_conf(time_t backup_id,
pgRecoveryTarget *rt,
pgBackup *backup)
{
char path[MAXPGPATH];
FILE *fp;
bool need_restore_conf = false;
char path[MAXPGPATH];
FILE *fp;
bool need_restore_conf = false;
if (!backup->stream
|| (rt->time_specified || rt->xid_specified))
@ -967,7 +965,8 @@ readTimeLineHistory_probackup(TimeLineID targetTLI)
entry = pgut_new(TimeLineHistoryEntry);
entry->tli = targetTLI;
/* LSN in target timeline is valid */
entry->end = (uint32) (-1UL << 32) | -1UL;
/* TODO ensure that -1UL --> -1L fix is correct */
entry->end = (uint32) (-1L << 32) | -1L;
parray_insert(result, 0, entry);
return result;
@ -988,10 +987,13 @@ satisfy_recovery_target(const pgBackup *backup, const pgRecoveryTarget *rt)
bool
satisfy_timeline(const parray *timelines, const pgBackup *backup)
{
int i;
int i;
for (i = 0; i < parray_num(timelines); i++)
{
TimeLineHistoryEntry *timeline = (TimeLineHistoryEntry *) parray_get(timelines, i);
TimeLineHistoryEntry *timeline;
timeline = (TimeLineHistoryEntry *) parray_get(timelines, i);
if (backup->tli == timeline->tli &&
backup->stop_lsn < timeline->end)
return true;
@ -1012,14 +1014,14 @@ parseRecoveryTargetOptions(const char *target_time,
const char *target_action,
bool restore_no_validate)
{
time_t dummy_time;
TransactionId dummy_xid;
bool dummy_bool;
time_t dummy_time;
TransactionId dummy_xid;
bool dummy_bool;
/*
* count the number of the mutually exclusive options which may specify
* recovery target. If final value > 1, throw an error.
*/
int recovery_target_specified = 0;
int recovery_target_specified = 0;
pgRecoveryTarget *rt = pgut_new(pgRecoveryTarget);
/* fill all options with default values */
@ -1044,7 +1046,7 @@ parseRecoveryTargetOptions(const char *target_time,
rt->time_specified = true;
rt->target_time_string = target_time;
if (parse_time(target_time, &dummy_time))
if (parse_time(target_time, &dummy_time, false))
rt->recovery_target_time = dummy_time;
else
elog(ERROR, "Invalid value of --time option %s", target_time);

View File

@ -3,28 +3,40 @@
* show.c: show backup information.
*
* Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
* Portions Copyright (c) 2015-2017, Postgres Professional
* Portions Copyright (c) 2015-2018, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#include "pg_probackup.h"
#include <time.h>
#include <unistd.h>
#include <dirent.h>
#include <sys/stat.h>
#include "pqexpbuffer.h"
static void show_backup_list(FILE *out, parray *backup_list);
static void show_backup_detail(FILE *out, pgBackup *backup);
static int do_show_instance(time_t requested_backup_id);
#include "utils/json.h"
static void show_instance_start(void);
static void show_instance_end(void);
static void show_instance(time_t requested_backup_id, bool show_name);
static int show_backup(time_t requested_backup_id);
static void show_instance_plain(parray *backup_list, bool show_name);
static void show_instance_json(parray *backup_list);
static PQExpBufferData show_buf;
static bool first_instance = true;
static int32 json_level = 0;
int
do_show(time_t requested_backup_id)
{
if (instance_name == NULL
&& requested_backup_id != INVALID_BACKUP_ID)
if (instance_name == NULL &&
requested_backup_id != INVALID_BACKUP_ID)
elog(ERROR, "You must specify --instance to use --backup_id option");
if (instance_name == NULL)
@ -38,10 +50,12 @@ do_show(time_t requested_backup_id)
join_path_components(path, backup_path, BACKUPS_DIR);
dir = opendir(path);
if (dir == NULL)
elog(ERROR, "cannot open directory \"%s\": %s", path, strerror(errno));
elog(ERROR, "Cannot open directory \"%s\": %s",
path, strerror(errno));
errno = 0;
while ((dent = readdir(dir)))
show_instance_start();
while (errno = 0, (dent = readdir(dir)) != NULL)
{
char child[MAXPGPATH];
struct stat st;
@ -54,73 +68,47 @@ do_show(time_t requested_backup_id)
join_path_components(child, path, dent->d_name);
if (lstat(child, &st) == -1)
elog(ERROR, "cannot stat file \"%s\": %s", child, strerror(errno));
elog(ERROR, "Cannot stat file \"%s\": %s",
child, strerror(errno));
if (!S_ISDIR(st.st_mode))
continue;
instance_name = dent->d_name;
sprintf(backup_instance_path, "%s/%s/%s", backup_path, BACKUPS_DIR, instance_name);
fprintf(stdout, "\nBACKUP INSTANCE '%s'\n", instance_name);
do_show_instance(0);
show_instance(INVALID_BACKUP_ID, true);
}
if (errno)
elog(ERROR, "Cannot read directory \"%s\": %s",
path, strerror(errno));
if (closedir(dir))
elog(ERROR, "Cannot close directory \"%s\": %s",
path, strerror(errno));
show_instance_end();
return 0;
}
else if (requested_backup_id == INVALID_BACKUP_ID ||
show_format == SHOW_JSON)
{
show_instance_start();
show_instance(requested_backup_id, false);
show_instance_end();
return 0;
}
else
return do_show_instance(requested_backup_id);
}
/*
* If 'requested_backup_id' is INVALID_BACKUP_ID, show brief meta information
* about all backups in the backup instance.
* If valid backup id is passed, show detailed meta information
* about specified backup.
*/
static int
do_show_instance(time_t requested_backup_id)
{
if (requested_backup_id != INVALID_BACKUP_ID)
{
pgBackup *backup;
backup = read_backup(requested_backup_id);
if (backup == NULL)
{
elog(INFO, "Requested backup \"%s\" is not found.",
/* We do not need free base36enc's result, we exit anyway */
base36enc(requested_backup_id));
/* This is not error */
return 0;
}
show_backup_detail(stdout, backup);
/* cleanup */
pgBackupFree(backup);
}
else
{
parray *backup_list;
backup_list = catalog_get_backup_list(INVALID_BACKUP_ID);
if (backup_list == NULL)
elog(ERROR, "Failed to get backup list.");
show_backup_list(stdout, backup_list);
/* cleanup */
parray_walk(backup_list, pgBackupFree);
parray_free(backup_list);
}
return 0;
return show_backup(requested_backup_id);
}
static void
pretty_size(int64 size, char *buf, size_t len)
{
int exp = 0;
int exp = 0;
/* minus means the size is invalid */
if (size < 0)
@ -219,16 +207,113 @@ get_parent_tli(TimeLineID child_tli)
return result;
}
/*
* Initialize instance visualization.
*/
static void
show_backup_list(FILE *out, parray *backup_list)
show_instance_start(void)
{
initPQExpBuffer(&show_buf);
if (show_format == SHOW_PLAIN)
return;
first_instance = true;
json_level = 0;
appendPQExpBufferChar(&show_buf, '[');
json_level++;
}
/*
* Finalize instance visualization.
*/
static void
show_instance_end(void)
{
if (show_format == SHOW_JSON)
appendPQExpBufferStr(&show_buf, "\n]\n");
fputs(show_buf.data, stdout);
termPQExpBuffer(&show_buf);
}
/*
* Show brief meta information about all backups in the backup instance.
*/
static void
show_instance(time_t requested_backup_id, bool show_name)
{
parray *backup_list;
backup_list = catalog_get_backup_list(requested_backup_id);
if (backup_list == NULL)
elog(ERROR, "Failed to get backup list.");
if (show_format == SHOW_PLAIN)
show_instance_plain(backup_list, show_name);
else if (show_format == SHOW_JSON)
show_instance_json(backup_list);
else
elog(ERROR, "Invalid show format %d", (int) show_format);
/* cleanup */
parray_walk(backup_list, pgBackupFree);
parray_free(backup_list);
}
/*
* Show detailed meta information about specified backup.
*/
static int
show_backup(time_t requested_backup_id)
{
pgBackup *backup;
backup = read_backup(requested_backup_id);
if (backup == NULL)
{
elog(INFO, "Requested backup \"%s\" is not found.",
/* We do not need free base36enc's result, we exit anyway */
base36enc(requested_backup_id));
/* This is not error */
return 0;
}
if (show_format == SHOW_PLAIN)
pgBackupWriteControl(stdout, backup);
else
elog(ERROR, "Invalid show format %d", (int) show_format);
/* cleanup */
pgBackupFree(backup);
return 0;
}
/*
* Plain output.
*/
/*
* Show instance backups in plain format.
*/
static void
show_instance_plain(parray *backup_list, bool show_name)
{
int i;
if (show_name)
printfPQExpBuffer(&show_buf, "\nBACKUP INSTANCE '%s'\n", instance_name);
/* if you add new fields here, fix the header */
/* show header */
fputs("============================================================================================================================================\n", out);
fputs(" Instance Version ID Recovery time Mode WAL Current/Parent TLI Time Data Start LSN Stop LSN Status \n", out);
fputs("============================================================================================================================================\n", out);
appendPQExpBufferStr(&show_buf,
"============================================================================================================================================\n");
appendPQExpBufferStr(&show_buf,
" Instance Version ID Recovery time Mode WAL Current/Parent TLI Time Data Start LSN Stop LSN Status \n");
appendPQExpBufferStr(&show_buf,
"============================================================================================================================================\n");
for (i = 0; i < parray_num(backup_list); i++)
{
@ -255,27 +340,163 @@ show_backup_list(FILE *out, parray *backup_list)
/* Get parent timeline before printing */
parent_tli = get_parent_tli(backup->tli);
fprintf(out, " %-11s %-8s %-6s %-22s %-6s %-7s %3d / %-3d %5s %6s %2X/%-8X %2X/%-8X %-8s\n",
instance_name,
(backup->server_version[0] ? backup->server_version : "----"),
base36enc(backup->start_time),
timestamp,
pgBackupGetBackupMode(backup),
backup->stream ? "STREAM": "ARCHIVE",
backup->tli,
parent_tli,
duration,
data_bytes_str,
(uint32) (backup->start_lsn >> 32),
(uint32) backup->start_lsn,
(uint32) (backup->stop_lsn >> 32),
(uint32) backup->stop_lsn,
status2str(backup->status));
appendPQExpBuffer(&show_buf,
" %-11s %-8s %-6s %-22s %-6s %-7s %3d / %-3d %5s %6s %2X/%-8X %2X/%-8X %-8s\n",
instance_name,
(backup->server_version[0] ? backup->server_version : "----"),
base36enc(backup->start_time),
timestamp,
pgBackupGetBackupMode(backup),
backup->stream ? "STREAM": "ARCHIVE",
backup->tli,
parent_tli,
duration,
data_bytes_str,
(uint32) (backup->start_lsn >> 32),
(uint32) backup->start_lsn,
(uint32) (backup->stop_lsn >> 32),
(uint32) backup->stop_lsn,
status2str(backup->status));
}
}
/*
* Json output.
*/
/*
* Show instance backups in json format.
*/
static void
show_backup_detail(FILE *out, pgBackup *backup)
show_instance_json(parray *backup_list)
{
pgBackupWriteControl(out, backup);
int i;
PQExpBuffer buf = &show_buf;
if (!first_instance)
appendPQExpBufferChar(buf, ',');
/* Begin of instance object */
json_add(buf, JT_BEGIN_OBJECT, &json_level);
json_add_value(buf, "instance", instance_name, json_level, false);
json_add_key(buf, "backups", json_level, true);
/*
* List backups.
*/
json_add(buf, JT_BEGIN_ARRAY, &json_level);
for (i = 0; i < parray_num(backup_list); i++)
{
pgBackup *backup = parray_get(backup_list, i);
TimeLineID parent_tli;
char timestamp[100] = "----";
char lsn[20];
if (i != 0)
appendPQExpBufferChar(buf, ',');
json_add(buf, JT_BEGIN_OBJECT, &json_level);
json_add_value(buf, "id", base36enc(backup->start_time), json_level,
false);
if (backup->parent_backup != 0)
json_add_value(buf, "parent-backup-id",
base36enc(backup->parent_backup), json_level, true);
json_add_value(buf, "backup-mode", pgBackupGetBackupMode(backup),
json_level, true);
json_add_value(buf, "wal", backup->stream ? "STREAM": "ARCHIVE",
json_level, true);
json_add_value(buf, "compress-alg",
deparse_compress_alg(backup->compress_alg), json_level,
true);
json_add_key(buf, "compress-level", json_level, true);
appendPQExpBuffer(buf, "%d", backup->compress_level);
json_add_value(buf, "from-replica",
backup->from_replica ? "true" : "false", json_level,
true);
json_add_key(buf, "block-size", json_level, true);
appendPQExpBuffer(buf, "%u", backup->block_size);
json_add_key(buf, "xlog-block-size", json_level, true);
appendPQExpBuffer(buf, "%u", backup->wal_block_size);
json_add_key(buf, "checksum-version", json_level, true);
appendPQExpBuffer(buf, "%u", backup->checksum_version);
json_add_value(buf, "program-version", backup->program_version,
json_level, true);
json_add_value(buf, "server-version", backup->server_version,
json_level, true);
json_add_key(buf, "current-tli", json_level, true);
appendPQExpBuffer(buf, "%d", backup->tli);
json_add_key(buf, "parent-tli", json_level, true);
parent_tli = get_parent_tli(backup->tli);
appendPQExpBuffer(buf, "%u", parent_tli);
snprintf(lsn, lengthof(lsn), "%X/%X",
(uint32) (backup->start_lsn >> 32), (uint32) backup->start_lsn);
json_add_value(buf, "start-lsn", lsn, json_level, true);
snprintf(lsn, lengthof(lsn), "%X/%X",
(uint32) (backup->stop_lsn >> 32), (uint32) backup->stop_lsn);
json_add_value(buf, "stop-lsn", lsn, json_level, true);
time2iso(timestamp, lengthof(timestamp), backup->start_time);
json_add_value(buf, "start-time", timestamp, json_level, true);
if (backup->end_time)
{
time2iso(timestamp, lengthof(timestamp), backup->end_time);
json_add_value(buf, "end-time", timestamp, json_level, true);
}
json_add_key(buf, "recovery-xid", json_level, true);
appendPQExpBuffer(buf, XID_FMT, backup->recovery_xid);
if (backup->recovery_time > 0)
{
time2iso(timestamp, lengthof(timestamp), backup->recovery_time);
json_add_value(buf, "recovery-time", timestamp, json_level, true);
}
if (backup->data_bytes != BYTES_INVALID)
{
json_add_key(buf, "data-bytes", json_level, true);
appendPQExpBuffer(buf, INT64_FORMAT, backup->data_bytes);
}
if (backup->wal_bytes != BYTES_INVALID)
{
json_add_key(buf, "wal-bytes", json_level, true);
appendPQExpBuffer(buf, INT64_FORMAT, backup->wal_bytes);
}
if (backup->primary_conninfo)
json_add_value(buf, "primary_conninfo", backup->primary_conninfo,
json_level, true);
json_add_value(buf, "status", status2str(backup->status), json_level,
true);
json_add(buf, JT_END_OBJECT, &json_level);
}
/* End of backups */
json_add(buf, JT_END_ARRAY, &json_level);
/* End of instance object */
json_add(buf, JT_END_OBJECT, &json_level);
first_instance = false;
}

View File

@ -38,7 +38,7 @@ get_pgpid(void)
snprintf(pid_file, lengthof(pid_file), "%s/postmaster.pid", pgdata);
pidf = fopen(pid_file, "r");
pidf = fopen(pid_file, PG_BINARY_R);
if (pidf == NULL)
{
/* No pid file, not an error on startup */

View File

@ -176,8 +176,8 @@ uint32
get_data_checksum_version(bool safe)
{
ControlFileData ControlFile;
char *buffer;
size_t size;
char *buffer;
size_t size;
/* First fetch file... */
buffer = slurpFile(pgdata, "global/pg_control", &size, safe);
@ -191,7 +191,7 @@ get_data_checksum_version(bool safe)
/*
* Convert time_t value to ISO-8601 format string
* Convert time_t value to ISO-8601 format string. Always set timezone offset.
*/
void
time2iso(char *buf, size_t len, time_t time)
@ -199,25 +199,23 @@ time2iso(char *buf, size_t len, time_t time)
struct tm *ptm = gmtime(&time);
time_t gmt = mktime(ptm);
time_t offset;
char *ptr = buf;
ptm = localtime(&time);
offset = time - gmt + (ptm->tm_isdst ? 3600 : 0);
strftime(buf, len, "%Y-%m-%d %H:%M:%S", ptm);
strftime(ptr, len, "%Y-%m-%d %H:%M:%S", ptm);
if (offset != 0)
ptr += strlen(ptr);
snprintf(ptr, len - (ptr - buf), "%c%02d",
(offset >= 0) ? '+' : '-',
abs((int) offset) / SECS_PER_HOUR);
if (abs((int) offset) % SECS_PER_HOUR != 0)
{
buf += strlen(buf);
sprintf(buf, "%c%02d",
(offset >= 0) ? '+' : '-',
abs((int) offset) / SECS_PER_HOUR);
if (abs((int) offset) % SECS_PER_HOUR != 0)
{
buf += strlen(buf);
sprintf(buf, ":%02d",
abs((int) offset % SECS_PER_HOUR) / SECS_PER_MINUTE);
}
ptr += strlen(ptr);
snprintf(ptr, len - (ptr - buf), ":%02d",
abs((int) offset % SECS_PER_HOUR) / SECS_PER_MINUTE);
}
}
@ -310,11 +308,21 @@ pgBackup_init(pgBackup *backup)
backup->end_time = (time_t) 0;
backup->recovery_xid = 0;
backup->recovery_time = (time_t) 0;
backup->data_bytes = BYTES_INVALID;
backup->wal_bytes = BYTES_INVALID;
backup->compress_alg = NOT_DEFINED_COMPRESS;
backup->compress_level = 0;
backup->block_size = BLCKSZ;
backup->wal_block_size = XLOG_BLCKSZ;
backup->checksum_version = 0;
backup->stream = false;
backup->from_replica = false;
backup->parent_backup = 0;
backup->primary_conninfo = NULL;
backup->program_version[0] = '\0';
backup->server_version[0] = '\0';
}

134
src/utils/json.c Normal file
View File

@ -0,0 +1,134 @@
/*-------------------------------------------------------------------------
*
* json.c: - make json document.
*
* Copyright (c) 2018, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#include "json.h"
static void json_add_indent(PQExpBuffer buf, int32 level);
static void json_add_escaped(PQExpBuffer buf, const char *str);
/*
* Start or end json token. Currently it is a json object or array.
*
* Function modifies level value and adds indent if it appropriate.
*/
void
json_add(PQExpBuffer buf, JsonToken type, int32 *level)
{
	if (type == JT_BEGIN_ARRAY)
	{
		/* Arrays open inline (no indent); nesting deepens. */
		appendPQExpBufferChar(buf, '[');
		*level += 1;
	}
	else if (type == JT_BEGIN_OBJECT)
	{
		/* Objects open on a fresh, indented line; nesting deepens. */
		json_add_indent(buf, *level);
		appendPQExpBufferChar(buf, '{');
		*level += 1;
	}
	else if (type == JT_END_ARRAY || type == JT_END_OBJECT)
	{
		/*
		 * Closing tokens first shallow the nesting, then are placed on an
		 * indented line — except at the top level, where a bare newline
		 * precedes them.
		 */
		*level -= 1;
		if (*level == 0)
			appendPQExpBufferChar(buf, '\n');
		else
			json_add_indent(buf, *level);
		appendPQExpBufferChar(buf, (type == JT_END_ARRAY) ? ']' : '}');
	}
	/* Any other token type is silently ignored, as before. */
}
/*
* Add json object's key. If it isn't first key we need to add a comma.
*/
void
json_add_key(PQExpBuffer buf, const char *name, int32 level, bool add_comma)
{
	/* Members after the first in an object are separated by a comma. */
	if (add_comma)
		appendPQExpBufferStr(buf, ",");

	/* Each key starts on its own indented line. */
	json_add_indent(buf, level);

	/* The key itself is an escaped JSON string followed by ": ". */
	json_add_escaped(buf, name);
	appendPQExpBufferStr(buf, ": ");
}
/*
* Add json object's key and value. If it isn't first key we need to add a
* comma.
*/
void
json_add_value(PQExpBuffer buf, const char *name, const char *value,
			   int32 level, bool add_comma)
{
	/* Key first — handles the comma separator and indentation ... */
	json_add_key(buf, name, level, add_comma);
	/* ... then the value, written as an escaped JSON string. */
	json_add_escaped(buf, value);
}
/*
 * Append a newline plus one indentation unit per nesting level.
 * Nothing at all is emitted for the top level (level == 0).
 */
static void
json_add_indent(PQExpBuffer buf, int32 level)
{
	/*
	 * NOTE(review): uint16 counter against an int32 level — fine at any
	 * realistic JSON nesting depth, but would misbehave for level > 65535.
	 */
	uint16		i;

	if (level == 0)
		return;

	appendPQExpBufferChar(buf, '\n');
	for (i = 0; i < level; i++)
		appendPQExpBufferStr(buf, "    ");
}
/*
 * Append "str" to the buffer as a double-quoted JSON string, escaping the
 * characters JSON requires: the named escapes \b \f \n \r \t \" \\, and a
 * numeric \u00XX escape for any other control character below 0x20.
 */
static void
json_add_escaped(PQExpBuffer buf, const char *str)
{
	const char *p;

	appendPQExpBufferChar(buf, '"');
	for (p = str; *p; p++)
	{
		switch (*p)
		{
			case '\b':
				appendPQExpBufferStr(buf, "\\b");
				break;
			case '\f':
				appendPQExpBufferStr(buf, "\\f");
				break;
			case '\n':
				appendPQExpBufferStr(buf, "\\n");
				break;
			case '\r':
				appendPQExpBufferStr(buf, "\\r");
				break;
			case '\t':
				appendPQExpBufferStr(buf, "\\t");
				break;
			case '"':
				appendPQExpBufferStr(buf, "\\\"");
				break;
			case '\\':
				appendPQExpBufferStr(buf, "\\\\");
				break;
			default:
				/* Remaining control characters get a numeric escape. */
				if ((unsigned char) *p < ' ')
					appendPQExpBuffer(buf, "\\u%04x", (int) *p);
				else
					appendPQExpBufferChar(buf, *p);
				break;
		}
	}
	appendPQExpBufferChar(buf, '"');
}

33
src/utils/json.h Normal file
View File

@ -0,0 +1,33 @@
/*-------------------------------------------------------------------------
*
* json.h: - prototypes of json output functions.
*
* Copyright (c) 2018, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#ifndef PROBACKUP_JSON_H
#define PROBACKUP_JSON_H

#include "postgres_fe.h"
#include "pqexpbuffer.h"

/*
 * Json document tokens.
 */
typedef enum
{
	JT_BEGIN_ARRAY,				/* '[' */
	JT_END_ARRAY,				/* ']' */
	JT_BEGIN_OBJECT,			/* '{' */
	JT_END_OBJECT				/* '}' */
} JsonToken;

/* Emit a structural token and keep *level (nesting depth) in sync. */
extern void json_add(PQExpBuffer buf, JsonToken type, int32 *level);
/* Emit an object key (with optional comma separator and indentation). */
extern void json_add_key(PQExpBuffer buf, const char *name, int32 level,
						 bool add_comma);
/* Emit a "name": "value" pair; both sides are escaped as JSON strings. */
extern void json_add_value(PQExpBuffer buf, const char *name, const char *value,
						   int32 level, bool add_comma);

#endif   /* PROBACKUP_JSON_H */

View File

@ -8,7 +8,6 @@
*/
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
@ -16,6 +15,7 @@
#include "logger.h"
#include "pgut.h"
#include "thread.h"
/* Logger parameters */
@ -69,6 +69,9 @@ static bool exit_hook_registered = false;
static bool loggin_in_progress = false;
static pthread_mutex_t log_file_mutex = PTHREAD_MUTEX_INITIALIZER;
#ifdef WIN32
static long mutex_initlock = 0;
#endif
void
init_logger(const char *root_path)
@ -138,11 +141,10 @@ exit_if_necessary(int elevel)
}
/* If this is not the main thread then don't call exit() */
if (main_tid != pthread_self())
#ifdef WIN32
if (main_tid != GetCurrentThreadId())
ExitThread(elevel);
#else
if (!pthread_equal(main_tid, pthread_self()))
pthread_exit(NULL);
#endif
else
@ -174,6 +176,19 @@ elog_internal(int elevel, bool file_only, const char *fmt, va_list args)
/*
* There is no need to lock if this is elog() from upper elog().
*/
#ifdef WIN32
std_args = NULL;
error_args = NULL;
if (log_file_mutex == NULL)
{
while (InterlockedExchange(&mutex_initlock, 1) == 1)
/* loop, another thread own the lock */ ;
if (log_file_mutex == NULL)
pthread_mutex_init(&log_file_mutex, NULL);
InterlockedExchange(&mutex_initlock, 0);
}
#endif
pthread_mutex_lock(&log_file_mutex);
loggin_in_progress = true;
@ -226,7 +241,7 @@ elog_internal(int elevel, bool file_only, const char *fmt, va_list args)
fprintf(error_log_file, "%s: ", strfbuf);
write_elevel(error_log_file, elevel);
vfprintf(error_log_file, fmt, error_args);
fputc('\n', error_log_file);
fflush(error_log_file);
@ -241,7 +256,6 @@ elog_internal(int elevel, bool file_only, const char *fmt, va_list args)
if (write_to_stderr)
{
write_elevel(stderr, elevel);
if (write_to_file)
vfprintf(stderr, fmt, std_args);
else

View File

@ -42,12 +42,6 @@ static char *password = NULL;
bool prompt_password = true;
bool force_password = false;
#ifdef WIN32
DWORD main_tid = 0;
#else
pthread_t main_tid = 0;
#endif
/* Database connections */
static PGcancel *volatile cancel_conn = NULL;
@ -141,8 +135,10 @@ static const unit_conversion time_unit_conversion_table[] =
static size_t
option_length(const pgut_option opts[])
{
size_t len;
size_t len;
for (len = 0; opts && opts[len].type; len++) { }
return len;
}
@ -162,7 +158,7 @@ option_has_arg(char type)
static void
option_copy(struct option dst[], const pgut_option opts[], size_t len)
{
size_t i;
size_t i;
for (i = 0; i < len; i++)
{
@ -260,7 +256,8 @@ assign_option(pgut_option *opt, const char *optarg, pgut_optsrc src)
message = "a valid string. But provided: ";
break;
case 't':
if (parse_time(optarg, opt->var))
if (parse_time(optarg, opt->var,
opt->source == SOURCE_FILE))
return;
message = "a time";
break;
@ -750,9 +747,12 @@ parse_uint64(const char *value, uint64 *result, int flags)
/*
* Convert ISO-8601 format string to time_t value.
*
* If utc_default is true, then if timezone offset isn't specified tz will be
* +00:00.
*/
bool
parse_time(const char *value, time_t *result)
parse_time(const char *value, time_t *result, bool utc_default)
{
size_t len;
int fields_num,
@ -874,7 +874,7 @@ parse_time(const char *value, time_t *result)
*result = mktime(&tm);
/* adjust time zone */
if (tz_set)
if (tz_set || utc_default)
{
time_t ltime = time(NULL);
struct tm *ptm = gmtime(&ltime);
@ -1053,7 +1053,7 @@ pgut_getopt(int argc, char **argv, pgut_option options[])
size_t len;
len = option_length(options);
longopts = pgut_newarray(struct option, len + 1);
longopts = pgut_newarray(struct option, len + 1 /* zero/end option */);
option_copy(longopts, options, len);
optstring = longopts_to_optstring(longopts, len);
@ -1225,7 +1225,7 @@ get_next_token(const char *src, char *dst, const char *line)
}
else
{
i = j = strcspn(s, "# \n\r\t\v");
i = j = strcspn(s, "#\n\r\t\v");
memcpy(dst, s, j);
}

View File

@ -15,7 +15,6 @@
#include "pqexpbuffer.h"
#include <assert.h>
#include <pthread.h>
#include <sys/time.h>
#include "logger.h"
@ -59,7 +58,7 @@ typedef enum pgut_optsrc
typedef struct pgut_option
{
char type;
char sname; /* short name */
uint8 sname; /* short name */
const char *lname; /* long name */
void *var; /* pointer to variable */
pgut_optsrc allowed; /* allowed source */
@ -94,13 +93,6 @@ extern const char *PROGRAM_VERSION;
extern const char *PROGRAM_URL;
extern const char *PROGRAM_EMAIL;
/* ID of the main thread */
#ifdef WIN32
extern DWORD main_tid;
#else
extern pthread_t main_tid;
#endif
extern void pgut_help(bool details);
/*
@ -212,7 +204,7 @@ extern bool parse_int32(const char *value, int32 *result, int flags);
extern bool parse_uint32(const char *value, uint32 *result, int flags);
extern bool parse_int64(const char *value, int64 *result, int flags);
extern bool parse_uint64(const char *value, uint64 *result, int flags);
extern bool parse_time(const char *value, time_t *result);
extern bool parse_time(const char *value, time_t *result, bool utc_default);
extern bool parse_int(const char *value, int *result, int flags,
const char **hintmsg);

81
src/utils/thread.c Normal file
View File

@ -0,0 +1,81 @@
/*-------------------------------------------------------------------------
*
* thread.c: - multi-platform pthread implementations.
*
* Copyright (c) 2018, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#include "thread.h"
pthread_t main_tid = 0;
#ifdef WIN32
#include <errno.h>
typedef struct win32_pthread
{
HANDLE handle;
void *(*routine) (void *);
void *arg;
void *result;
} win32_pthread;
static unsigned __stdcall
win32_pthread_run(void *arg)
{
win32_pthread *th = (win32_pthread *)arg;
th->result = th->routine(th->arg);
return 0;
}
int
pthread_create(pthread_t *thread,
pthread_attr_t *attr,
void *(*start_routine) (void *),
void *arg)
{
int save_errno;
win32_pthread *th;
th = (win32_pthread *)pg_malloc(sizeof(win32_pthread));
th->routine = start_routine;
th->arg = arg;
th->result = NULL;
th->handle = (HANDLE)_beginthreadex(NULL, 0, win32_pthread_run, th, 0, NULL);
if (th->handle == NULL)
{
save_errno = errno;
free(th);
return save_errno;
}
*thread = th;
return 0;
}
int
pthread_join(pthread_t th, void **thread_return)
{
if (th == NULL || th->handle == NULL)
return errno = EINVAL;
if (WaitForSingleObject(th->handle, INFINITE) != WAIT_OBJECT_0)
{
_dosmaperr(GetLastError());
return errno;
}
if (thread_return)
*thread_return = th->result;
CloseHandle(th->handle);
free(th);
return 0;
}
#endif /* WIN32 */

33
src/utils/thread.h Normal file
View File

@ -0,0 +1,33 @@
/*-------------------------------------------------------------------------
*
* thread.h: - multi-platform pthread implementations.
*
* Copyright (c) 2018, Postgres Professional
*
*-------------------------------------------------------------------------
*/
#ifndef PROBACKUP_THREAD_H
#define PROBACKUP_THREAD_H
#ifdef WIN32
#include "postgres_fe.h"
#include "port/pthread-win32.h"
/* Use native win32 threads on Windows */
typedef struct win32_pthread *pthread_t;
typedef int pthread_attr_t;
#define PTHREAD_MUTEX_INITIALIZER NULL //{ NULL, 0 }
#define PTHREAD_ONCE_INIT false
extern int pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
extern int pthread_join(pthread_t th, void **thread_return);
#else
/* Use platform-dependent pthread capability */
#include <pthread.h>
#endif
extern pthread_t main_tid;
#endif /* PROBACKUP_THREAD_H */

View File

@ -11,10 +11,11 @@
#include "pg_probackup.h"
#include <sys/stat.h>
#include <pthread.h>
#include <dirent.h>
static void pgBackupValidateFiles(void *arg);
#include "utils/thread.h"
static void *pgBackupValidateFiles(void *arg);
static void do_validate_instance(void);
static bool corrupted_backup_found = false;
@ -42,8 +43,9 @@ pgBackupValidate(pgBackup *backup)
parray *files;
bool corrupted = false;
bool validation_isok = true;
pthread_t validate_threads[num_threads];
validate_files_args *validate_threads_args[num_threads];
/* arrays with meta info for multi threaded validate */
pthread_t *validate_threads;
validate_files_args *validate_threads_args;
int i;
/* Revalidation is attempted for DONE, ORPHAN and CORRUPT backups */
@ -77,36 +79,44 @@ pgBackupValidate(pgBackup *backup)
for (i = 0; i < parray_num(files); i++)
{
pgFile *file = (pgFile *) parray_get(files, i);
__sync_lock_release(&file->lock);
pg_atomic_clear_flag(&file->lock);
}
/* init thread args with own file lists */
validate_threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads);
validate_threads_args = (validate_files_args *)
palloc(sizeof(validate_files_args) * num_threads);
/* Validate files */
for (i = 0; i < num_threads; i++)
{
validate_files_args *arg = pg_malloc(sizeof(validate_files_args));
validate_files_args *arg = &(validate_threads_args[i]);
arg->files = files;
arg->corrupted = false;
/* By default there are some error */
arg->ret = 1;
validate_threads_args[i] = arg;
pthread_create(&validate_threads[i], NULL,
(void *(*)(void *)) pgBackupValidateFiles, arg);
pthread_create(&validate_threads[i], NULL, pgBackupValidateFiles, arg);
}
/* Wait theads */
for (i = 0; i < num_threads; i++)
{
validate_files_args *arg = &(validate_threads_args[i]);
pthread_join(validate_threads[i], NULL);
if (validate_threads_args[i]->corrupted)
if (arg->corrupted)
corrupted = true;
if (validate_threads_args[i]->ret == 1)
if (arg->ret == 1)
validation_isok = false;
pg_free(validate_threads_args[i]);
}
if (!validation_isok)
elog(ERROR, "Data files validation failed");
pfree(validate_threads);
pfree(validate_threads_args);
/* cleanup */
parray_walk(files, pgFileFree);
parray_free(files);
@ -127,10 +137,10 @@ pgBackupValidate(pgBackup *backup)
* rather throw a WARNING and set arguments->corrupted = true.
* This is necessary to update backup status.
*/
static void
static void *
pgBackupValidateFiles(void *arg)
{
int i;
int i;
validate_files_args *arguments = (validate_files_args *)arg;
pg_crc32 crc;
@ -139,7 +149,7 @@ pgBackupValidateFiles(void *arg)
struct stat st;
pgFile *file = (pgFile *) parray_get(arguments->files, i);
if (__sync_lock_test_and_set(&file->lock, 1) != 0)
if (!pg_atomic_test_set_flag(&file->lock))
continue;
if (interrupted)
@ -179,9 +189,8 @@ pgBackupValidateFiles(void *arg)
if (file->write_size != st.st_size)
{
elog(WARNING, "Invalid size of backup file \"%s\" : %lu. Expected %lu",
file->path, (unsigned long) file->write_size,
(unsigned long) st.st_size);
elog(WARNING, "Invalid size of backup file \"%s\" : " INT64_FORMAT ". Expected %lu",
file->path, file->write_size, (unsigned long) st.st_size);
arguments->corrupted = true;
break;
}
@ -198,6 +207,8 @@ pgBackupValidateFiles(void *arg)
/* Data files validation is successful */
arguments->ret = 0;
return NULL;
}
/*
@ -267,7 +278,7 @@ do_validate_all(void)
static void
do_validate_instance(void)
{
char *current_backup_id;
char *current_backup_id;
int i;
parray *backups;
pgBackup *current_backup = NULL;

View File

@ -60,6 +60,10 @@ def load_tests(loader, tests, pattern):
# ptrack backup on replica should work correctly
# archive:
# immediate recovery and full recovery
# backward compatibility:
# previous version catalog must be readable by newer version
# incremental chain from previous version can be continued
# backups from previous version can be restored
# 10vanilla_1.3ptrack +
# 10vanilla+
# 9.6vanilla_1.3ptrack +

View File

@ -29,7 +29,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
node.safe_psql(
"postgres",
@ -45,11 +45,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.restore_node(
backup_dir, 'node', node)
node.start()
while node.safe_psql(
"postgres",
"select pg_is_in_recovery()") == 't\n':
sleep(1)
node.slow_start()
# Recreate backup calagoue
self.init_pb(backup_dir)
@ -65,11 +61,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.restore_node(
backup_dir, 'node', node,
options=["--recovery-target-action=promote"])
node.start()
while node.safe_psql(
"postgres",
"select pg_is_in_recovery()") == 't\n':
sleep(1)
node.slow_start()
self.assertEqual(
result, node.safe_psql("postgres", "SELECT * FROM t_heap"),
@ -97,7 +89,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()
# FIRST TIMELINE
node.safe_psql(
@ -117,11 +109,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.restore_node(
backup_dir, 'node', node,
options=['--immediate', '--recovery-target-action=promote'])
node.start()
while node.safe_psql(
"postgres",
"select pg_is_in_recovery()") == 't\n':
sleep(1)
node.slow_start()
if self.verbose:
print(node.safe_psql(
"postgres",
@ -152,11 +141,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.restore_node(
backup_dir, 'node', node,
options=['--immediate', '--recovery-target-action=promote'])
node.start()
while node.safe_psql(
"postgres",
"select pg_is_in_recovery()") == 't\n':
sleep(1)
node.slow_start()
if self.verbose:
print(
@ -184,11 +169,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.restore_node(
backup_dir, 'node', node,
options=['--immediate', '--recovery-target-action=promote'])
node.start()
while node.safe_psql(
"postgres",
"select pg_is_in_recovery()") == 't\n':
sleep(1)
node.slow_start()
if self.verbose:
print('Fourth timeline')
print(node.safe_psql(
@ -200,10 +182,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.restore_node(
backup_dir, 'node', node,
options=['--immediate', '--recovery-target-action=promote'])
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
sleep(1)
node.slow_start()
if self.verbose:
print('Fifth timeline')
print(node.safe_psql(
@ -215,10 +195,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.restore_node(
backup_dir, 'node', node,
options=['--immediate', '--recovery-target-action=promote'])
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
sleep(1)
node.slow_start()
if self.verbose:
print('Sixth timeline')
print(node.safe_psql(
@ -269,7 +247,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
node.append_conf(
'postgresql.auto.conf', "archive_command = '{0} %p %f'".format(
archive_script_path))
node.start()
node.slow_start()
try:
self.backup_node(
backup_dir, 'node', node,
@ -330,7 +308,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
f.flush()
f.close()
node.start()
node.slow_start()
node.safe_psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
@ -390,7 +368,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
f.flush()
f.close()
node.start()
node.slow_start()
node.safe_psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
@ -426,7 +404,11 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
# @unittest.expectedFailure
# @unittest.skip("skip")
def test_replica_archive(self):
"""make node without archiving, take stream backup and turn it into replica, set replica with archiving, make archive backup from replica"""
"""
make node without archiving, take stream backup and
turn it into replica, set replica with archiving,
make archive backup from replica
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
@ -441,7 +423,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
# ADD INSTANCE 'MASTER'
self.add_instance(backup_dir, 'master', master)
master.start()
master.slow_start()
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
@ -459,15 +441,18 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
# Settings for Replica
self.restore_node(backup_dir, 'master', replica)
self.set_replica(master, replica, synchronous=True)
self.add_instance(backup_dir, 'replica', replica)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start()
replica.slow_start(replica=True)
# Check data correctness on replica
after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
# Change data on master, take FULL backup from replica, restore taken backup and check that restored data equal to original data
# Change data on master, take FULL backup from replica,
# restore taken backup and check that restored data equal
# to original data
master.psql(
"postgres",
"insert into t_heap as select i as id, md5(i::text) as text, "
@ -496,12 +481,14 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.restore_node(backup_dir, 'replica', data_dir=node.data_dir)
node.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node.port))
node.start()
node.slow_start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
# Change data on master, make PAGE backup from replica, restore taken backup and check that restored data equal to original data
# Change data on master, make PAGE backup from replica,
# restore taken backup and check that restored data equal
# to original data
master.psql(
"postgres",
"insert into t_heap as select i as id, md5(i::text) as text, "
@ -526,7 +513,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
backup_dir, 'replica', data_dir=node.data_dir, backup_id=backup_id)
node.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node.port))
node.start()
node.slow_start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
@ -560,7 +547,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
# ADD INSTANCE 'MASTER'
self.add_instance(backup_dir, 'master', master)
self.set_archiving(backup_dir, 'master', master)
master.start()
master.slow_start()
master.psql(
"postgres",
@ -586,7 +573,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'replica', replica)
# SET ARCHIVING FOR REPLICA
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start()
replica.slow_start(replica=True)
# CHECK LOGICAL CORRECTNESS on REPLICA
after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
@ -641,7 +628,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
# ADD INSTANCE 'MASTER'
self.add_instance(backup_dir, 'master', master)
self.set_archiving(backup_dir, 'master', master)
master.start()
master.slow_start()
master.psql(
"postgres",
@ -668,7 +655,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
# self.add_instance(backup_dir, 'replica', replica)
# SET ARCHIVING FOR REPLICA
# self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start()
replica.slow_start(replica=True)
# CHECK LOGICAL CORRECTNESS on REPLICA
after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
@ -763,7 +750,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
# Check data correctness
node.cleanup()
self.restore_node(backup_dir, 'node', node)
node.start()
node.slow_start()
self.assertEqual(
result,
node.safe_psql(
@ -795,7 +783,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
if self.get_version(node) < self.version_to_num('10.0'):
return unittest.skip('You need PostgreSQL 10 for this test')
else:
pg_receivexlog_path = node.get_bin_path('pg_receivewal')
pg_receivexlog_path = self.get_bin_path('pg_receivewal')
pg_receivexlog = self.run_binary(
[
@ -834,7 +822,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
# Check data correctness
node.cleanup()
self.restore_node(backup_dir, 'node', node)
node.start()
node.slow_start()
self.assertEqual(
result, node.safe_psql("postgres", "SELECT * FROM t_heap"),
'data after restore not equal to original data')

View File

@ -29,15 +29,11 @@ class BackupTest(ProbackupTest, unittest.TestCase):
self.set_archiving(backup_dir, 'node', node)
node.start()
# full backup mode
# with open(path.join(node.logs_dir, "backup_full.log"), "wb") as backup_log:
# backup_log.write(self.backup_node(node, options=["--verbose"]))
backup_id = self.backup_node(backup_dir, 'node', node)
show_backup = self.show_pb(backup_dir, 'node')[0]
self.assertEqual(show_backup['Status'], "OK")
self.assertEqual(show_backup['Mode'], "FULL")
self.assertEqual(show_backup['status'], "OK")
self.assertEqual(show_backup['backup-mode'], "FULL")
# postmaster.pid and postmaster.opts shouldn't be copied
excluded = True
@ -61,29 +57,29 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# print self.show_pb(node)
show_backup = self.show_pb(backup_dir, 'node')[1]
self.assertEqual(show_backup['Status'], "OK")
self.assertEqual(show_backup['Mode'], "PAGE")
self.assertEqual(show_backup['status'], "OK")
self.assertEqual(show_backup['backup-mode'], "PAGE")
# Check parent backup
self.assertEqual(
backup_id,
self.show_pb(
backup_dir, 'node',
backup_id=show_backup['ID'])["parent-backup-id"])
backup_id=show_backup['id'])["parent-backup-id"])
# ptrack backup mode
self.backup_node(backup_dir, 'node', node, backup_type="ptrack")
show_backup = self.show_pb(backup_dir, 'node')[2]
self.assertEqual(show_backup['Status'], "OK")
self.assertEqual(show_backup['Mode'], "PTRACK")
self.assertEqual(show_backup['status'], "OK")
self.assertEqual(show_backup['backup-mode'], "PTRACK")
# Check parent backup
self.assertEqual(
page_backup_id,
self.show_pb(
backup_dir, 'node',
backup_id=show_backup['ID'])["parent-backup-id"])
backup_id=show_backup['id'])["parent-backup-id"])
# Clean after yourself
self.del_test_dir(module_name, fname)
@ -106,7 +102,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
self.backup_node(
backup_dir, 'node', node,
options=["-C"])
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
node.stop()
# Clean after yourself
@ -162,7 +158,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
repr(e.message), self.cmd))
self.assertEqual(
self.show_pb(backup_dir, 'node')[0]['Status'],
self.show_pb(backup_dir, 'node')[0]['status'],
"ERROR")
# Clean after yourself
@ -227,7 +223,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
self.assertEqual(
self.show_pb(backup_dir, 'node', backup_id)['status'], "CORRUPT")
self.assertEqual(
self.show_pb(backup_dir, 'node')[1]['Status'], "ERROR")
self.show_pb(backup_dir, 'node')[1]['status'], "ERROR")
# Clean after yourself
self.del_test_dir(module_name, fname)
@ -250,12 +246,12 @@ class BackupTest(ProbackupTest, unittest.TestCase):
self.backup_node(
backup_dir, 'node', node,
backup_type="full", options=["-j", "4"])
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
self.backup_node(
backup_dir, 'node', node,
backup_type="ptrack", options=["-j", "4"])
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
# Clean after yourself
self.del_test_dir(module_name, fname)
@ -282,11 +278,11 @@ class BackupTest(ProbackupTest, unittest.TestCase):
backup_dir, 'node', node, backup_type="full",
options=["-j", "4", "--stream"])
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
self.backup_node(
backup_dir, 'node', node,
backup_type="ptrack", options=["-j", "4", "--stream"])
self.assertEqual(self.show_pb(backup_dir, 'node')[1]['Status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node')[1]['status'], "OK")
# Clean after yourself
self.del_test_dir(module_name, fname)
@ -342,7 +338,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
f.close
self.assertTrue(
self.show_pb(backup_dir, 'node')[1]['Status'] == 'OK',
self.show_pb(backup_dir, 'node')[1]['status'] == 'OK',
"Backup Status should be OK")
# Clean after yourself
@ -415,7 +411,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
repr(e.message), self.cmd))
self.assertTrue(
self.show_pb(backup_dir, 'node')[1]['Status'] == 'ERROR',
self.show_pb(backup_dir, 'node')[1]['status'] == 'ERROR',
"Backup Status should be ERROR")
# Clean after yourself

View File

@ -83,7 +83,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()
@ -98,7 +99,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(page_result, page_result_new)
node.cleanup()
@ -113,7 +115,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
ptrack_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(ptrack_result, ptrack_result_new)
node.cleanup()
@ -187,7 +190,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()
@ -202,7 +206,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(page_result, page_result_new)
node.cleanup()
@ -217,7 +222,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
ptrack_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(ptrack_result, ptrack_result_new)
node.cleanup()
@ -294,7 +300,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()
@ -309,7 +316,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(page_result, page_result_new)
node.cleanup()
@ -324,7 +332,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
ptrack_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(ptrack_result, ptrack_result_new)
node.cleanup()
@ -401,7 +410,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()
@ -416,7 +426,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(page_result, page_result_new)
node.cleanup()
@ -431,7 +442,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
ptrack_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(ptrack_result, ptrack_result_new)
node.cleanup()

View File

@ -44,13 +44,13 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
self.backup_node(backup_dir, 'node', node)
show_backups = self.show_pb(backup_dir, 'node')
id_1 = show_backups[0]['ID']
id_2 = show_backups[1]['ID']
id_3 = show_backups[2]['ID']
id_1 = show_backups[0]['id']
id_2 = show_backups[1]['id']
id_3 = show_backups[2]['id']
self.delete_pb(backup_dir, 'node', id_2)
show_backups = self.show_pb(backup_dir, 'node')
self.assertEqual(show_backups[0]['ID'], id_1)
self.assertEqual(show_backups[1]['ID'], id_3)
self.assertEqual(show_backups[0]['id'], id_1)
self.assertEqual(show_backups[1]['id'], id_3)
# Clean after yourself
self.del_test_dir(module_name, fname)
@ -82,15 +82,15 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
self.assertEqual(len(show_backups), 4)
# delete first page backup
self.delete_pb(backup_dir, 'node', show_backups[1]['ID'])
self.delete_pb(backup_dir, 'node', show_backups[1]['id'])
show_backups = self.show_pb(backup_dir, 'node')
self.assertEqual(len(show_backups), 2)
self.assertEqual(show_backups[0]['Mode'], "FULL")
self.assertEqual(show_backups[0]['Status'], "OK")
self.assertEqual(show_backups[1]['Mode'], "FULL")
self.assertEqual(show_backups[1]['Status'], "OK")
self.assertEqual(show_backups[0]['backup-mode'], "FULL")
self.assertEqual(show_backups[0]['status'], "OK")
self.assertEqual(show_backups[1]['backup-mode'], "FULL")
self.assertEqual(show_backups[1]['status'], "OK")
# Clean after yourself
self.del_test_dir(module_name, fname)
@ -122,15 +122,15 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
self.assertEqual(len(show_backups), 4)
# delete first page backup
self.delete_pb(backup_dir, 'node', show_backups[1]['ID'])
self.delete_pb(backup_dir, 'node', show_backups[1]['id'])
show_backups = self.show_pb(backup_dir, 'node')
self.assertEqual(len(show_backups), 2)
self.assertEqual(show_backups[0]['Mode'], "FULL")
self.assertEqual(show_backups[0]['Status'], "OK")
self.assertEqual(show_backups[1]['Mode'], "FULL")
self.assertEqual(show_backups[1]['Status'], "OK")
self.assertEqual(show_backups[0]['backup-mode'], "FULL")
self.assertEqual(show_backups[0]['status'], "OK")
self.assertEqual(show_backups[1]['backup-mode'], "FULL")
self.assertEqual(show_backups[1]['status'], "OK")
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@ -508,10 +508,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
# START RESTORED NODE
restored_node.append_conf(
"postgresql.auto.conf", "port = {0}".format(restored_node.port))
restored_node.start()
while restored_node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
restored_node.slow_start()
result_new = restored_node.safe_psql(
"postgres", "select * from pgbench_accounts")
@ -946,11 +943,8 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
# START RESTORED NODE
node_restored.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node_restored.port))
node_restored.start()
node_restored.slow_start()
while node_restored.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
result_new = node_restored.safe_psql(
"postgres", "select * from t_heap")
@ -1191,7 +1185,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
f.close
self.assertTrue(
self.show_pb(backup_dir, 'node')[1]['Status'] == 'OK',
self.show_pb(backup_dir, 'node')[1]['status'] == 'OK',
"Backup Status should be OK")
# Clean after yourself
@ -1264,7 +1258,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
repr(e.message), self.cmd))
self.assertTrue(
self.show_pb(backup_dir, 'node')[1]['Status'] == 'ERROR',
self.show_pb(backup_dir, 'node')[1]['status'] == 'ERROR',
"Backup Status should be ERROR")
# Clean after yourself

View File

@ -43,25 +43,33 @@ class ExcludeTest(ProbackupTest, unittest.TestCase):
"WHERE oid = pg_my_temp_schema()")[0][0]
conn.commit()
temp_toast_schema_name = "pg_toast_" + temp_schema_name.replace("pg_", "")
temp_toast_schema_name = "pg_toast_" + temp_schema_name.replace(
"pg_", "")
conn.commit()
conn.execute("create index test_idx on test (generate_series)")
conn.commit()
heap_path = conn.execute("select pg_relation_filepath('test')")[0][0]
heap_path = conn.execute(
"select pg_relation_filepath('test')")[0][0]
conn.commit()
index_path = conn.execute("select pg_relation_filepath('test_idx')")[0][0]
index_path = conn.execute(
"select pg_relation_filepath('test_idx')")[0][0]
conn.commit()
heap_oid = conn.execute("select 'test'::regclass::oid")[0][0]
conn.commit()
toast_path = conn.execute("select pg_relation_filepath('{0}.{1}')".format(temp_toast_schema_name, "pg_toast_" + str(heap_oid)))[0][0]
toast_path = conn.execute(
"select pg_relation_filepath('{0}.{1}')".format(
temp_toast_schema_name, "pg_toast_" + str(heap_oid)))[0][0]
conn.commit()
toast_idx_path = conn.execute("select pg_relation_filepath('{0}.{1}')".format(temp_toast_schema_name, "pg_toast_" + str(heap_oid) + "_index"))[0][0]
toast_idx_path = conn.execute(
"select pg_relation_filepath('{0}.{1}')".format(
temp_toast_schema_name,
"pg_toast_" + str(heap_oid) + "_index"))[0][0]
conn.commit()
temp_table_filename = os.path.basename(heap_path)

View File

@ -25,6 +25,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
[--replica-timeout=timeout]
pg_probackup show-config -B backup-dir --instance=instance_name
[--format=format]
pg_probackup backup -B backup-path -b backup-mode --instance=instance_name
[-C] [--stream [-S slot-name]] [--backup-pg-log]
@ -61,10 +62,12 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
pg_probackup validate -B backup-dir [--instance=instance_name]
[-i backup-id] [--progress]
[--time=time|--xid=xid [--inclusive=boolean]]
[--recovery-target-name=target-name]
[--timeline=timeline]
pg_probackup show -B backup-dir
[--instance=instance_name [-i backup-id]]
[--format=format]
pg_probackup delete -B backup-dir --instance=instance_name
[--wal] [-i backup-id | --expired]

View File

@ -191,7 +191,7 @@ class FalsePositive(ProbackupTest, unittest.TestCase):
node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
node.start()
node.slow_start()
# Logical comparison
self.assertEqual(
result,
@ -290,7 +290,7 @@ class FalsePositive(ProbackupTest, unittest.TestCase):
node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
node.start()
node.slow_start()
# Logical comparison
self.assertEqual(
result,

View File

@ -12,6 +12,7 @@ import select
import psycopg2
from time import sleep
import re
import json
idx_ptrack = {
't_heap': {
@ -111,6 +112,39 @@ class ProbackupException(Exception):
return '\n ERROR: {0}\n CMD: {1}'.format(repr(self.message), self.cmd)
def slow_start(self, replica=False):
# wait for https://github.com/postgrespro/testgres/pull/50
# self.poll_query_until(
# "postgres",
# "SELECT not pg_is_in_recovery()",
# raise_operational_error=False)
self.start()
if not replica:
while True:
try:
self.poll_query_until(
"postgres",
"SELECT not pg_is_in_recovery()")
break
except Exception as e:
continue
else:
self.poll_query_until(
"postgres",
"SELECT pg_is_in_recovery()")
# while True:
# try:
# self.poll_query_until(
# "postgres",
# "SELECT pg_is_in_recovery()")
# break
# except ProbackupException as e:
# continue
class ProbackupTest(object):
# Class attributes
enterprise = is_enterprise()
@ -204,6 +238,8 @@ class ProbackupTest(object):
os.makedirs(real_base_dir)
node = testgres.get_new_node('test', base_dir=real_base_dir)
# bound method slow_start() to 'node' class instance
node.slow_start = slow_start.__get__(node)
node.should_rm_dirs = True
node.init(
initdb_params=initdb_params, allow_streaming=set_replication)
@ -598,7 +634,7 @@ class ProbackupTest(object):
def show_pb(
self, backup_dir, instance=None, backup_id=None,
options=[], as_text=False
options=[], as_text=False, as_json=True
):
backup_list = []
@ -613,63 +649,83 @@ class ProbackupTest(object):
if backup_id:
cmd_list += ["-i", backup_id]
if as_json:
cmd_list += ["--format=json"]
if as_text:
# You should print it when calling as_text=true
return self.run_pb(cmd_list + options)
# get show result as list of lines
show_splitted = self.run_pb(cmd_list + options).splitlines()
if instance is not None and backup_id is None:
# cut header(ID, Mode, etc) from show as single string
header = show_splitted[1:2][0]
# cut backup records from show as single list
# with string for every backup record
body = show_splitted[3:]
# inverse list so oldest record come first
body = body[::-1]
# split string in list with string for every header element
header_split = re.split(" +", header)
# Remove empty items
for i in header_split:
if i == '':
header_split.remove(i)
if as_json:
data = json.loads(self.run_pb(cmd_list + options))
# print(data)
for instance_data in data:
# find specific instance if requested
if instance and instance_data['instance'] != instance:
continue
header_split = [
header_element.rstrip() for header_element in header_split
]
for backup_record in body:
backup_record = backup_record.rstrip()
# split list with str for every backup record element
backup_record_split = re.split(" +", backup_record)
# Remove empty items
for i in backup_record_split:
if i == '':
backup_record_split.remove(i)
if len(header_split) != len(backup_record_split):
print(warning.format(
header=header, body=body,
header_split=header_split,
body_split=backup_record_split)
)
exit(1)
new_dict = dict(zip(header_split, backup_record_split))
backup_list.append(new_dict)
for backup in reversed(instance_data['backups']):
# find specific backup if requested
if backup_id:
if backup['id'] == backup_id:
return backup
else:
backup_list.append(backup)
return backup_list
else:
# cut out empty lines and lines started with #
# and other garbage then reconstruct it as dictionary
# print show_splitted
sanitized_show = [item for item in show_splitted if item]
sanitized_show = [
item for item in sanitized_show if not item.startswith('#')
]
# print sanitized_show
for line in sanitized_show:
name, var = line.partition(" = ")[::2]
var = var.strip('"')
var = var.strip("'")
specific_record[name.strip()] = var
return specific_record
show_splitted = self.run_pb(cmd_list + options).splitlines()
if instance is not None and backup_id is None:
# cut header(ID, Mode, etc) from show as single string
header = show_splitted[1:2][0]
# cut backup records from show as single list
# with string for every backup record
body = show_splitted[3:]
# inverse list so oldest record come first
body = body[::-1]
# split string in list with string for every header element
header_split = re.split(" +", header)
# Remove empty items
for i in header_split:
if i == '':
header_split.remove(i)
continue
header_split = [
header_element.rstrip() for header_element in header_split
]
for backup_record in body:
backup_record = backup_record.rstrip()
# split list with str for every backup record element
backup_record_split = re.split(" +", backup_record)
# Remove empty items
for i in backup_record_split:
if i == '':
backup_record_split.remove(i)
if len(header_split) != len(backup_record_split):
print(warning.format(
header=header, body=body,
header_split=header_split,
body_split=backup_record_split)
)
exit(1)
new_dict = dict(zip(header_split, backup_record_split))
backup_list.append(new_dict)
return backup_list
else:
# cut out empty lines and lines started with #
# and other garbage then reconstruct it as dictionary
# print show_splitted
sanitized_show = [item for item in show_splitted if item]
sanitized_show = [
item for item in sanitized_show if not item.startswith('#')
]
# print sanitized_show
for line in sanitized_show:
name, var = line.partition(" = ")[::2]
var = var.strip('"')
var = var.strip("'")
specific_record[name.strip()] = var
return specific_record
def validate_pb(
self, backup_dir, instance=None,

View File

@ -3,7 +3,6 @@ import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
from datetime import datetime, timedelta
import subprocess
import time
module_name = 'page'
@ -33,8 +32,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
}
)
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname),
)
base_dir="{0}/{1}/node_restored".format(module_name, fname))
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
@ -49,32 +47,27 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
"create table t_heap tablespace somedata as select i as id, "
"md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,1024) i;"
)
"from generate_series(0,1024) i;")
node.safe_psql(
"postgres",
"vacuum t_heap"
)
"vacuum t_heap")
self.backup_node(backup_dir, 'node', node)
node.safe_psql(
"postgres",
"delete from t_heap where ctid >= '(11,0)'"
)
"delete from t_heap where ctid >= '(11,0)'")
node.safe_psql(
"postgres",
"vacuum t_heap"
)
"vacuum t_heap")
self.backup_node(
backup_dir, 'node', node, backup_type='page',
options=['--log-level-file=verbose']
)
options=['--log-level-file=verbose'])
self.backup_node(
backup_dir, 'node', node, backup_type='page'
)
backup_dir, 'node', node, backup_type='page')
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)
@ -87,8 +80,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
options=[
"-j", "4",
"-T", "{0}={1}".format(old_tablespace, new_tablespace),
"--recovery-target-action=promote"]
)
"--recovery-target-action=promote"])
# Physical comparison
if self.paranoia:
@ -97,21 +89,17 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.start()
while node_restored.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node_restored.slow_start()
# Logical comparison
result1 = node.safe_psql(
"postgres",
"select * from t_heap"
)
"select * from t_heap")
result2 = node_restored.safe_psql(
"postgres",
"select * from t_heap"
)
"select * from t_heap")
self.assertEqual(result1, result2)
# Clean after yourself
@ -175,7 +163,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
backup_id=full_backup_id, options=["-j", "4"]),
'\n Unexpected Error Message: {0}\n'
' CMD: {1}'.format(repr(self.output), self.cmd))
node.start()
node.slow_start()
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()
@ -188,7 +176,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
backup_id=page_backup_id, options=["-j", "4"]),
'\n Unexpected Error Message: {0}\n'
' CMD: {1}'.format(repr(self.output), self.cmd))
node.start()
node.slow_start()
page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(page_result, page_result_new)
node.cleanup()
@ -254,7 +242,8 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()
@ -271,7 +260,8 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
node.slow_start()
page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(page_result, page_result_new)
node.cleanup()
@ -349,10 +339,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
# START RESTORED NODE
restored_node.append_conf(
"postgresql.auto.conf", "port = {0}".format(restored_node.port))
restored_node.start()
while restored_node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
restored_node.slow_start()
result_new = restored_node.safe_psql(
"postgres", "select * from pgbench_accounts")

View File

@ -63,7 +63,7 @@ class ArchiveCheck(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
backup_id = self.show_pb(backup_dir, 'node')[0]['ID']
backup_id = self.show_pb(backup_dir, 'node')[0]['id']
self.assertEqual(
'ERROR', self.show_pb(backup_dir, 'node', backup_id)['status'],
'Backup should have ERROR status')

View File

@ -268,7 +268,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# Physical comparison
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node_restored.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
node_restored.append_conf(
@ -430,7 +431,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# Physical comparison
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node_restored.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
node_restored.append_conf(
@ -503,7 +505,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# Physical comparison
if self.paranoia:
pgdata_restored = self.pgdata_content(node.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
node.start()
@ -583,10 +586,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd)
)
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
full_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()
@ -604,13 +604,11 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
)
if self.paranoia:
pgdata_restored = self.pgdata_content(node.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
ptrack_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(ptrack_result, ptrack_result_new)
@ -687,10 +685,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd)
)
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
full_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()
@ -711,13 +707,11 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
)
if self.paranoia:
pgdata_restored = self.pgdata_content(node.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
ptrack_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(ptrack_result, ptrack_result_new)
@ -811,7 +805,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_page_pgpro417(self):
"""Make archive node, take full backup, take page backup, delete page backup. Try to take ptrack backup, which should fail"""
"""
Make archive node, take full backup, take page backup,
delete page backup. Try to take ptrack backup, which should fail
"""
self.maxDiff = None
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@ -880,7 +877,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_full_pgpro417(self):
"""Make node, take two full backups, delete full second backup. Try to take ptrack backup, which should fail"""
"""
Make node, take two full backups, delete full second backup.
Try to take ptrack backup, which should fail
"""
self.maxDiff = None
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@ -954,7 +954,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_create_db(self):
"""Make node, take full backup, create database db1, take ptrack backup, restore database and check it presense"""
"""
Make node, take full backup, create database db1, take ptrack backup,
restore database and check it presense
"""
self.maxDiff = None
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@ -1017,7 +1020,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# COMPARE PHYSICAL CONTENT
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node_restored.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
# START RESTORED NODE
@ -1046,7 +1050,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# COMPARE PHYSICAL CONTENT
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node_restored.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
# START RESTORED NODE
@ -1151,17 +1156,15 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# GET RESTORED PGDATA AND COMPARE
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node_restored.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
# START RESTORED NODE
node_restored.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node_restored.port))
node_restored.start()
node_restored.slow_start()
while node_restored.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
result_new = node_restored.safe_psql(
"postgres", "select * from t_heap")
@ -1229,7 +1232,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# GET PHYSICAL CONTENT and COMPARE PHYSICAL CONTENT
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node_restored.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
# START RESTORED NODE
@ -1240,7 +1244,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_drop_tablespace(self):
"""Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup"""
"""
Make node, create table, alter table tablespace, take ptrack backup,
move table from tablespace, take ptrack backup
"""
self.maxDiff = None
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@ -1321,7 +1328,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_ptrack_alter_tablespace(self):
"""Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup"""
"""
Make node, create table, alter table tablespace, take ptrack backup,
move table from tablespace, take ptrack backup
"""
self.maxDiff = None
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@ -1379,16 +1389,14 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# GET PHYSICAL CONTENT FROM RESTORED NODE and COMPARE PHYSICAL CONTENT
if self.paranoia:
pgdata_restored = self.pgdata_content(restored_node.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
restored_node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
# START RESTORED NODE
restored_node.append_conf(
"postgresql.auto.conf", "port = {0}".format(restored_node.port))
restored_node.start()
while restored_node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
restored_node.slow_start()
# COMPARE LOGICAL CONTENT
result_new = restored_node.safe_psql(
@ -1416,17 +1424,14 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# GET PHYSICAL CONTENT FROM RESTORED NODE and COMPARE PHYSICAL CONTENT
if self.paranoia:
pgdata_restored = self.pgdata_content(restored_node.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
restored_node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
# START RESTORED NODE
restored_node.append_conf(
"postgresql.auto.conf", "port = {0}".format(restored_node.port))
restored_node.start()
while restored_node.safe_psql(
"postgres",
"select pg_is_in_recovery()") == 't\n':
time.sleep(1)
restored_node.slow_start()
result_new = restored_node.safe_psql(
"postgres", "select * from t_heap")
@ -1437,7 +1442,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_ptrack_multiple_segments(self):
"""Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup"""
"""
Make node, create table, alter table tablespace,
take ptrack backup, move table from tablespace, take ptrack backup
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
@ -1446,9 +1454,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica', 'max_wal_senders': '2',
'ptrack_enable': 'on', 'fsync': 'off', 'shared_buffers': '128MB',
'maintenance_work_mem': '1GB', 'autovacuum': 'off',
'full_page_writes': 'off'}
'ptrack_enable': 'on', 'fsync': 'off',
'autovacuum': 'off',
'full_page_writes': 'off'
}
)
self.init_pb(backup_dir)
@ -1514,17 +1523,14 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# GET PHYSICAL CONTENT FROM NODE_RESTORED
if self.paranoia:
pgdata_restored = self.pgdata_content(restored_node.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
restored_node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
# START RESTORED NODE
restored_node.append_conf(
"postgresql.auto.conf", "port = {0}".format(restored_node.port))
restored_node.start()
while restored_node.safe_psql(
"postgres",
"select pg_is_in_recovery()") == 't\n':
time.sleep(1)
restored_node.slow_start()
result_new = restored_node.safe_psql(
"postgres",

View File

@ -46,7 +46,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.backup_node(backup_dir, 'node', node, options=['-j100', '--stream'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
node.safe_psql('postgres', 'cluster t_heap using t_btree')
@ -103,7 +103,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.backup_node(backup_dir, 'node', node, options=['-j100', '--stream'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
node.safe_psql('postgres', 'cluster t_heap using t_gist')
@ -172,7 +172,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.backup_node(backup_dir, 'replica', replica, options=['-j100', '--stream',
self.backup_node(backup_dir, 'replica', replica, options=['-j10', '--stream',
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
@ -242,7 +242,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.backup_node(backup_dir, 'replica', replica, options=['-j100', '--stream',
self.backup_node(backup_dir, 'replica', replica, options=['-j10', '--stream',
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')

View File

@ -45,7 +45,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
# Make full backup to clean every ptrack
self.backup_node(backup_dir, 'node', node, options=['-j100', '--stream'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
for i in idx_ptrack:
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
@ -100,7 +100,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
# Make full backup to clean every ptrack
self.backup_node(backup_dir, 'replica', replica, options=['-j100', '--stream'])
self.backup_node(backup_dir, 'replica', replica, options=['-j10', '--stream'])
for i in idx_ptrack:
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])

View File

@ -45,7 +45,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
# Make full backup to clean every ptrack
self.backup_node(backup_dir, 'node', node, options=['-j100', '--stream'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
for i in idx_ptrack:
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])

View File

@ -43,7 +43,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.backup_node(backup_dir, 'node', node, options=['-j100', '--stream'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
node.safe_psql('postgres', 'vacuum freeze t_heap')
node.safe_psql('postgres', 'checkpoint')
@ -111,7 +111,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
# Take PTRACK backup to clean every ptrack
self.backup_node(backup_dir, 'replica', replica, options=['-j100',
self.backup_node(backup_dir, 'replica', replica, options=['-j10',
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
master.safe_psql('postgres', 'vacuum freeze t_heap')

View File

@ -44,7 +44,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.backup_node(backup_dir, 'node', node, options=['-j100', '--stream'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
node.safe_psql('postgres', 'vacuum full t_heap')

View File

@ -44,7 +44,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
self.backup_node(backup_dir, 'node', node, options=['-j100', '--stream'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
node.safe_psql('postgres', 'delete from t_heap where id > 128;')
node.safe_psql('postgres', 'vacuum t_heap')
@ -116,7 +116,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
# Take PTRACK backup to clean every ptrack
self.backup_node(backup_dir, 'replica', replica, options=['-j100',
self.backup_node(backup_dir, 'replica', replica, options=['-j10',
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
master.safe_psql('postgres', 'delete from t_heap where id > 128;')

View File

@ -50,7 +50,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
self.set_replica(master, replica)
# Check data correctness on replica
replica.start(["-t", "600"])
replica.slow_start(replica=True)
after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
@ -82,7 +82,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
self.restore_node(backup_dir, 'replica', data_dir=node.data_dir)
node.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node.port))
node.start()
node.slow_start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
@ -113,7 +113,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
backup_dir, 'replica', data_dir=node.data_dir, backup_id=backup_id)
node.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node.port))
node.start()
node.slow_start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
@ -143,7 +143,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
self.set_archiving(backup_dir, 'master', master)
# force more frequent wal switch
master.append_conf('postgresql.auto.conf', 'archive_timeout = 10')
master.start()
master.slow_start()
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
@ -166,7 +166,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
# Settings for Replica
self.set_replica(master, replica)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start(["-t", "600"])
replica.slow_start(replica=True)
# Check data correctness on replica
after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
@ -200,7 +200,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
self.restore_node(backup_dir, 'replica', data_dir=node.data_dir)
node.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node.port))
node.start()
node.slow_start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
@ -231,7 +231,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
backup_dir, 'replica', data_dir=node.data_dir, backup_id=backup_id)
node.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node.port))
node.start()
node.slow_start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
@ -260,7 +260,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
self.set_archiving(backup_dir, 'master', master)
# force more frequent wal switch
master.append_conf('postgresql.auto.conf', 'archive_timeout = 10')
master.start()
master.slow_start()
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
@ -287,15 +287,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(replica.port))
replica.start(["-t", "600"])
time.sleep(1)
self.assertEqual(
master.safe_psql(
"postgres",
"select exists(select * from pg_stat_replication)"
).rstrip(),
't')
replica.start()
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@ -53,10 +53,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
recovery_conf = os.path.join(node.data_dir, "recovery.conf")
self.assertEqual(os.path.isfile(recovery_conf), True)
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
@ -104,10 +101,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
@ -149,11 +143,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start(params=['-t', '10'])
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
pgbench = node.pgbench(
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
options=['-T', '10', '-c', '2', '--no-vacuum'])
@ -181,11 +171,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
node)["recovery_target_timeline"]
self.assertEqual(int(recovery_target_timeline), target_tli)
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
@ -234,11 +220,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
@ -297,11 +279,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
after = node.safe_psql("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
self.assertEqual(
@ -366,11 +344,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
self.assertEqual(
@ -420,11 +394,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
@ -479,11 +449,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
@ -535,11 +501,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
@ -602,11 +564,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
bbalance = node.execute(
"postgres", "SELECT sum(bbalance) FROM pgbench_branches")
delta = node.execute(
@ -674,11 +632,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
options=["-j", "4", "--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
bbalance = node.execute(
"postgres", "SELECT sum(bbalance) FROM pgbench_branches")
delta = node.execute(
@ -718,7 +672,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
con.commit()
backup_id = self.backup_node(backup_dir, 'node', node)
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
# 1 - Try to restore to existing directory
node.stop()
@ -769,10 +723,8 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
result = node.execute("postgres", "SELECT id FROM test")
self.assertEqual(result[0][0], 1)
@ -785,8 +737,8 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
backup_dir, 'node', node, backup_type="page")
show_pb = self.show_pb(backup_dir, 'node')
self.assertEqual(show_pb[1]['Status'], "OK")
self.assertEqual(show_pb[2]['Status'], "OK")
self.assertEqual(show_pb[1]['status'], "OK")
self.assertEqual(show_pb[2]['status'], "OK")
node.stop()
node.cleanup()
@ -802,10 +754,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
result = node.execute("postgres", "SELECT id FROM test OFFSET 1")
self.assertEqual(result[0][0], 2)
@ -829,7 +778,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# Full backup
self.backup_node(backup_dir, 'node', node)
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
# Create tablespace
tblspc_path = os.path.join(node.base_dir, "tblspc")
@ -845,8 +794,9 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# First page backup
self.backup_node(backup_dir, 'node', node, backup_type="page")
self.assertEqual(self.show_pb(backup_dir, 'node')[1]['Status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node')[1]['Mode'], "PAGE")
self.assertEqual(self.show_pb(backup_dir, 'node')[1]['status'], "OK")
self.assertEqual(
self.show_pb(backup_dir, 'node')[1]['backup-mode'], "PAGE")
# Create tablespace table
with node.connect("postgres") as con:
@ -862,8 +812,9 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# Second page backup
backup_id = self.backup_node(
backup_dir, 'node', node, backup_type="page")
self.assertEqual(self.show_pb(backup_dir, 'node')[2]['Status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node')[2]['Mode'], "PAGE")
self.assertEqual(self.show_pb(backup_dir, 'node')[2]['status'], "OK")
self.assertEqual(
self.show_pb(backup_dir, 'node')[2]['backup-mode'], "PAGE")
node.stop()
node.cleanup()
@ -879,10 +830,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
"--recovery-target-action=promote"]),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
count = node.execute("postgres", "SELECT count(*) FROM tbl")
self.assertEqual(count[0][0], 4)
@ -933,10 +881,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
result = node.psql("postgres", 'select * from t_heap')
self.assertTrue('does not exist' in result[2].decode("utf-8"))
@ -985,11 +930,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
result = node.psql("postgres", 'select * from t_heap')
self.assertTrue('does not exist' in result[2].decode("utf-8"))
@ -1037,10 +978,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(self.output), self.cmd))
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
result = node.psql("postgres", 'select * from t_heap')
self.assertEqual(True, 'does not exist' in result[2].decode("utf-8"))
@ -1095,10 +1033,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
pgdata_restored = self.pgdata_content(node.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
result = node.psql("postgres", 'select * from t_heap')
self.assertTrue('does not exist' in result[2].decode("utf-8"))
@ -1147,10 +1082,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
"--recovery-target-name=savepoint",
"--recovery-target-action=promote"])
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
time.sleep(1)
node.slow_start()
result_new = node.safe_psql("postgres", "select * from t_heap")
res = node.psql("postgres", "select * from t_heap_1")

View File

@ -14,7 +14,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
def test_retention_redundancy_1(self):
"""purge backups using redundancy-based retention policy"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
@ -24,7 +25,9 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
self.set_archiving(backup_dir, 'node', node)
node.start()
with open(os.path.join(backup_dir, 'backups', 'node', "pg_probackup.conf"), "a") as conf:
with open(os.path.join(
backup_dir, 'backups', 'node',
"pg_probackup.conf"), "a") as conf:
conf.write("retention-redundancy = 1\n")
# Make backups to be purged
@ -57,7 +60,7 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
for wal_name in os.listdir(os.path.join(backup_dir, 'wal', 'node')):
if not wal_name.endswith(".backup"):
#wal_name_b = wal_name.encode('ascii')
# wal_name_b = wal_name.encode('ascii')
self.assertEqual(wal_name[8:] > min_wal[8:], True)
self.assertEqual(wal_name[8:] > max_wal[8:], True)
@ -68,7 +71,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
def test_retention_window_2(self):
"""purge backups using window-based retention policy"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)

View File

@ -36,6 +36,35 @@ class OptionTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_show_json(self):
"""Status DONE and OK"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
self.assertEqual(
self.backup_node(
backup_dir, 'node', node,
options=["--log-level-console=panic"]),
None
)
self.backup_node(backup_dir, 'node', node)
self.assertIn("OK", self.show_pb(backup_dir, 'node', as_text=True))
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_corrupt_2(self):
"""Status CORRUPT"""

View File

@ -995,7 +995,8 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
backup_id = self.backup_node(backup_dir, 'node', node)
target_xid = None
with node.connect("postgres") as con:
res = con.execute("INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)")
res = con.execute(
"INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)")
con.commit()
target_xid = res[0][0]
@ -1128,7 +1129,10 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_validate_corrupt_wal_between_backups(self):
"""make archive node, make full backup, corrupt all wal files, run validate to real xid, expect errors"""
"""
make archive node, make full backup, corrupt all wal files,
run validate to real xid, expect errors
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
@ -1170,7 +1174,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
else:
walfile = node.safe_psql(
'postgres',
'select pg_walfile_name(pg_current_wal_location())').rstrip()
'select pg_walfile_name(pg_current_wal_lsn())').rstrip()
if self.archive_compress:
walfile = walfile + '.gz'
@ -1221,12 +1225,12 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
self.assertEqual(
'OK',
self.show_pb(backup_dir, 'node')[0]['Status'],
self.show_pb(backup_dir, 'node')[0]['status'],
'Backup STATUS should be "OK"')
self.assertEqual(
'OK',
self.show_pb(backup_dir, 'node')[1]['Status'],
self.show_pb(backup_dir, 'node')[1]['status'],
'Backup STATUS should be "OK"')
# Clean after yourself
@ -1295,7 +1299,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
self.assertEqual(
'ERROR',
self.show_pb(backup_dir, 'node')[1]['Status'],
self.show_pb(backup_dir, 'node')[1]['status'],
'Backup {0} should have STATUS "ERROR"')
# Clean after yourself
@ -1403,7 +1407,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
self.restore_node(backup_dir, 'node1', data_dir=node2.data_dir)
node2.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node2.port))
node2.start()
node2.slow_start()
timeline_node1 = node1.get_control_data()["Latest checkpoint's TimeLineID"]
timeline_node2 = node2.get_control_data()["Latest checkpoint's TimeLineID"]
@ -1492,7 +1496,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
except ProbackupException as e:
pass
self.assertTrue(
self.show_pb(backup_dir, 'node')[6]['Status'] == 'ERROR')
self.show_pb(backup_dir, 'node')[6]['status'] == 'ERROR')
self.set_archiving(backup_dir, 'node', node)
node.reload()
self.backup_node(backup_dir, 'node', node, backup_type='page')
@ -1527,14 +1531,19 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
self.assertTrue(self.show_pb(backup_dir, 'node')[0]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[1]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[3]['Status'] == 'CORRUPT')
self.assertTrue(self.show_pb(backup_dir, 'node')[4]['Status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[5]['Status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[6]['Status'] == 'ERROR')
self.assertTrue(self.show_pb(backup_dir, 'node')[7]['Status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK')
self.assertTrue(
self.show_pb(backup_dir, 'node')[3]['status'] == 'CORRUPT')
self.assertTrue(
self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN')
self.assertTrue(
self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN')
self.assertTrue(
self.show_pb(backup_dir, 'node')[6]['status'] == 'ERROR')
self.assertTrue(
self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN')
os.rename(file_new, file)
try:
@ -1546,14 +1555,15 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
self.assertTrue(self.show_pb(backup_dir, 'node')[0]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[1]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[3]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[4]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[5]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[6]['Status'] == 'ERROR')
self.assertTrue(self.show_pb(backup_dir, 'node')[7]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK')
self.assertTrue(
self.show_pb(backup_dir, 'node')[6]['status'] == 'ERROR')
self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK')
# Clean after yourself
self.del_test_dir(module_name, fname)
@ -1624,13 +1634,13 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
self.assertTrue(self.show_pb(backup_dir, 'node')[0]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[1]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[3]['Status'] == 'CORRUPT')
self.assertTrue(self.show_pb(backup_dir, 'node')[4]['Status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[5]['Status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[6]['Status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'CORRUPT')
self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN')
os.rename(file_new, file)
file = os.path.join(
@ -1649,13 +1659,13 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
self.assertTrue(self.show_pb(backup_dir, 'node')[0]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[1]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[3]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[4]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[5]['Status'] == 'CORRUPT')
self.assertTrue(self.show_pb(backup_dir, 'node')[6]['Status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'CORRUPT')
self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN')
# Clean after yourself
self.del_test_dir(module_name, fname)

240
win32build.pl Normal file
View File

@ -0,0 +1,240 @@
#!/usr/bin/perl
# win32build.pl - generate an MSVC project file for building pg_probackup
# on Windows.  Collects include and library search paths for the optional
# PostgreSQL dependencies (from tools/msvc/config.pl) and substitutes them
# into the project template msvs/template.pg_probackup.vcxproj.
use JSON;
our $repack_version;
our $pgdir;   # root of an installed PostgreSQL tree (must have bin/include/lib)
our $pgsrc;   # root of the PostgreSQL source tree (for tools/msvc/config.pl)
if (@ARGV!=2) {
	# Fixed typos in the usage message ("postgress-instalation-root").
	print STDERR "Usage $0 postgres-installation-root pg-source-dir \n";
	exit 1;
}
our $liblist="";
$pgdir = shift @ARGV;
$pgsrc = shift @ARGV if @ARGV;
# Target architecture comes from the environment; normalize to the
# spellings MSVC project files expect ('Win32' or 'x64').
our $arch = $ENV{'ARCH'} || "x64";
$arch='Win32' if ($arch eq 'x86' || $arch eq 'X86');
$arch='x64' if $arch eq 'X64';
# config.pl defines $config, describing which optional components
# (zlib, openssl, icu, ...) the PostgreSQL build was configured with.
$conffile = $pgsrc."/tools/msvc/config.pl";
die 'Could not find config.pl'
 unless (-f $conffile);
our $config;
do $conffile;
if (! -d "$pgdir/bin" || !-d "$pgdir/include" || !-d "$pgdir/lib") {
	print STDERR "Directory $pgdir doesn't look like root of postgresql installation\n";
	exit 1;
}
# Accumulators filled by AddProject() via the Add* helpers below.
our $includepath="";
our $libpath="";
our $libpath32="";
AddProject();
print "\n\n";
print $libpath."\n";
print $includepath."\n";
# substitute new path in the project files
preprocess_project("./msvs/template.pg_probackup.vcxproj","./msvs/pg_probackup.vcxproj");
exit 0;
# Copy the template project $in to $out, expanding the @PGROOT@,
# @ADDLIBS@, @ADDLIBS32@, @PGSRC@ and @ADDINCLUDE@ placeholders with the
# path lists accumulated by AddProject().
sub preprocess_project {
	my ($in, $out) = @_;
	our $pgdir;
	our $adddir;
	# $libs is computed for parity with the original flow but is not
	# referenced afterwards.
	my $libs = defined($adddir) ? "$adddir;" : "";
	open IN,"<",$in or die "Cannot open $in: $!\n";
	open OUT,">",$out or die "Cannot open $out: $!\n";
	while (my $line = <IN>) {
		$line =~ s/\@PGROOT\@/$pgdir/g;
		$line =~ s/\@ADDLIBS\@/$libpath/g;
		$line =~ s/\@ADDLIBS32\@/$libpath32/g;
		$line =~ s/\@PGSRC\@/$pgsrc/g;
		$line =~ s/\@ADDINCLUDE\@/$includepath/g;
		print OUT $line;
	}
	close IN;
	close OUT;
}
# my sub
# Append a library path to BOTH the 64-bit ($libpath) and 32-bit
# ($libpath32) lists, kept as single ';'-separated strings for the
# .vcxproj template.  Extra arguments (if any) are ignored.
sub AddLibrary
{
	my $lib = shift;
	$libpath   = ($libpath   eq '') ? $lib : "$libpath;$lib";
	$libpath32 = ($libpath32 eq '') ? $lib : "$libpath32;$lib";
}
# Append a library path to the 32-bit-only list ($libpath32),
# inserting the ';' separator when the list is already non-empty.
sub AddLibrary32
{
	my $lib = shift;
	$libpath32 = ($libpath32 eq '') ? $lib : "$libpath32;$lib";
}
# Append a library path to the 64-bit-only list ($libpath),
# inserting the ';' separator when the list is already non-empty.
sub AddLibrary64
{
	my $lib = shift;
	$libpath = ($libpath eq '') ? $lib : "$libpath;$lib";
}
# Append a directory to the ';'-separated include path list
# ($includepath) used by the .vcxproj template.
sub AddIncludeDir
{
	my $dir = shift;
	$includepath = ($includepath eq '') ? $dir : "$includepath;$dir";
}
sub AddProject
{
# Translate the optional-dependency settings from PostgreSQL's
# tools/msvc/config.pl ($config) into include and library search
# paths via the AddIncludeDir/AddLibrary* helpers.  Paths land in
# the globals $includepath/$libpath/$libpath32 in the order added.
# my ($self, $name, $type, $folder, $initialdir) = @_;
if ($config->{zlib})
{
AddIncludeDir($config->{zlib} . '\include');
AddLibrary($config->{zlib} . '\lib\zdll.lib');
}
if ($config->{openssl})
{
AddIncludeDir($config->{openssl} . '\include');
# NOTE(review): probes for ssleay32MD.lib but links ssleay32.lib;
# pattern inherited from PostgreSQL's Solution.pm - confirm against
# the OpenSSL layout actually installed on the build host.
# The trailing 1/0 argument is ignored by this script's AddLibrary().
if (-e "$config->{openssl}/lib/VC/ssleay32MD.lib")
{
AddLibrary(
$config->{openssl} . '\lib\VC\ssleay32.lib', 1);
AddLibrary(
$config->{openssl} . '\lib\VC\libeay32.lib', 1);
}
else
{
# We don't expect the config-specific library to be here,
# so don't ask for it in last parameter
AddLibrary(
$config->{openssl} . '\lib\ssleay32.lib', 0);
AddLibrary(
$config->{openssl} . '\lib\libeay32.lib', 0);
}
}
if ($config->{nls})
{
AddIncludeDir($config->{nls} . '\include');
AddLibrary($config->{nls} . '\lib\libintl.lib');
}
if ($config->{gss})
{
# Kerberos/GSSAPI: only i386 import libraries are registered here.
AddIncludeDir($config->{gss} . '\inc\krb5');
AddLibrary($config->{gss} . '\lib\i386\krb5_32.lib');
AddLibrary($config->{gss} . '\lib\i386\comerr32.lib');
AddLibrary($config->{gss} . '\lib\i386\gssapi32.lib');
}
if ($config->{iconv})
{
AddIncludeDir($config->{iconv} . '\include');
AddLibrary($config->{iconv} . '\lib\iconv.lib');
}
if ($config->{icu})
{
# ICU ships separate 32/64-bit import libraries; register each with
# the matching arch-specific list.
AddIncludeDir($config->{icu} . '\include');
AddLibrary32($config->{icu} . '\lib\icuin.lib');
AddLibrary32($config->{icu} . '\lib\icuuc.lib');
AddLibrary32($config->{icu} . '\lib\icudt.lib');
AddLibrary64($config->{icu} . '\lib64\icuin.lib');
AddLibrary64($config->{icu} . '\lib64\icuuc.lib');
AddLibrary64($config->{icu} . '\lib64\icudt.lib');
}
if ($config->{xml})
{
AddIncludeDir($config->{xml} . '\include');
AddIncludeDir($config->{xml} . '\include\libxml2');
AddLibrary($config->{xml} . '\lib\libxml2.lib');
}
if ($config->{xslt})
{
AddIncludeDir($config->{xslt} . '\include');
AddLibrary($config->{xslt} . '\lib\libxslt.lib');
}
if ($config->{libedit})
{
AddIncludeDir($config->{libedit} . '\include');
# AddLibrary($config->{libedit} . "\\" .
# ($arch eq 'x64'? 'lib64': 'lib32').'\edit.lib');
AddLibrary32($config->{libedit} . '\\lib32\edit.lib');
AddLibrary64($config->{libedit} . '\\lib64\edit.lib');
}
if ($config->{uuid})
{
AddIncludeDir($config->{uuid} . '\include');
AddLibrary($config->{uuid} . '\lib\uuid.lib');
}
if ($config->{zstd})
{
# zstd's config value is the directory itself (headers live at its root).
AddIncludeDir($config->{zstd});
# AddLibrary($config->{zstd}. "\\".($arch eq 'x64'? "zstdlib_x64.lib" : "zstdlib_x86.lib"));
AddLibrary32($config->{zstd}. "\\zstdlib_x86.lib");
AddLibrary64($config->{zstd}. "\\zstdlib_x64.lib") ;
}
# return $proj;
}

240
win32build96.pl Normal file
View File

@ -0,0 +1,240 @@
#!/usr/bin/perl
# win32build96.pl - generate an MSVC project file for building pg_probackup
# against PostgreSQL 9.6 on Windows.  Collects include and library search
# paths for the optional PostgreSQL dependencies (from tools/msvc/config.pl)
# and substitutes them into msvs/template.pg_probackup96.vcxproj.
use JSON;
our $repack_version;
our $pgdir;   # root of an installed PostgreSQL tree (must have bin/include/lib)
our $pgsrc;   # root of the PostgreSQL source tree (for tools/msvc/config.pl)
if (@ARGV!=2) {
	# Fixed typos in the usage message ("postgress-instalation-root").
	print STDERR "Usage $0 postgres-installation-root pg-source-dir \n";
	exit 1;
}
our $liblist="";
$pgdir = shift @ARGV;
$pgsrc = shift @ARGV if @ARGV;
# Target architecture comes from the environment; normalize to the
# spellings MSVC project files expect ('Win32' or 'x64').
our $arch = $ENV{'ARCH'} || "x64";
$arch='Win32' if ($arch eq 'x86' || $arch eq 'X86');
$arch='x64' if $arch eq 'X64';
# config.pl defines $config, describing which optional components
# (zlib, openssl, icu, ...) the PostgreSQL build was configured with.
$conffile = $pgsrc."/tools/msvc/config.pl";
die 'Could not find config.pl'
 unless (-f $conffile);
our $config;
do $conffile;
if (! -d "$pgdir/bin" || !-d "$pgdir/include" || !-d "$pgdir/lib") {
	print STDERR "Directory $pgdir doesn't look like root of postgresql installation\n";
	exit 1;
}
# Accumulators filled by AddProject() via the Add* helpers below.
our $includepath="";
our $libpath="";
our $libpath32="";
AddProject();
print "\n\n";
print $libpath."\n";
print $includepath."\n";
# substitute new path in the project files
preprocess_project("./msvs/template.pg_probackup96.vcxproj","./msvs/pg_probackup.vcxproj");
exit 0;
# Copy the template project $in to $out, expanding the @PGROOT@,
# @ADDLIBS@, @ADDLIBS32@, @PGSRC@ and @ADDINCLUDE@ placeholders with the
# path lists accumulated by AddProject().
sub preprocess_project {
	my ($in, $out) = @_;
	our $pgdir;
	our $adddir;
	# $libs is computed for parity with the original flow but is not
	# referenced afterwards.
	my $libs = defined($adddir) ? "$adddir;" : "";
	open IN,"<",$in or die "Cannot open $in: $!\n";
	open OUT,">",$out or die "Cannot open $out: $!\n";
	while (my $line = <IN>) {
		$line =~ s/\@PGROOT\@/$pgdir/g;
		$line =~ s/\@ADDLIBS\@/$libpath/g;
		$line =~ s/\@ADDLIBS32\@/$libpath32/g;
		$line =~ s/\@PGSRC\@/$pgsrc/g;
		$line =~ s/\@ADDINCLUDE\@/$includepath/g;
		print OUT $line;
	}
	close IN;
	close OUT;
}
# my sub
# Append a library path to BOTH the 64-bit ($libpath) and 32-bit
# ($libpath32) lists, kept as single ';'-separated strings for the
# .vcxproj template.  Extra arguments (if any) are ignored.
sub AddLibrary
{
	my $lib = shift;
	$libpath   = ($libpath   eq '') ? $lib : "$libpath;$lib";
	$libpath32 = ($libpath32 eq '') ? $lib : "$libpath32;$lib";
}
# Append a library path to the 32-bit-only list ($libpath32),
# inserting the ';' separator when the list is already non-empty.
sub AddLibrary32
{
	my $lib = shift;
	$libpath32 = ($libpath32 eq '') ? $lib : "$libpath32;$lib";
}
# Append a library path to the 64-bit-only list ($libpath),
# inserting the ';' separator when the list is already non-empty.
sub AddLibrary64
{
	my $lib = shift;
	$libpath = ($libpath eq '') ? $lib : "$libpath;$lib";
}
# Append a directory to the ';'-separated include path list
# ($includepath) used by the .vcxproj template.
sub AddIncludeDir
{
	my $dir = shift;
	$includepath = ($includepath eq '') ? $dir : "$includepath;$dir";
}
sub AddProject
{
# Translate the optional-dependency settings from PostgreSQL's
# tools/msvc/config.pl ($config) into include and library search
# paths via the AddIncludeDir/AddLibrary* helpers.  Paths land in
# the globals $includepath/$libpath/$libpath32 in the order added.
# my ($self, $name, $type, $folder, $initialdir) = @_;
if ($config->{zlib})
{
AddIncludeDir($config->{zlib} . '\include');
AddLibrary($config->{zlib} . '\lib\zdll.lib');
}
if ($config->{openssl})
{
AddIncludeDir($config->{openssl} . '\include');
# NOTE(review): probes for ssleay32MD.lib but links ssleay32.lib;
# pattern inherited from PostgreSQL's Solution.pm - confirm against
# the OpenSSL layout actually installed on the build host.
# The trailing 1/0 argument is ignored by this script's AddLibrary().
if (-e "$config->{openssl}/lib/VC/ssleay32MD.lib")
{
AddLibrary(
$config->{openssl} . '\lib\VC\ssleay32.lib', 1);
AddLibrary(
$config->{openssl} . '\lib\VC\libeay32.lib', 1);
}
else
{
# We don't expect the config-specific library to be here,
# so don't ask for it in last parameter
AddLibrary(
$config->{openssl} . '\lib\ssleay32.lib', 0);
AddLibrary(
$config->{openssl} . '\lib\libeay32.lib', 0);
}
}
if ($config->{nls})
{
AddIncludeDir($config->{nls} . '\include');
AddLibrary($config->{nls} . '\lib\libintl.lib');
}
if ($config->{gss})
{
# Kerberos/GSSAPI: only i386 import libraries are registered here.
AddIncludeDir($config->{gss} . '\inc\krb5');
AddLibrary($config->{gss} . '\lib\i386\krb5_32.lib');
AddLibrary($config->{gss} . '\lib\i386\comerr32.lib');
AddLibrary($config->{gss} . '\lib\i386\gssapi32.lib');
}
if ($config->{iconv})
{
AddIncludeDir($config->{iconv} . '\include');
AddLibrary($config->{iconv} . '\lib\iconv.lib');
}
if ($config->{icu})
{
# ICU ships separate 32/64-bit import libraries; register each with
# the matching arch-specific list.
AddIncludeDir($config->{icu} . '\include');
AddLibrary32($config->{icu} . '\lib\icuin.lib');
AddLibrary32($config->{icu} . '\lib\icuuc.lib');
AddLibrary32($config->{icu} . '\lib\icudt.lib');
AddLibrary64($config->{icu} . '\lib64\icuin.lib');
AddLibrary64($config->{icu} . '\lib64\icuuc.lib');
AddLibrary64($config->{icu} . '\lib64\icudt.lib');
}
if ($config->{xml})
{
AddIncludeDir($config->{xml} . '\include');
AddIncludeDir($config->{xml} . '\include\libxml2');
AddLibrary($config->{xml} . '\lib\libxml2.lib');
}
if ($config->{xslt})
{
AddIncludeDir($config->{xslt} . '\include');
AddLibrary($config->{xslt} . '\lib\libxslt.lib');
}
if ($config->{libedit})
{
AddIncludeDir($config->{libedit} . '\include');
# AddLibrary($config->{libedit} . "\\" .
# ($arch eq 'x64'? 'lib64': 'lib32').'\edit.lib');
AddLibrary32($config->{libedit} . '\\lib32\edit.lib');
AddLibrary64($config->{libedit} . '\\lib64\edit.lib');
}
if ($config->{uuid})
{
AddIncludeDir($config->{uuid} . '\include');
AddLibrary($config->{uuid} . '\lib\uuid.lib');
}
if ($config->{zstd})
{
# zstd's config value is the directory itself (headers live at its root).
AddIncludeDir($config->{zstd});
# AddLibrary($config->{zstd}. "\\".($arch eq 'x64'? "zstdlib_x64.lib" : "zstdlib_x86.lib"));
AddLibrary32($config->{zstd}. "\\zstdlib_x86.lib");
AddLibrary64($config->{zstd}. "\\zstdlib_x64.lib") ;
}
# return $proj;
}

219
win32build_2.pl Normal file
View File

@ -0,0 +1,219 @@
#!/usr/bin/perl
# win32build_2.pl - generate an MSVC project file for building pg_probackup
# on Windows (single-arch variant: one library list, arch chosen at
# config time).  Collects include/library paths from the PostgreSQL build
# configuration and substitutes them into msvs/template.pg_probackup_2.vcxproj.
use JSON;
our $repack_version;
our $pgdir;   # root of an installed PostgreSQL tree (must have bin/include/lib)
our $pgsrc;   # root of the PostgreSQL source tree (for tools/msvc/config.pl)
if (@ARGV!=2) {
	# Fixed typos in the usage message ("postgress-instalation-root").
	print STDERR "Usage $0 postgres-installation-root pg-source-dir \n";
	exit 1;
}
our $liblist="";
$pgdir = shift @ARGV;
$pgsrc = shift @ARGV if @ARGV;
# Target architecture comes from the environment; normalize to the
# spellings MSVC project files expect ('Win32' or 'x64').
our $arch = $ENV{'ARCH'} || "x64";
$arch='Win32' if ($arch eq 'x86' || $arch eq 'X86');
$arch='x64' if $arch eq 'X64';
# config.pl defines $config, describing which optional components
# (zlib, openssl, icu, ...) the PostgreSQL build was configured with.
$conffile = $pgsrc."/tools/msvc/config.pl";
die 'Could not find config.pl'
 unless (-f $conffile);
our $config;
do $conffile;
if (! -d "$pgdir/bin" || !-d "$pgdir/include" || !-d "$pgdir/lib") {
	print STDERR "Directory $pgdir doesn't look like root of postgresql installation\n";
	exit 1;
}
# Accumulators filled by AddProject() via the Add* helpers below.
our $includepath="";
our $libpath="";
AddProject();
print "\n\n";
print $libpath."\n";
print $includepath."\n";
# substitute new path in the project files
preprocess_project("./msvs/template.pg_probackup_2.vcxproj","./msvs/pg_probackup.vcxproj");
exit 0;
# Copy the template project $in to $out, expanding the @PGROOT@,
# @ADDLIBS@, @PGSRC@ and @ADDINCLUDE@ placeholders with the path
# lists accumulated by AddProject().
sub preprocess_project {
	my ($in, $out) = @_;
	our $pgdir;
	our $adddir;
	# $libs is computed for parity with the original flow but is not
	# referenced afterwards.
	my $libs = defined($adddir) ? "$adddir;" : "";
	open IN,"<",$in or die "Cannot open $in: $!\n";
	open OUT,">",$out or die "Cannot open $out: $!\n";
	while (my $line = <IN>) {
		$line =~ s/\@PGROOT\@/$pgdir/g;
		$line =~ s/\@ADDLIBS\@/$libpath/g;
		$line =~ s/\@PGSRC\@/$pgsrc/g;
		$line =~ s/\@ADDINCLUDE\@/$includepath/g;
		print OUT $line;
	}
	close IN;
	close OUT;
}
# my sub
# Append a library path to the single ';'-separated library list
# ($libpath) used by the .vcxproj template.  Extra arguments (if any)
# are ignored.
sub AddLibrary
{
	my $lib = shift;
	$libpath = ($libpath eq '') ? $lib : "$libpath;$lib";
}
# Append a directory to the ';'-separated include path list
# ($includepath) used by the .vcxproj template.
sub AddIncludeDir
{
	my $dir = shift;
	$includepath = ($includepath eq '') ? $dir : "$includepath;$dir";
}
# Translate the optional-dependency settings from PostgreSQL's
# tools/msvc/config.pl ($config) into include and library search paths
# via AddIncludeDir/AddLibrary.  Paths accumulate in the globals
# $includepath and $libpath in the order added.
#
# Bug fix: the libedit section was duplicated (an identical second
# block followed the uuid section), so its include directory and
# edit.lib were emitted twice into the generated project.  It is now
# registered once, before uuid, matching the sibling win32build scripts.
sub AddProject
{
	# my ($self, $name, $type, $folder, $initialdir) = @_;
	if ($config->{zlib})
	{
		AddIncludeDir($config->{zlib} . '\include');
		AddLibrary($config->{zlib} . '\lib\zdll.lib');
	}
	if ($config->{openssl})
	{
		AddIncludeDir($config->{openssl} . '\include');
		# NOTE(review): probes for ssleay32MD.lib but links ssleay32.lib;
		# pattern inherited from PostgreSQL's Solution.pm.  The trailing
		# 1/0 argument is ignored by this script's AddLibrary().
		if (-e "$config->{openssl}/lib/VC/ssleay32MD.lib")
		{
			AddLibrary(
				$config->{openssl} . '\lib\VC\ssleay32.lib', 1);
			AddLibrary(
				$config->{openssl} . '\lib\VC\libeay32.lib', 1);
		}
		else
		{
			# We don't expect the config-specific library to be here,
			# so don't ask for it in last parameter
			AddLibrary(
				$config->{openssl} . '\lib\ssleay32.lib', 0);
			AddLibrary(
				$config->{openssl} . '\lib\libeay32.lib', 0);
		}
	}
	if ($config->{nls})
	{
		AddIncludeDir($config->{nls} . '\include');
		AddLibrary($config->{nls} . '\lib\libintl.lib');
	}
	if ($config->{gss})
	{
		# Kerberos/GSSAPI: only i386 import libraries are registered here.
		AddIncludeDir($config->{gss} . '\inc\krb5');
		AddLibrary($config->{gss} . '\lib\i386\krb5_32.lib');
		AddLibrary($config->{gss} . '\lib\i386\comerr32.lib');
		AddLibrary($config->{gss} . '\lib\i386\gssapi32.lib');
	}
	if ($config->{iconv})
	{
		AddIncludeDir($config->{iconv} . '\include');
		AddLibrary($config->{iconv} . '\lib\iconv.lib');
	}
	if ($config->{icu})
	{
		# ICU: pick the import libraries matching the target architecture.
		AddIncludeDir($config->{icu} . '\include');
		if ($arch eq 'Win32')
		{
			AddLibrary($config->{icu} . '\lib\icuin.lib');
			AddLibrary($config->{icu} . '\lib\icuuc.lib');
			AddLibrary($config->{icu} . '\lib\icudt.lib');
		}
		else
		{
			AddLibrary($config->{icu} . '\lib64\icuin.lib');
			AddLibrary($config->{icu} . '\lib64\icuuc.lib');
			AddLibrary($config->{icu} . '\lib64\icudt.lib');
		}
	}
	if ($config->{xml})
	{
		AddIncludeDir($config->{xml} . '\include');
		AddIncludeDir($config->{xml} . '\include\libxml2');
		AddLibrary($config->{xml} . '\lib\libxml2.lib');
	}
	if ($config->{xslt})
	{
		AddIncludeDir($config->{xslt} . '\include');
		AddLibrary($config->{xslt} . '\lib\libxslt.lib');
	}
	if ($config->{libedit})
	{
		AddIncludeDir($config->{libedit} . '\include');
		AddLibrary($config->{libedit} . "\\" .
			($arch eq 'x64'? 'lib64': 'lib32').'\edit.lib');
	}
	if ($config->{uuid})
	{
		AddIncludeDir($config->{uuid} . '\include');
		AddLibrary($config->{uuid} . '\lib\uuid.lib');
	}
	if ($config->{zstd})
	{
		# zstd's config value is the directory itself (headers at its root).
		AddIncludeDir($config->{zstd});
		AddLibrary($config->{zstd}. "\\".
			($arch eq 'x64'? "zstdlib_x64.lib" : "zstdlib_x86.lib")
		);
	}
	# return $proj;
}