mirror of https://github.com/postgrespro/pg_probackup.git (synced 2025-02-03 14:01:57 +02:00)
PGPRO-427: Merge with master
commit ee3ce85235
gen_probackup_project.pl (new file, 190 lines)
@@ -0,0 +1,190 @@
# -*-perl-*- hey - emacs - this is a perl file
BEGIN{
use Cwd;
use File::Basename;

my $pgsrc="";
if (@ARGV==1)
{
	$pgsrc = shift @ARGV;
	if($pgsrc eq "--help"){
		print STDERR "Usage: $0 pg-source-dir\n";
		print STDERR "Like this:\n";
		print STDERR "$0 C:/PgProject/postgresql.10dev/postgrespro\n";
		print STDERR "You may need to run this first:\n";
		print STDERR "CALL \"C:\\Program Files (x86)\\Microsoft Visual Studio 12.0\\VC\\vcvarsall\" amd64\n";
		exit 1;
	}
}
else
{
	use Cwd qw(abs_path);
	my $path = dirname(abs_path($0));
	chdir($path);
	chdir("../..");
	$pgsrc = cwd();
}

chdir("$pgsrc/src/tools/msvc");
push(@INC, "$pgsrc/src/tools/msvc");
chdir("../../..") if (-d "../msvc" && -d "../../../src");

}

use Win32;
use Carp;
use strict;
use warnings;

use Project;
use Solution;
use File::Copy;
use Config;
use VSObjectFactory;
use List::Util qw(first);

use Exporter;
our (@ISA, @EXPORT_OK);
@ISA = qw(Exporter);
@EXPORT_OK = qw(Mkvcbuild);

my $solution;
my $libpgport;
my $libpgcommon;
my $libpgfeutils;
my $postgres;
my $libpq;
my @unlink_on_exit;

use lib "src/tools/msvc";

use Mkvcbuild;

# if (-e "src/tools/msvc/buildenv.pl")
# {
#	do "src/tools/msvc/buildenv.pl";
# }
# elsif (-e "./buildenv.pl")
# {
#	do "./buildenv.pl";
# }

# set up the project
our $config;
do "config_default.pl";
do "config.pl" if (-f "src/tools/msvc/config.pl");

# my $vcver = Mkvcbuild::mkvcbuild($config);
my $vcver = build_pgprobackup($config);

# check what sort of build we are doing

my $bconf = $ENV{CONFIG} || "Release";
my $msbflags = $ENV{MSBFLAGS} || "";
my $buildwhat = $ARGV[1] || "";
if (uc($ARGV[0]) eq 'DEBUG')
{
	$bconf = "Debug";
}
elsif (uc($ARGV[0]) ne "RELEASE")
{
	$buildwhat = $ARGV[0] || "";
}

# ... and do it
system("msbuild pg_probackup.vcxproj /verbosity:normal $msbflags /p:Configuration=$bconf");

# report status

my $status = $? >> 8;

exit $status;


sub build_pgprobackup
{
	our $config = shift;

	chdir('../../..') if (-d '../msvc' && -d '../../../src');
	die 'Must run from root or msvc directory'
	  unless (-d 'src/tools/msvc' && -d 'src');

	# my $vsVersion = DetermineVisualStudioVersion();
	my $vsVersion = '12.00';

	$solution = CreateSolution($vsVersion, $config);

	$libpq = $solution->AddProject('libpq', 'dll', 'interfaces',
		'src/interfaces/libpq');
	$libpgfeutils = $solution->AddProject('libpgfeutils', 'lib', 'misc');
	$libpgcommon = $solution->AddProject('libpgcommon', 'lib', 'misc');
	$libpgport = $solution->AddProject('libpgport', 'lib', 'misc');

	#vvs test
	my $probackup =
		$solution->AddProject('pg_probackup', 'exe', 'pg_probackup'); #, 'contrib/pg_probackup'
	$probackup->AddFiles(
		'contrib/pg_probackup/src',
		'archive.c',
		'backup.c',
		'catalog.c',
		'configure.c',
		'data.c',
		'delete.c',
		'dir.c',
		'fetch.c',
		'help.c',
		'init.c',
		'parsexlog.c',
		'pg_probackup.c',
		'restore.c',
		'show.c',
		'status.c',
		'util.c',
		'validate.c'
	);
	$probackup->AddFiles(
		'contrib/pg_probackup/src/utils',
		'json.c',
		'logger.c',
		'parray.c',
		'pgut.c',
		'thread.c'
	);
	$probackup->AddFile('src/backend/access/transam/xlogreader.c');
	$probackup->AddFiles(
		'src/bin/pg_basebackup',
		'receivelog.c',
		'streamutil.c'
	);

	if (-e 'src/bin/pg_basebackup/walmethods.c')
	{
		$probackup->AddFile('src/bin/pg_basebackup/walmethods.c');
	}

	$probackup->AddFile('src/bin/pg_rewind/datapagemap.c');

	$probackup->AddFile('src/interfaces/libpq/pthread-win32.c');

	$probackup->AddIncludeDir('src/bin/pg_basebackup');
	$probackup->AddIncludeDir('src/bin/pg_rewind');
	$probackup->AddIncludeDir('src/interfaces/libpq');
	$probackup->AddIncludeDir('src');
	$probackup->AddIncludeDir('src/port');

	$probackup->AddIncludeDir('contrib/pg_probackup');
	$probackup->AddIncludeDir('contrib/pg_probackup/src');
	$probackup->AddIncludeDir('contrib/pg_probackup/src/utils');

	$probackup->AddReference($libpq, $libpgfeutils, $libpgcommon, $libpgport);
	$probackup->AddLibrary('ws2_32.lib');

	$probackup->Save();
	return $solution->{vcver};
}
src/backup.c (24 changed lines)
@@ -329,7 +329,7 @@ remote_copy_file(PGconn *conn, pgFile* file)
 	{
 		write_buffer_size = Min(row_length, sizeof(buf));
 		memcpy(buf, copybuf, write_buffer_size);
-		COMP_CRC32C(file->crc, &buf, write_buffer_size);
+		COMP_CRC32C(file->crc, buf, write_buffer_size);

 		/* TODO calc checksum*/
 		if (fwrite(buf, 1, write_buffer_size, out) != write_buffer_size)
@@ -732,7 +732,7 @@ do_backup_instance(void)
 		else
 			pthread_create(&threads[i], NULL, remote_backup_files, arg);
 	}

 	/* Wait threads */
 	for (i = 0; i < num_threads; i++)
 	{
@@ -854,7 +854,7 @@ do_backup(time_t start_time)
 		elog(WARNING, "This PostgreSQL instance was initialized without data block checksums. "
 					  "pg_probackup have no way to detect data block corruption without them. "
 					  "Reinitialize PGDATA with option '--data-checksums'.");

 	StrNCpy(current.server_version, server_version_str,
 			sizeof(current.server_version));
 	current.stream = stream_wal;
@@ -1030,7 +1030,7 @@ check_system_identifiers(void)

 	system_id_pgdata = get_system_identifier(pgdata);
 	system_id_conn = get_remote_system_identifier(backup_conn);

 	if (system_id_conn != system_identifier)
 		elog(ERROR, "Backup data directory was initialized for system id %ld, but connected instance system id is %ld",
 			 system_identifier, system_id_conn);
@@ -1053,13 +1053,13 @@ confirm_block_size(const char *name, int blcksz)
 	res = pgut_execute(backup_conn, "SELECT pg_catalog.current_setting($1)", 1, &name);
 	if (PQntuples(res) != 1 || PQnfields(res) != 1)
 		elog(ERROR, "cannot get %s: %s", name, PQerrorMessage(backup_conn));

 	block_size = strtol(PQgetvalue(res, 0, 0), &endp, 10);
 	if ((endp && *endp) || block_size != blcksz)
 		elog(ERROR,
 			 "%s(%d) is not compatible(%d expected)",
 			 name, block_size, blcksz);

 	PQclear(res);
 }

@@ -1846,7 +1846,7 @@ pg_stop_backup(pgBackup *backup)
 			elog(ERROR,
 				 "result of txid_snapshot_xmax() is invalid: %s",
 				 PQgetvalue(res, 0, 0));
-		if (!parse_time(PQgetvalue(res, 0, 1), &recovery_time))
+		if (!parse_time(PQgetvalue(res, 0, 1), &recovery_time, true))
 			elog(ERROR,
 				 "result of current_timestamp is invalid: %s",
 				 PQgetvalue(res, 0, 1));
@@ -2034,7 +2034,7 @@ backup_files(void *arg)
 		pgFile	   *file = (pgFile *) parray_get(arguments->files_list, i);

 		elog(VERBOSE, "Copying file: \"%s\" ", file->path);
-		if (!pg_atomic_test_set_flag(&file->lock))
+		if (!pg_atomic_test_set_flag(&file->lock))
 			continue;

 		/* check for interrupt */
@@ -2402,12 +2402,12 @@ make_pagemap_from_ptrack(parray *files)

 		if (file->is_datafile)
 		{
-			if (file->tblspcOid == tblspcOid_with_ptrack_init
-				&& file->dbOid == dbOid_with_ptrack_init)
+			if (file->tblspcOid == tblspcOid_with_ptrack_init &&
+				file->dbOid == dbOid_with_ptrack_init)
 			{
 				/* ignore ptrack if ptrack_init exists */
 				elog(VERBOSE, "Ignoring ptrack because of ptrack_init for file: %s", file->path);
-				file->pagemap.bitmapsize = PageBitmapIsAbsent;
+				file->pagemap_isabsent = true;
 				continue;
 			}

@@ -2460,7 +2460,7 @@ make_pagemap_from_ptrack(parray *files)
 			 *	 - target relation was deleted.
 			 */
 			elog(VERBOSE, "Ptrack is missing for file: %s", file->path);
-			file->pagemap.bitmapsize = PageBitmapIsAbsent;
+			file->pagemap_isabsent = true;
 		}
 	}
 }
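Note on the first hunk above: for a local array like char buf[N], the expressions &buf and buf evaluate to the same address, so the CRC computed from &buf was already correct; the change to plain buf fixes the pointer type (char (*)[N] vs char *). The same cleanup is applied to write_buffer in src/data.c below. A minimal standalone C sketch (illustrative, not part of the commit):

#include <stdio.h>

int main(void)
{
	char	buf[1024];

	/* Same address, different pointer types. */
	printf("buf  = %p\n", (void *) buf);	/* decays to char * */
	printf("&buf = %p\n", (void *) &buf);	/* type is char (*)[1024] */

	/* Pointer arithmetic differs: +1 moves one byte vs the whole array. */
	printf("buf  + 1 advances %td bytes\n", (char *) (buf + 1) - buf);
	printf("&buf + 1 advances %td bytes\n", (char *) (&buf + 1) - (char *) &buf);
	return 0;
}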
src/catalog.c
@@ -250,14 +250,14 @@ IsDir(const char *dirpath, const char *entry)
 parray *
 catalog_get_backup_list(time_t requested_backup_id)
 {
-	DIR		   *date_dir = NULL;
-	struct dirent *date_ent = NULL;
+	DIR		   *data_dir = NULL;
+	struct dirent *data_ent = NULL;
 	parray	   *backups = NULL;
 	pgBackup   *backup = NULL;

 	/* open backup instance backups directory */
-	date_dir = opendir(backup_instance_path);
-	if (date_dir == NULL)
+	data_dir = opendir(backup_instance_path);
+	if (data_dir == NULL)
 	{
 		elog(WARNING, "cannot open directory \"%s\": %s", backup_instance_path,
 			 strerror(errno));
@@ -266,21 +266,21 @@ catalog_get_backup_list(time_t requested_backup_id)

 	/* scan the directory and list backups */
 	backups = parray_new();
-	for (; (date_ent = readdir(date_dir)) != NULL; errno = 0)
+	for (; (data_ent = readdir(data_dir)) != NULL; errno = 0)
 	{
 		char		backup_conf_path[MAXPGPATH];
-		char		date_path[MAXPGPATH];
+		char		data_path[MAXPGPATH];

 		/* skip not-directory entries and hidden entries */
-		if (!IsDir(backup_instance_path, date_ent->d_name)
-			|| date_ent->d_name[0] == '.')
+		if (!IsDir(backup_instance_path, data_ent->d_name)
+			|| data_ent->d_name[0] == '.')
 			continue;

 		/* open subdirectory of specific backup */
-		join_path_components(date_path, backup_instance_path, date_ent->d_name);
+		join_path_components(data_path, backup_instance_path, data_ent->d_name);

 		/* read backup information from BACKUP_CONTROL_FILE */
-		snprintf(backup_conf_path, MAXPGPATH, "%s/%s", date_path, BACKUP_CONTROL_FILE);
+		snprintf(backup_conf_path, MAXPGPATH, "%s/%s", data_path, BACKUP_CONTROL_FILE);
 		backup = readBackupControlFile(backup_conf_path);

 		/* ignore corrupted backups */
@@ -298,8 +298,8 @@ catalog_get_backup_list(time_t requested_backup_id)

 	if (errno && errno != ENOENT)
 	{
-		elog(WARNING, "cannot read date directory \"%s\": %s",
-			 date_ent->d_name, strerror(errno));
+		elog(WARNING, "cannot read data directory \"%s\": %s",
+			 data_ent->d_name, strerror(errno));
 		goto err_proc;
 	}
 }
@@ -310,16 +310,16 @@ catalog_get_backup_list(time_t requested_backup_id)
 		goto err_proc;
 	}

-	closedir(date_dir);
-	date_dir = NULL;
+	closedir(data_dir);
+	data_dir = NULL;

 	parray_qsort(backups, pgBackupCompareIdDesc);

 	return backups;

 err_proc:
-	if (date_dir)
-		closedir(date_dir);
+	if (data_dir)
+		closedir(data_dir);
 	if (backup)
 		pgBackupFree(backup);
 	if (backups)
src/data.c (224 changed lines)
@@ -27,7 +27,7 @@

 #ifdef HAVE_LIBZ
 /* Implementation of zlib compression method */
-static size_t zlib_compress(void* dst, size_t dst_size, void const* src, size_t src_size)
+static int32 zlib_compress(void* dst, size_t dst_size, void const* src, size_t src_size)
 {
 	uLongf compressed_size = dst_size;
 	int rc = compress2(dst, &compressed_size, src, src_size, compress_level);
@@ -35,7 +35,7 @@ static size_t zlib_compress(void* dst, size_t dst_size, void const* src, size_t
 }

 /* Implementation of zlib compression method */
-static size_t zlib_decompress(void* dst, size_t dst_size, void const* src, size_t src_size)
+static int32 zlib_decompress(void* dst, size_t dst_size, void const* src, size_t src_size)
 {
 	uLongf dest_len = dst_size;
 	int rc = uncompress(dst, &dest_len, src, src_size);
@@ -47,7 +47,7 @@ static size_t zlib_decompress(void* dst, size_t dst_size, void const* src, size_
  * Compresses source into dest using algorithm. Returns the number of bytes
  * written in the destination buffer, or -1 if compression fails.
  */
-static size_t
+static int32
 do_compress(void* dst, size_t dst_size, void const* src, size_t src_size, CompressAlg alg)
 {
 	switch (alg)
@@ -70,7 +70,7 @@ do_compress(void* dst, size_t dst_size, void const* src, size_t src_size, Compre
  * Decompresses source into dest using algorithm. Returns the number of bytes
 * decompressed in the destination buffer, or -1 if decompression fails.
 */
-static size_t
+static int32
 do_decompress(void* dst, size_t dst_size, void const* src, size_t src_size, CompressAlg alg)
 {
 	switch (alg)
@@ -101,6 +101,7 @@ typedef struct BackupPageHeader

 /* Special value for compressed_size field */
 #define PageIsTruncated -2
+#define SkipCurrentPage -3

 /* Verify page's header */
 static bool
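The return type of the compression helpers changes from size_t to int32 because they report failure as -1: with an unsigned size_t return, -1 wraps to SIZE_MAX and a negative-result check can never fire. A standalone C sketch of the pitfall (illustrative, not from the commit):

#include <stdint.h>
#include <stdio.h>

static size_t  fail_unsigned(void) { return (size_t) -1; }	/* wraps to SIZE_MAX */
static int32_t fail_signed(void)   { return -1; }

int main(void)
{
	/* Always false: an unsigned value is never less than zero. */
	if (fail_unsigned() < 0)
		puts("unsigned: failure detected");
	else
		puts("unsigned: failure missed");

	if (fail_signed() < 0)		/* works as intended */
		puts("signed: failure detected");
	return 0;
}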
@@ -134,8 +135,8 @@ static int
 read_page_from_file(pgFile *file, BlockNumber blknum,
 					FILE *in, Page page, XLogRecPtr *page_lsn)
 {
-	off_t offset = blknum*BLCKSZ;
-	size_t read_len = 0;
+	off_t	offset = blknum * BLCKSZ;
+	size_t	read_len = 0;

 	/* read the block */
 	if (fseek(in, offset, SEEK_SET) != 0)
@@ -216,31 +217,32 @@ read_page_from_file(pgFile *file, BlockNumber blknum,
 }

 /*
- * Backup the specified block from a file of a relation.
- * Verify page header and checksum of the page and write it
- * to the backup file.
+ * Retrieves a page taking the backup mode into account
+ * and writes it into argument "page". Argument "page"
+ * should be a pointer to allocated BLCKSZ of bytes.
  *
- * Prints appropriate warnings/errors/etc into log.
+ * Returns 0 if page was successfully retrieved
+ *         SkipCurrentPage(-3) if we need to skip this page
+ *         PageIsTruncated(-2) if the page was truncated
  */
-static void
-backup_data_page(backup_files_arg *arguments,
-				 pgFile *file, XLogRecPtr prev_backup_start_lsn,
-				 BlockNumber blknum, BlockNumber nblocks,
-				 FILE *in, FILE *out,
-				 pg_crc32 *crc, int *n_skipped,
-				 BackupMode backup_mode)
+static int32
+prepare_page(backup_files_arg *arguments,
+			 pgFile *file, XLogRecPtr prev_backup_start_lsn,
+			 BlockNumber blknum, BlockNumber nblocks,
+			 FILE *in, int *n_skipped,
+			 BackupMode backup_mode,
+			 Page page)
 {
-	BackupPageHeader header;
-	Page page = malloc(BLCKSZ);
-	Page compressed_page = NULL;
-	XLogRecPtr page_lsn = 0;
-	size_t write_buffer_size;
-	char write_buffer[BLCKSZ+sizeof(header)];
-
-	int try_again = 100;
-	bool page_is_valid = false;
+	XLogRecPtr	page_lsn = 0;
+	int			try_again = 100;
+	bool		page_is_valid = false;
+	bool		page_is_truncated = false;
 	BlockNumber absolute_blknum = file->segno * RELSEG_SIZE + blknum;

-	header.block = blknum;
-	header.compressed_size = 0;
 	/* check for interrupt */
 	if (interrupted)
 		elog(ERROR, "Interrupted during backup");

 	/*
 	 * Read the page and verify its header and checksum.
@@ -258,7 +260,7 @@ backup_data_page(backup_files_arg *arguments,
 	if (result == 0)
 	{
 		/* This block was truncated.*/
-		header.compressed_size = PageIsTruncated;
+		page_is_truncated = true;
 		/* Page is not actually valid, but it is absent
 		 * and we're not going to reread it or validate */
 		page_is_valid = true;
@@ -291,35 +293,38 @@ backup_data_page(backup_files_arg *arguments,
 	if (backup_mode == BACKUP_MODE_DIFF_PTRACK || (!page_is_valid && is_ptrack_support))
 	{
 		size_t		page_size = 0;
-
-		free(page);
-		page = NULL;
-		page = (Page) pg_ptrack_get_block(arguments, file->dbOid, file->tblspcOid,
+		Page		ptrack_page = NULL;
+		ptrack_page = (Page) pg_ptrack_get_block(arguments, file->dbOid, file->tblspcOid,
 										  file->relOid, absolute_blknum, &page_size);

-		if (page == NULL)
+		if (ptrack_page == NULL)
 		{
 			/* This block was truncated.*/
-			header.compressed_size = PageIsTruncated;
+			page_is_truncated = true;
 		}
 		else if (page_size != BLCKSZ)
 		{
+			free(ptrack_page);
 			elog(ERROR, "File: %s, block %u, expected block size %d, but read %lu",
 				 file->path, absolute_blknum, BLCKSZ, page_size);
 		}
 		else
 		{
 			/*
+			 * We need to copy the page that was successfully
+			 * retreieved from ptrack into our output "page" parameter.
 			 * We must set checksum here, because it is outdated
 			 * in the block recieved from shared buffers.
 			 */
+			memcpy(page, ptrack_page, BLCKSZ);
+			free(ptrack_page);
 			if (is_checksum_enabled)
 				((PageHeader) page)->pd_checksum = pg_checksum_page(page, absolute_blknum);
 		}
 		/* get lsn from page, provided by pg_ptrack_get_block() */
 		if (backup_mode == BACKUP_MODE_DIFF_DELTA &&
 			file->exists_in_prev &&
-			header.compressed_size != PageIsTruncated &&
+			!page_is_truncated &&
 			!parse_page(page, &page_lsn))
 			elog(ERROR, "Cannot parse page after pg_ptrack_get_block. "
 				 "Possible risk of a memory corruption");
@@ -328,64 +333,82 @@ backup_data_page(backup_files_arg *arguments,

 	if (backup_mode == BACKUP_MODE_DIFF_DELTA &&
 		file->exists_in_prev &&
-		header.compressed_size != PageIsTruncated &&
+		!page_is_truncated &&
 		page_lsn < prev_backup_start_lsn)
 	{
 		elog(VERBOSE, "Skipping blknum: %u in file: %s", blknum, file->path);
 		(*n_skipped)++;
-		free(page);
-		return;
+		return SkipCurrentPage;
 	}

-	if (header.compressed_size != PageIsTruncated)
-	{
-		file->read_size += BLCKSZ;
+	if (page_is_truncated)
+		return PageIsTruncated;

-		compressed_page = malloc(BLCKSZ);
+	return 0;
+}
+
+static void
+compress_and_backup_page(pgFile *file, BlockNumber blknum,
+						 FILE *in, FILE *out, pg_crc32 *crc,
+						 int page_state, Page page)
+{
+	BackupPageHeader header;
+	size_t		write_buffer_size = sizeof(header);
+	char		write_buffer[BLCKSZ+sizeof(header)];
+	char		compressed_page[BLCKSZ];
+
+	if(page_state == SkipCurrentPage)
+		return;
+
+	header.block = blknum;
+	header.compressed_size = page_state;
+
+	if(page_state == PageIsTruncated)
+	{
+		/*
+		 * The page was truncated. Write only header
+		 * to know that we must truncate restored file
+		 */
+		memcpy(write_buffer, &header, sizeof(header));
+	}
+	else
+	{
+		/* The page was not truncated, so we need to compress it */
 		header.compressed_size = do_compress(compressed_page, BLCKSZ,
-											 page, BLCKSZ, compress_alg);
+											 page, BLCKSZ, compress_alg);

 		file->compress_alg = compress_alg;
 		file->read_size += BLCKSZ;
 		Assert (header.compressed_size <= BLCKSZ);
-	}
-
-	write_buffer_size = sizeof(header);

-	/*
-	 * The page was truncated. Write only header
-	 * to know that we must truncate restored file
-	 */
-	if (header.compressed_size == PageIsTruncated)
-	{
-		memcpy(write_buffer, &header, sizeof(header));
-	}
-	/* The page compression failed. Write it as is. */
-	else if (header.compressed_size == -1)
-	{
-		header.compressed_size = BLCKSZ;
-		memcpy(write_buffer, &header, sizeof(header));
-		memcpy(write_buffer + sizeof(header), page, BLCKSZ);
-		write_buffer_size += header.compressed_size;
-	}
-	/* The page was successfully compressed */
-	else if (header.compressed_size > 0)
-	{
-		memcpy(write_buffer, &header, sizeof(header));
-		memcpy(write_buffer + sizeof(header), compressed_page, header.compressed_size);
-		write_buffer_size += MAXALIGN(header.compressed_size);
+		/* The page was successfully compressed. */
+		if (header.compressed_size > 0)
+		{
+			memcpy(write_buffer, &header, sizeof(header));
+			memcpy(write_buffer + sizeof(header),
+				   compressed_page, header.compressed_size);
+			write_buffer_size += MAXALIGN(header.compressed_size);
+		}
+		/* Nonpositive value means that compression failed. Write it as is. */
+		else
+		{
+			header.compressed_size = BLCKSZ;
+			memcpy(write_buffer, &header, sizeof(header));
+			memcpy(write_buffer + sizeof(header), page, BLCKSZ);
+			write_buffer_size += header.compressed_size;
+		}
 	}

 	/* elog(VERBOSE, "backup blkno %u, compressed_size %d write_buffer_size %ld",
 		blknum, header.compressed_size, write_buffer_size); */

 	/* Update CRC */
-	COMP_CRC32C(*crc, &write_buffer, write_buffer_size);
+	COMP_CRC32C(*crc, write_buffer, write_buffer_size);

 	/* write data page */
 	if(fwrite(write_buffer, 1, write_buffer_size, out) != write_buffer_size)
 	{
-		int errno_tmp = errno;
+		int			errno_tmp = errno;
 		fclose(in);
 		fclose(out);
 		elog(ERROR, "File: %s, cannot write backup at block %u : %s",
@@ -393,11 +416,6 @@ backup_data_page(backup_files_arg *arguments,
 	}

 	file->write_size += write_buffer_size;
-
-	if (page != NULL)
-		free(page);
-	if (compressed_page != NULL)
-		free(compressed_page);
 }

 /*
@@ -414,13 +432,15 @@ backup_data_file(backup_files_arg* arguments,
 				 pgFile *file, XLogRecPtr prev_backup_start_lsn,
 				 BackupMode backup_mode)
 {
-	char to_path[MAXPGPATH];
-	FILE *in;
-	FILE *out;
-	BlockNumber blknum = 0;
-	BlockNumber nblocks = 0;
-	int n_blocks_skipped = 0;
-	int n_blocks_read = 0;
+	char		to_path[MAXPGPATH];
+	FILE	   *in;
+	FILE	   *out;
+	BlockNumber	blknum = 0;
+	BlockNumber	nblocks = 0;
+	int			n_blocks_skipped = 0;
+	int			n_blocks_read = 0;
+	int			page_state;
+	char		curr_page[BLCKSZ];

 	/*
 	 * Skip unchanged file only if it exists in previous backup.
@@ -430,7 +450,7 @@ backup_data_file(backup_files_arg* arguments,
 	if ((backup_mode == BACKUP_MODE_DIFF_PAGE ||
 		 backup_mode == BACKUP_MODE_DIFF_PTRACK) &&
 		file->pagemap.bitmapsize == PageBitmapIsEmpty &&
-		file->exists_in_prev)
+		file->exists_in_prev && !file->pagemap_isabsent)
 	{
 		/*
 		 * There are no changed blocks since last backup. We want make
@@ -494,15 +514,16 @@ backup_data_file(backup_files_arg* arguments,
 	 * If page map is empty or file is not present in previous backup
 	 * backup all pages of the relation.
 	 */
-	if (file->pagemap.bitmapsize == PageBitmapIsEmpty
-		|| file->pagemap.bitmapsize == PageBitmapIsAbsent
-		|| !file->exists_in_prev)
+	if (file->pagemap.bitmapsize == PageBitmapIsEmpty ||
+		file->pagemap_isabsent || !file->exists_in_prev)
 	{
 		for (blknum = 0; blknum < nblocks; blknum++)
 		{
-			backup_data_page(arguments, file, prev_backup_start_lsn, blknum,
-							 nblocks, in, out, &(file->crc),
-							 &n_blocks_skipped, backup_mode);
+			page_state = prepare_page(arguments, file, prev_backup_start_lsn,
+									  blknum, nblocks, in, &n_blocks_skipped,
+									  backup_mode, curr_page);
+			compress_and_backup_page(file, blknum, in, out, &(file->crc),
+									 page_state, curr_page);
 			n_blocks_read++;
 		}
 		if (backup_mode == BACKUP_MODE_DIFF_DELTA)
@@ -515,9 +536,11 @@ backup_data_file(backup_files_arg* arguments,
 		iter = datapagemap_iterate(&file->pagemap);
 		while (datapagemap_next(iter, &blknum))
 		{
-			backup_data_page(arguments, file, prev_backup_start_lsn, blknum,
-							 nblocks, in, out, &(file->crc),
-							 &n_blocks_skipped, backup_mode);
+			page_state = prepare_page(arguments, file, prev_backup_start_lsn,
+									  blknum, nblocks, in, &n_blocks_skipped,
+									  backup_mode, curr_page);
+			compress_and_backup_page(file, blknum, in, out, &(file->crc),
+									 page_state, curr_page);
 			n_blocks_read++;
 		}

@@ -632,7 +655,8 @@ restore_data_file(const char *from_root,
 		}

 		if (header.block < blknum)
-			elog(ERROR, "backup is broken at file->path %s block %u",file->path, blknum);
+			elog(ERROR, "backup is broken at file->path %s block %u",
+				 file->path, blknum);

 		if (header.compressed_size == PageIsTruncated)
 		{
@@ -643,7 +667,8 @@ restore_data_file(const char *from_root,
 			if (ftruncate(fileno(out), header.block * BLCKSZ) != 0)
 				elog(ERROR, "cannot truncate \"%s\": %s",
 					 file->path, strerror(errno));
-			elog(VERBOSE, "truncate file %s to block %u", file->path, header.block);
+			elog(VERBOSE, "truncate file %s to block %u",
+				 file->path, header.block);
 			break;
 		}

@@ -661,10 +686,12 @@ restore_data_file(const char *from_root,

 			uncompressed_size = do_decompress(page.data, BLCKSZ,
 											  compressed_page.data,
-											  header.compressed_size, file->compress_alg);
+											  header.compressed_size,
+											  file->compress_alg);

 			if (uncompressed_size != BLCKSZ)
-				elog(ERROR, "page uncompressed to %ld bytes. != BLCKSZ", uncompressed_size);
+				elog(ERROR, "page uncompressed to %ld bytes. != BLCKSZ",
+					 uncompressed_size);
 		}

 		/*
@@ -711,7 +738,8 @@ restore_data_file(const char *from_root,
 			if (ftruncate(fileno(out), file->n_blocks * BLCKSZ) != 0)
 				elog(ERROR, "cannot truncate \"%s\": %s",
 					 file->path, strerror(errno));
-			elog(INFO, "Delta truncate file %s to block %u", file->path, file->n_blocks);
+			elog(INFO, "Delta truncate file %s to block %u",
+				 file->path, file->n_blocks);
 		}
 	}
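For orientation: after this refactoring each backed-up block is written as a BackupPageHeader followed by its payload: no payload for a truncated page, a MAXALIGN-padded compressed image when compression succeeds, or the raw BLCKSZ page when it fails (compressed_size is then set to BLCKSZ). A hedged C sketch of reading one such record back, paraphrasing restore_data_file (error handling elided, names follow the diff):

	BackupPageHeader header;
	char		payload[BLCKSZ];

	if (fread(&header, 1, sizeof(header), in) != sizeof(header))
		return;					/* end of the backup file */

	if (header.compressed_size == PageIsTruncated)
	{
		/* No payload follows; truncate the restored file at header.block. */
	}
	else if (header.compressed_size == BLCKSZ)
	{
		/* Raw page stored as is (compression failed or was not used). */
		fread(payload, 1, BLCKSZ, in);
	}
	else
	{
		/* Compressed image; the writer padded it to MAXALIGN. */
		fread(payload, 1, MAXALIGN(header.compressed_size), in);
		/* do_decompress(page.data, BLCKSZ, payload, header.compressed_size,
		 *               file->compress_alg) then yields the BLCKSZ page. */
	}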
|
@ -161,7 +161,8 @@ pgFileInit(const char *path)
|
||||
file->is_datafile = false;
|
||||
file->linked = NULL;
|
||||
file->pagemap.bitmap = NULL;
|
||||
file->pagemap.bitmapsize = PageBitmapIsAbsent;
|
||||
file->pagemap.bitmapsize = PageBitmapIsEmpty;
|
||||
file->pagemap_isabsent = false;
|
||||
file->tblspcOid = 0;
|
||||
file->dbOid = 0;
|
||||
file->relOid = 0;
|
||||
|
src/help.c
@@ -121,10 +121,12 @@ help_pg_probackup(void)
 	printf(_("                 [--immediate] [--recovery-target-name=target-name]\n"));
 	printf(_("                 [--recovery-target-action=pause|promote|shutdown]\n"));
 	printf(_("                 [--restore-as-replica]\n"));
+	printf(_("                 [--no-validate]\n"));

 	printf(_("\n %s validate -B backup-dir [--instance=instance_name]\n"), PROGRAM_NAME);
 	printf(_("                 [-i backup-id] [--progress]\n"));
 	printf(_("                 [--time=time|--xid=xid [--inclusive=boolean]]\n"));
+	printf(_("                 [--recovery-target-name=target-name]\n"));
 	printf(_("                 [--timeline=timeline]\n"));

 	printf(_("\n %s show -B backup-dir\n"), PROGRAM_NAME);
@@ -267,7 +269,7 @@ help_restore(void)
 	printf(_("                 [--timeline=timeline] [-T OLDDIR=NEWDIR]\n"));
 	printf(_("                 [--immediate] [--recovery-target-name=target-name]\n"));
 	printf(_("                 [--recovery-target-action=pause|promote|shutdown]\n"));
-	printf(_("                 [--restore-as-replica]\n\n"));
+	printf(_("                 [--restore-as-replica] [--no-validate]\n\n"));

 	printf(_("  -B, --backup-path=backup-path    location of the backup storage area\n"));
 	printf(_("      --instance=instance_name     name of the instance\n"));
@@ -292,6 +294,7 @@ help_restore(void)

 	printf(_("  -R, --restore-as-replica write a minimal recovery.conf in the output directory\n"));
 	printf(_("                           to ease setting up a standby server\n"));
+	printf(_("      --no-validate            disable backup validation during restore\n"));

 	printf(_("\n  Logging options:\n"));
 	printf(_("      --log-level-console=log-level-console\n"));
@@ -332,6 +335,8 @@ help_validate(void)
 	printf(_("      --xid=xid                    transaction ID up to which recovery will proceed\n"));
 	printf(_("      --inclusive=boolean          whether we stop just after the recovery target\n"));
 	printf(_("      --timeline=timeline          recovering into a particular timeline\n"));
+	printf(_("      --recovery-target-name=target-name\n"));
+	printf(_("                                   the named restore point to which recovery will proceed\n"));

 	printf(_("\n  Logging options:\n"));
 	printf(_("      --log-level-console=log-level-console\n"));
src/pg_probackup.c
@@ -70,6 +70,7 @@ static char *target_action = NULL;
 static pgRecoveryTarget *recovery_target_options = NULL;

 bool		restore_as_replica = false;
+bool		restore_no_validate = false;

 /* delete options */
 bool		delete_wal = false;
@@ -148,6 +149,7 @@ static pgut_option options[] =
 	{ 's', 25, "recovery-target-name",	&target_name,		SOURCE_CMDLINE },
 	{ 's', 26, "recovery-target-action", &target_action,	SOURCE_CMDLINE },
 	{ 'b', 'R', "restore-as-replica",	&restore_as_replica,	SOURCE_CMDLINE },
+	{ 'b', 27, "no-validate",			&restore_no_validate,	SOURCE_CMDLINE },
 	/* delete options */
 	{ 'b', 130, "wal",					&delete_wal,		SOURCE_CMDLINE },
 	{ 'b', 131, "expired",				&delete_expired,	SOURCE_CMDLINE },
@@ -196,7 +198,6 @@ main(int argc, char *argv[])
 {
 	char	   *command = NULL,
 			   *command_name;
-	char		path[MAXPGPATH];
 	/* Check if backup_path is directory. */
 	struct stat stat_buf;
 	int			rc;
@@ -377,6 +378,8 @@ main(int argc, char *argv[])
 	 */
 	if (instance_name && backup_subcmd != SET_CONFIG_CMD)
 	{
+		char		path[MAXPGPATH];
+
 		/* Read environment variables */
 		pgut_getopt_env(options);

@@ -434,7 +437,7 @@ main(int argc, char *argv[])
 		/* parse all recovery target options into recovery_target_options structure */
 		recovery_target_options = parseRecoveryTargetOptions(target_time, target_xid,
 								target_inclusive, target_tli, target_immediate,
-								target_name, target_action);
+								target_name, target_action, restore_no_validate);
 	}

 	if (num_threads < 1)
src/pg_probackup.h
@@ -108,11 +108,12 @@ typedef struct pgFile
 	CompressAlg	compress_alg;	/* compression algorithm applied to the file */
 	volatile pg_atomic_flag lock;	/* lock for synchronization of parallel threads */
 	datapagemap_t pagemap;		/* bitmap of pages updated since previous backup */
+	bool		pagemap_isabsent;	/* Used to mark files with unknown state of pagemap,
+									 * i.e. datafiles without _ptrack */
 } pgFile;

 /* Special values of datapagemap_t bitmapsize */
 #define PageBitmapIsEmpty 0		/* Used to mark unchanged datafiles */
-#define PageBitmapIsAbsent -1	/* Used to mark files with unknown state of pagemap, i.e. datafiles without _ptrack */

 /* Current state of backup */
 typedef enum BackupStatus
@@ -263,6 +264,7 @@ typedef struct pgRecoveryTarget
 	bool		recovery_target_immediate;
 	const char *recovery_target_name;
 	const char *recovery_target_action;
+	bool		restore_no_validate;
 } pgRecoveryTarget;

 /* Union to ease operations on relation pages */
@@ -398,7 +400,7 @@ extern parray * readTimeLineHistory_probackup(TimeLineID targetTLI);
 extern pgRecoveryTarget *parseRecoveryTargetOptions(
 	const char *target_time, const char *target_xid,
 	const char *target_inclusive, TimeLineID target_tli, bool target_immediate,
-	const char *target_name, const char *target_action);
+	const char *target_name, const char *target_action, bool restore_no_validate);

 extern void opt_tablespace_map(pgut_option *opt, const char *arg);
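The header change separates two states that the old PageBitmapIsAbsent value conflated: bitmapsize == PageBitmapIsEmpty now means the file verifiably has no changed pages, while the new pagemap_isabsent flag marks files whose change state is unknown (e.g. datafiles without a _ptrack fork) and must therefore be read in full. A condensed C sketch of the resulting decision logic in backup_data_file (back_up_block and the incremental flag are hypothetical stand-ins):

static void
sketch_backup_file(pgFile *file, BlockNumber nblocks, bool incremental)
{
	BlockNumber blknum;

	/* Proven unchanged: empty bitmap, pagemap known, file present in parent. */
	if (incremental &&
		file->pagemap.bitmapsize == PageBitmapIsEmpty &&
		file->exists_in_prev && !file->pagemap_isabsent)
		return;					/* skip the whole file */

	if (file->pagemap.bitmapsize == PageBitmapIsEmpty ||
		file->pagemap_isabsent || !file->exists_in_prev)
	{
		/* Change state unknown or file is new: scan every block. */
		for (blknum = 0; blknum < nblocks; blknum++)
			back_up_block(file, blknum);
	}
	else
	{
		/* Known change set: visit only the marked blocks. */
		datapagemap_iterator_t *iter = datapagemap_iterate(&file->pagemap);

		while (datapagemap_next(iter, &blknum))
			back_up_block(file, blknum);
	}
}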
src/restore.c (129 changed lines)
@@ -243,66 +243,69 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
 	if (is_restore)
 		check_tablespace_mapping(dest_backup);

-	if (dest_backup->backup_mode != BACKUP_MODE_FULL)
-		elog(INFO, "Validating parents for backup %s", base36enc(dest_backup->start_time));
-
-	/*
-	 * Validate backups from base_full_backup to dest_backup.
-	 */
-	for (i = base_full_backup_index; i >= dest_backup_index; i--)
+	if (!is_restore || !rt->restore_no_validate)
 	{
-		pgBackup   *backup = (pgBackup *) parray_get(backups, i);
-		pgBackupValidate(backup);
-		/* Maybe we should be more paranoid and check for !BACKUP_STATUS_OK? */
-		if (backup->status == BACKUP_STATUS_CORRUPT)
-		{
-			corrupted_backup = backup;
-			corrupted_backup_index = i;
-			break;
-		}
-		/* We do not validate WAL files of intermediate backups
-		 * It`s done to speed up restore
-		 */
-	}
-	/* There is no point in wal validation
-	 * if there is corrupted backup between base_backup and dest_backup
-	 */
-	if (!corrupted_backup)
+		if (dest_backup->backup_mode != BACKUP_MODE_FULL)
+			elog(INFO, "Validating parents for backup %s", base36enc(dest_backup->start_time));
+
 		/*
-		 * Validate corresponding WAL files.
-		 * We pass base_full_backup timeline as last argument to this function,
-		 * because it's needed to form the name of xlog file.
+		 * Validate backups from base_full_backup to dest_backup.
 		 */
-		validate_wal(dest_backup, arclog_path, rt->recovery_target_time,
-					 rt->recovery_target_xid, base_full_backup->tli);
-
-	/* Set every incremental backup between corrupted backup and nearest FULL backup as orphans */
-	if (corrupted_backup)
-	{
-		for (i = corrupted_backup_index - 1; i >= 0; i--)
+		for (i = base_full_backup_index; i >= dest_backup_index; i--)
 		{
 			pgBackup   *backup = (pgBackup *) parray_get(backups, i);
-			/* Mark incremental OK backup as orphan */
-			if (backup->backup_mode == BACKUP_MODE_FULL)
-				break;
-			if (backup->status != BACKUP_STATUS_OK)
-				continue;
-			else
+			pgBackupValidate(backup);
+			/* Maybe we should be more paranoid and check for !BACKUP_STATUS_OK? */
+			if (backup->status == BACKUP_STATUS_CORRUPT)
 			{
-				char	   *backup_id,
-						   *corrupted_backup_id;
+				corrupted_backup = backup;
+				corrupted_backup_index = i;
+				break;
+			}
+			/* We do not validate WAL files of intermediate backups
+			 * It`s done to speed up restore
+			 */
+		}
+		/* There is no point in wal validation
+		 * if there is corrupted backup between base_backup and dest_backup
+		 */
+		if (!corrupted_backup)
+			/*
+			 * Validate corresponding WAL files.
+			 * We pass base_full_backup timeline as last argument to this function,
+			 * because it's needed to form the name of xlog file.
+			 */
+			validate_wal(dest_backup, arclog_path, rt->recovery_target_time,
+						 rt->recovery_target_xid, base_full_backup->tli);

-				backup->status = BACKUP_STATUS_ORPHAN;
-				pgBackupWriteBackupControlFile(backup);
+		/* Set every incremental backup between corrupted backup and nearest FULL backup as orphans */
+		if (corrupted_backup)
+		{
+			for (i = corrupted_backup_index - 1; i >= 0; i--)
+			{
+				pgBackup   *backup = (pgBackup *) parray_get(backups, i);
+				/* Mark incremental OK backup as orphan */
+				if (backup->backup_mode == BACKUP_MODE_FULL)
+					break;
+				if (backup->status != BACKUP_STATUS_OK)
+					continue;
+				else
+				{
+					char	   *backup_id,
+							   *corrupted_backup_id;

-				backup_id = base36enc_dup(backup->start_time);
-				corrupted_backup_id = base36enc_dup(corrupted_backup->start_time);
+					backup->status = BACKUP_STATUS_ORPHAN;
+					pgBackupWriteBackupControlFile(backup);

-				elog(WARNING, "Backup %s is orphaned because his parent %s is corrupted",
-					 backup_id, corrupted_backup_id);
+					backup_id = base36enc_dup(backup->start_time);
+					corrupted_backup_id = base36enc_dup(corrupted_backup->start_time);

-				free(backup_id);
-				free(corrupted_backup_id);
+					elog(WARNING, "Backup %s is orphaned because his parent %s is corrupted",
+						 backup_id, corrupted_backup_id);
+
+					free(backup_id);
+					free(corrupted_backup_id);
+				}
 			}
 		}
 	}
@@ -312,7 +315,12 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
 	 * produce corresponding error message
 	 */
 	if (dest_backup->status == BACKUP_STATUS_OK)
-		elog(INFO, "Backup %s is valid.", base36enc(dest_backup->start_time));
+	{
+		if (rt->restore_no_validate)
+			elog(INFO, "Backup %s is used without validation.", base36enc(dest_backup->start_time));
+		else
+			elog(INFO, "Backup %s is valid.", base36enc(dest_backup->start_time));
+	}
 	else if (dest_backup->status == BACKUP_STATUS_CORRUPT)
 		elog(ERROR, "Backup %s is corrupt.", base36enc(dest_backup->start_time));
 	else if (dest_backup->status == BACKUP_STATUS_ORPHAN)
@@ -582,14 +590,6 @@ restore_directories(const char *pg_data_dir, const char *backup_dir)
 							 linked_path, dir_created, link_name);
 				}

-				/*
-				 * This check was done in check_tablespace_mapping(). But do
-				 * it again.
-				 */
-				if (!dir_is_empty(linked_path))
-					elog(ERROR, "restore tablespace destination is not empty: \"%s\"",
-						 linked_path);
-
 				if (link_sep)
 					elog(LOG, "create directory \"%s\" and symbolic link \"%.*s\"",
 						 linked_path,
@@ -1012,7 +1012,8 @@ parseRecoveryTargetOptions(const char *target_time,
 						   TimeLineID	target_tli,
 						   bool			target_immediate,
 						   const char  *target_name,
-						   const char  *target_action)
+						   const char  *target_action,
+						   bool			restore_no_validate)
 {
 	time_t		dummy_time;
 	TransactionId dummy_xid;
@@ -1037,6 +1038,7 @@ parseRecoveryTargetOptions(const char *target_time,
 	rt->recovery_target_immediate = false;
 	rt->recovery_target_name = NULL;
 	rt->recovery_target_action = NULL;
+	rt->restore_no_validate = false;

 	/* parse given options */
 	if (target_time)
@@ -1045,7 +1047,7 @@ parseRecoveryTargetOptions(const char *target_time,
 		rt->time_specified = true;
 		rt->target_time_string = target_time;

-		if (parse_time(target_time, &dummy_time))
+		if (parse_time(target_time, &dummy_time, false))
 			rt->recovery_target_time = dummy_time;
 		else
 			elog(ERROR, "Invalid value of --time option %s", target_time);
@@ -1083,6 +1085,11 @@ parseRecoveryTargetOptions(const char *target_time,
 		rt->recovery_target_immediate = target_immediate;
 	}

+	if (restore_no_validate)
+	{
+		rt->restore_no_validate = restore_no_validate;
+	}
+
 	if (target_name)
 	{
 		recovery_target_specified++;
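Most of the churn in the first restore.c hunk is re-indentation: the existing validation loop moves, unchanged, inside a new guard so that --no-validate can skip it on restore. A condensed paraphrase (validate_backup_chain is a hypothetical stand-in for the loop plus validate_wal):

	if (!is_restore || !rt->restore_no_validate)
	{
		/* Validate the chain from the base FULL backup down to the target,
		 * mark descendants of a corrupted backup as ORPHAN, then check WAL. */
		validate_backup_chain();
	}

	if (dest_backup->status == BACKUP_STATUS_OK)
		elog(INFO, rt->restore_no_validate
				? "Backup %s is used without validation."
				: "Backup %s is valid.",
			 base36enc(dest_backup->start_time));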
src/util.c (26 changed lines)
@@ -191,7 +191,7 @@ get_data_checksum_version(bool safe)


 /*
- * Convert time_t value to ISO-8601 format string
+ * Convert time_t value to ISO-8601 format string. Always set timezone offset.
  */
 void
 time2iso(char *buf, size_t len, time_t time)
@@ -199,25 +199,23 @@ time2iso(char *buf, size_t len, time_t time)
 	struct tm  *ptm = gmtime(&time);
 	time_t		gmt = mktime(ptm);
 	time_t		offset;
+	char	   *ptr = buf;

 	ptm = localtime(&time);
 	offset = time - gmt + (ptm->tm_isdst ? 3600 : 0);

-	strftime(buf, len, "%Y-%m-%d %H:%M:%S", ptm);
+	strftime(ptr, len, "%Y-%m-%d %H:%M:%S", ptm);

-	if (offset != 0)
+	ptr += strlen(ptr);
+	snprintf(ptr, len - (ptr - buf), "%c%02d",
+			 (offset >= 0) ? '+' : '-',
+			 abs((int) offset) / SECS_PER_HOUR);
+
+	if (abs((int) offset) % SECS_PER_HOUR != 0)
 	{
-		buf += strlen(buf);
-		sprintf(buf, "%c%02d",
-				(offset >= 0) ? '+' : '-',
-				abs((int) offset) / SECS_PER_HOUR);
-
-		if (abs((int) offset) % SECS_PER_HOUR != 0)
-		{
-			buf += strlen(buf);
-			sprintf(buf, ":%02d",
-					abs((int) offset % SECS_PER_HOUR) / SECS_PER_MINUTE);
-		}
+		ptr += strlen(ptr);
+		snprintf(ptr, len - (ptr - buf), ":%02d",
+				 abs((int) offset % SECS_PER_HOUR) / SECS_PER_MINUTE);
 	}
 }
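With this rewrite time2iso always emits an explicit UTC offset (previously a zero offset produced no suffix at all), and the bounded snprintf calls replace sprintf so a short caller buffer cannot overflow. Illustrative output shapes, assuming the function above:

	char	buf[32];

	time2iso(buf, sizeof(buf), now);
	/* "2018-06-02 12:34:56+03"     whole-hour offset               */
	/* "2018-06-02 12:34:56+05:30"  offset with a minutes component */
	/* "2018-06-02 12:34:56+00"     UTC now gets an explicit +00    */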
src/utils/logger.c
@@ -171,6 +171,10 @@ elog_internal(int elevel, bool file_only, const char *fmt, va_list args)
 	write_to_stderr = elevel >= LOG_LEVEL_CONSOLE && !file_only;

 	pthread_lock(&log_file_mutex);
+#ifdef WIN32
+	std_args = NULL;
+	error_args = NULL;
+#endif
 	loggin_in_progress = true;

 	/* We need copy args only if we need write to error log file */
@@ -237,7 +241,6 @@ elog_internal(int elevel, bool file_only, const char *fmt, va_list args)
 	if (write_to_stderr)
 	{
 		write_elevel(stderr, elevel);
-
 		if (write_to_file)
 			vfprintf(stderr, fmt, std_args);
 		else
src/utils/pgut.c
@@ -256,7 +256,8 @@ assign_option(pgut_option *opt, const char *optarg, pgut_optsrc src)
 			message = "a valid string. But provided: ";
 			break;
 		case 't':
-			if (parse_time(optarg, opt->var))
+			if (parse_time(optarg, opt->var,
+						   opt->source == SOURCE_FILE))
 				return;
 			message = "a time";
 			break;
@@ -746,9 +747,12 @@ parse_uint64(const char *value, uint64 *result, int flags)

 /*
  * Convert ISO-8601 format string to time_t value.
+ *
+ * If utc_default is true, then if timezone offset isn't specified tz will be
+ * +00:00.
  */
 bool
-parse_time(const char *value, time_t *result)
+parse_time(const char *value, time_t *result, bool utc_default)
 {
 	size_t		len;
 	int			fields_num,
@@ -870,7 +874,7 @@ parse_time(const char *value, time_t *result)
 	*result = mktime(&tm);

 	/* adjust time zone */
-	if (tz_set)
+	if (tz_set || utc_default)
 	{
 		time_t		ltime = time(NULL);
 		struct tm  *ptm = gmtime(&ltime);
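parse_time now takes a third parameter saying how to interpret a timestamp that carries no explicit offset: values read from configuration files (opt->source == SOURCE_FILE above) and the server-reported recovery_time in backup.c are treated as UTC, while command-line values keep using the local timezone. A hedged usage sketch against the declaration from this diff:

	time_t	t_local,
			t_utc;

	/* No offset in the string: interpretation depends on utc_default. */
	parse_time("2018-06-02 12:34:56", &t_local, false);	/* local timezone */
	parse_time("2018-06-02 12:34:56", &t_utc, true);	/* forced UTC */

	/* With an explicit offset the flag is irrelevant. */
	parse_time("2018-06-02 12:34:56+03", &t_utc, true);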
src/utils/pgut.h
@@ -204,7 +204,7 @@ extern bool parse_int32(const char *value, int32 *result, int flags);
 extern bool parse_uint32(const char *value, uint32 *result, int flags);
 extern bool parse_int64(const char *value, int64 *result, int flags);
 extern bool parse_uint64(const char *value, uint64 *result, int flags);
-extern bool parse_time(const char *value, time_t *result);
+extern bool parse_time(const char *value, time_t *result, bool utc_default);
 extern bool parse_int(const char *value, int *result, int flags,
 					  const char **hintmsg);
tests/__init__.py
@@ -60,6 +60,10 @@ def load_tests(loader, tests, pattern):
 #        ptrack backup on replica should work correctly
 # archive:
 #        immediate recovery and full recovery
+# backward compatibility:
+#        previous version catalog must be readable by newer version
+#        incremental chain from previous version can be continued
+#        backups from previous version can be restored
 # 10vanilla_1.3ptrack +
 # 10vanilla+
 # 9.6vanilla_1.3ptrack +
tests/archive.py
@@ -29,7 +29,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         self.init_pb(backup_dir)
         self.add_instance(backup_dir, 'node', node)
         self.set_archiving(backup_dir, 'node', node)
-        node.start()
+        node.slow_start()

         node.safe_psql(
             "postgres",
@@ -45,11 +45,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):

         self.restore_node(
             backup_dir, 'node', node)
-        node.start()
-        while node.safe_psql(
-                "postgres",
-                "select pg_is_in_recovery()") == 't\n':
-            sleep(1)
+        node.slow_start()

         # Recreate backup calagoue
         self.init_pb(backup_dir)
@@ -65,11 +61,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         self.restore_node(
             backup_dir, 'node', node,
             options=["--recovery-target-action=promote"])
-        node.start()
-        while node.safe_psql(
-                "postgres",
-                "select pg_is_in_recovery()") == 't\n':
-            sleep(1)
+        node.slow_start()

         self.assertEqual(
             result, node.safe_psql("postgres", "SELECT * FROM t_heap"),
@@ -97,7 +89,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         self.init_pb(backup_dir)
         self.add_instance(backup_dir, 'node', node)
         self.set_archiving(backup_dir, 'node', node)
-        node.start()
+        node.slow_start()

         # FIRST TIMELINE
         node.safe_psql(
@@ -117,11 +109,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         self.restore_node(
             backup_dir, 'node', node,
             options=['--immediate', '--recovery-target-action=promote'])
-        node.start()
-        while node.safe_psql(
-                "postgres",
-                "select pg_is_in_recovery()") == 't\n':
-            sleep(1)
+
+        node.slow_start()

         if self.verbose:
             print(node.safe_psql(
                 "postgres",
@@ -152,11 +141,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         self.restore_node(
             backup_dir, 'node', node,
             options=['--immediate', '--recovery-target-action=promote'])
-        node.start()
-        while node.safe_psql(
-                "postgres",
-                "select pg_is_in_recovery()") == 't\n':
-            sleep(1)
+        node.slow_start()

         if self.verbose:
             print(
@@ -184,11 +169,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         self.restore_node(
             backup_dir, 'node', node,
             options=['--immediate', '--recovery-target-action=promote'])
-        node.start()
-        while node.safe_psql(
-                "postgres",
-                "select pg_is_in_recovery()") == 't\n':
-            sleep(1)
+
+        node.slow_start()

         if self.verbose:
             print('Fourth timeline')
             print(node.safe_psql(
@@ -200,10 +182,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         self.restore_node(
             backup_dir, 'node', node,
             options=['--immediate', '--recovery-target-action=promote'])
-        node.start()
-        while node.safe_psql(
-                "postgres", "select pg_is_in_recovery()") == 't\n':
-            sleep(1)
+
+        node.slow_start()

         if self.verbose:
             print('Fifth timeline')
             print(node.safe_psql(
@@ -215,10 +195,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         self.restore_node(
             backup_dir, 'node', node,
             options=['--immediate', '--recovery-target-action=promote'])
-        node.start()
-        while node.safe_psql(
-                "postgres", "select pg_is_in_recovery()") == 't\n':
-            sleep(1)
+
+        node.slow_start()

         if self.verbose:
             print('Sixth timeline')
             print(node.safe_psql(
@@ -269,7 +247,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         node.append_conf(
             'postgresql.auto.conf', "archive_command = '{0} %p %f'".format(
                 archive_script_path))
-        node.start()
+        node.slow_start()
         try:
             self.backup_node(
                 backup_dir, 'node', node,
@@ -330,7 +308,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
             f.flush()
             f.close()

-        node.start()
+        node.slow_start()
         node.safe_psql(
             "postgres",
             "create table t_heap as select i as id, md5(i::text) as text, "
@@ -390,7 +368,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
             f.flush()
             f.close()

-        node.start()
+        node.slow_start()
         node.safe_psql(
             "postgres",
             "create table t_heap as select i as id, md5(i::text) as text, "
@@ -445,7 +423,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         self.init_pb(backup_dir)
         # ADD INSTANCE 'MASTER'
         self.add_instance(backup_dir, 'master', master)
-        master.start()
+        master.slow_start()

         replica = self.make_simple_node(
             base_dir="{0}/{1}/replica".format(module_name, fname))
@@ -463,9 +441,10 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         # Settings for Replica
         self.restore_node(backup_dir, 'master', replica)
         self.set_replica(master, replica, synchronous=True)
+
         self.add_instance(backup_dir, 'replica', replica)
         self.set_archiving(backup_dir, 'replica', replica, replica=True)
-        replica.start()
+        replica.slow_start(replica=True)

         # Check data correctness on replica
         after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
@@ -502,7 +481,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         self.restore_node(backup_dir, 'replica', data_dir=node.data_dir)
         node.append_conf(
             'postgresql.auto.conf', 'port = {0}'.format(node.port))
-        node.start()
+        node.slow_start()
         # CHECK DATA CORRECTNESS
         after = node.safe_psql("postgres", "SELECT * FROM t_heap")
         self.assertEqual(before, after)
@@ -534,7 +513,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
             backup_dir, 'replica', data_dir=node.data_dir, backup_id=backup_id)
         node.append_conf(
             'postgresql.auto.conf', 'port = {0}'.format(node.port))
-        node.start()
+        node.slow_start()
         # CHECK DATA CORRECTNESS
         after = node.safe_psql("postgres", "SELECT * FROM t_heap")
         self.assertEqual(before, after)
@@ -568,7 +547,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         # ADD INSTANCE 'MASTER'
         self.add_instance(backup_dir, 'master', master)
         self.set_archiving(backup_dir, 'master', master)
-        master.start()
+        master.slow_start()

         master.psql(
             "postgres",
@@ -594,7 +573,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         self.add_instance(backup_dir, 'replica', replica)
         # SET ARCHIVING FOR REPLICA
         self.set_archiving(backup_dir, 'replica', replica, replica=True)
-        replica.start()
+        replica.slow_start(replica=True)

         # CHECK LOGICAL CORRECTNESS on REPLICA
         after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
@@ -649,7 +628,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         # ADD INSTANCE 'MASTER'
         self.add_instance(backup_dir, 'master', master)
         self.set_archiving(backup_dir, 'master', master)
-        master.start()
+        master.slow_start()

         master.psql(
             "postgres",
@@ -676,7 +655,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         # self.add_instance(backup_dir, 'replica', replica)
         # SET ARCHIVING FOR REPLICA
         # self.set_archiving(backup_dir, 'replica', replica, replica=True)
-        replica.start()
+        replica.slow_start(replica=True)

         # CHECK LOGICAL CORRECTNESS on REPLICA
         after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
@@ -771,7 +750,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         # Check data correctness
         node.cleanup()
         self.restore_node(backup_dir, 'node', node)
-        node.start()
+        node.slow_start()

         self.assertEqual(
             result,
             node.safe_psql(
@@ -803,7 +783,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         if self.get_version(node) < self.version_to_num('10.0'):
             return unittest.skip('You need PostgreSQL 10 for this test')
         else:
-            pg_receivexlog_path = node.get_bin_path('pg_receivewal')
+            pg_receivexlog_path = self.get_bin_path('pg_receivewal')

         pg_receivexlog = self.run_binary(
             [
@@ -842,7 +822,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         # Check data correctness
         node.cleanup()
         self.restore_node(backup_dir, 'node', node)
-        node.start()
+        node.slow_start()

         self.assertEqual(
             result, node.safe_psql("postgres", "SELECT * FROM t_heap"),
             'data after restore not equal to original data')
@ -83,7 +83,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
|
||||
"--recovery-target-action=promote"]),
|
||||
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
|
||||
repr(self.output), self.cmd))
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
|
||||
self.assertEqual(full_result, full_result_new)
|
||||
node.cleanup()
|
||||
@ -98,7 +99,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
|
||||
"--recovery-target-action=promote"]),
|
||||
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
|
||||
repr(self.output), self.cmd))
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
|
||||
self.assertEqual(page_result, page_result_new)
|
||||
node.cleanup()
|
||||
@ -113,7 +115,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
|
||||
"--recovery-target-action=promote"]),
|
||||
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
|
||||
repr(self.output), self.cmd))
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
ptrack_result_new = node.execute("postgres", "SELECT * FROM t_heap")
|
||||
self.assertEqual(ptrack_result, ptrack_result_new)
|
||||
node.cleanup()
|
||||
@ -187,7 +190,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
|
||||
"--recovery-target-action=promote"]),
|
||||
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
|
||||
repr(self.output), self.cmd))
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
|
||||
self.assertEqual(full_result, full_result_new)
|
||||
node.cleanup()
|
||||
@ -202,7 +206,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
|
||||
"--recovery-target-action=promote"]),
|
||||
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
|
||||
repr(self.output), self.cmd))
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
|
||||
self.assertEqual(page_result, page_result_new)
|
||||
node.cleanup()
|
||||
@ -217,7 +222,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
|
||||
"--recovery-target-action=promote"]),
|
||||
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
|
||||
repr(self.output), self.cmd))
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
ptrack_result_new = node.execute("postgres", "SELECT * FROM t_heap")
|
||||
self.assertEqual(ptrack_result, ptrack_result_new)
|
||||
node.cleanup()
|
||||
@ -294,7 +300,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
|
||||
"--recovery-target-action=promote"]),
|
||||
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
|
||||
repr(self.output), self.cmd))
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
|
||||
self.assertEqual(full_result, full_result_new)
|
||||
node.cleanup()
|
||||
@ -309,7 +316,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
|
||||
"--recovery-target-action=promote"]),
|
||||
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
|
||||
repr(self.output), self.cmd))
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
|
||||
self.assertEqual(page_result, page_result_new)
|
||||
node.cleanup()
|
||||
@ -324,7 +332,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
|
||||
"--recovery-target-action=promote"]),
|
||||
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
|
||||
repr(self.output), self.cmd))
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
ptrack_result_new = node.execute("postgres", "SELECT * FROM t_heap")
|
||||
self.assertEqual(ptrack_result, ptrack_result_new)
|
||||
node.cleanup()
|
||||
@ -401,7 +410,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
|
||||
"--recovery-target-action=promote"]),
|
||||
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
|
||||
repr(self.output), self.cmd))
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
|
||||
self.assertEqual(full_result, full_result_new)
|
||||
node.cleanup()
|
||||
@ -416,7 +426,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
|
||||
"--recovery-target-action=promote"]),
|
||||
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
|
||||
repr(self.output), self.cmd))
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
|
||||
self.assertEqual(page_result, page_result_new)
|
||||
node.cleanup()
|
||||
@ -431,7 +442,8 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
|
||||
"--recovery-target-action=promote"]),
|
||||
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
|
||||
repr(self.output), self.cmd))
|
||||
node.start()
|
||||
node.slow_start()
|
||||
|
||||
ptrack_result_new = node.execute("postgres", "SELECT * FROM t_heap")
|
||||
self.assertEqual(ptrack_result, ptrack_result_new)
|
||||
node.cleanup()
|
||||
|
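The pattern above repeats throughout the suite: every hand-rolled wait loop around pg_is_in_recovery() is collapsed into a single slow_start() call. A minimal sketch of the two idioms, assuming node is a testgres node like the ones these tests create:

import time

def wait_until_started(node):
    # The old idiom, removed by this commit: start the node, then poll
    # pg_is_in_recovery() by hand until the server leaves recovery.
    node.start()
    while node.safe_psql(
            "postgres", "select pg_is_in_recovery()") == 't\n':
        time.sleep(1)

# The new idiom: one call that hides the same polling loop
# (slow_start is defined in tests/helpers/ptrack_helpers.py below).
# node.slow_start()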
@ -508,10 +508,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
# START RESTORED NODE
restored_node.append_conf(
    "postgresql.auto.conf", "port = {0}".format(restored_node.port))
restored_node.start()
while restored_node.safe_psql(
        "postgres", "select pg_is_in_recovery()") == 't\n':
    time.sleep(1)
restored_node.slow_start()

result_new = restored_node.safe_psql(
    "postgres", "select * from pgbench_accounts")

@ -946,11 +943,8 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
# START RESTORED NODE
node_restored.append_conf(
    'postgresql.auto.conf', 'port = {0}'.format(node_restored.port))
node_restored.start()
node_restored.slow_start()

while node_restored.safe_psql(
        "postgres", "select pg_is_in_recovery()") == 't\n':
    time.sleep(1)
result_new = node_restored.safe_psql(
    "postgres", "select * from t_heap")
@ -25,6 +25,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
    [--replica-timeout=timeout]

pg_probackup show-config -B backup-dir --instance=instance_name
    [--format=format]

pg_probackup backup -B backup-path -b backup-mode --instance=instance_name
    [-C] [--stream [-S slot-name]] [--backup-pg-log]

@ -57,14 +58,17 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
    [--immediate] [--recovery-target-name=target-name]
    [--recovery-target-action=pause|promote|shutdown]
    [--restore-as-replica]
    [--no-validate]

pg_probackup validate -B backup-dir [--instance=instance_name]
    [-i backup-id] [--progress]
    [--time=time|--xid=xid [--inclusive=boolean]]
    [--recovery-target-name=target-name]
    [--timeline=timeline]

pg_probackup show -B backup-dir
    [--instance=instance_name [-i backup-id]]
    [--format=format]

pg_probackup delete -B backup-dir --instance=instance_name
    [--wal] [-i backup-id | --expired]
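The new --no-validate flag added to the restore synopsis is exercised later in the validate tests. A hedged sketch of how the suite's own restore_node wrapper forwards it to the CLI (this fragment runs inside a ProbackupTest method; the helper and these exact arguments appear in the tests below):

# Sketch only: restore the latest backup while skipping validation,
# by passing --no-validate through the test helper to pg_probackup.
restore_out = self.restore_node(
    backup_dir, 'node', node,
    options=["--no-validate"])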
@ -191,7 +191,7 @@ class FalsePositive(ProbackupTest, unittest.TestCase):
    node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)

node.start()
node.slow_start()
# Logical comparison
self.assertEqual(
    result,

@ -290,7 +290,7 @@ class FalsePositive(ProbackupTest, unittest.TestCase):
    node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)

node.start()
node.slow_start()
# Logical comparison
self.assertEqual(
    result,
@ -112,6 +112,39 @@ class ProbackupException(Exception):
return '\n ERROR: {0}\n CMD: {1}'.format(repr(self.message), self.cmd)


def slow_start(self, replica=False):

    # wait for https://github.com/postgrespro/testgres/pull/50
    # self.poll_query_until(
    #     "postgres",
    #     "SELECT not pg_is_in_recovery()",
    #     raise_operational_error=False)

    self.start()
    if not replica:
        while True:
            try:
                self.poll_query_until(
                    "postgres",
                    "SELECT not pg_is_in_recovery()")
                break
            except Exception as e:
                continue
    else:
        self.poll_query_until(
            "postgres",
            "SELECT pg_is_in_recovery()")

    # while True:
    #     try:
    #         self.poll_query_until(
    #             "postgres",
    #             "SELECT pg_is_in_recovery()")
    #         break
    #     except ProbackupException as e:
    #         continue


class ProbackupTest(object):
    # Class attributes
    enterprise = is_enterprise()

@ -205,6 +238,8 @@ class ProbackupTest(object):
os.makedirs(real_base_dir)

node = testgres.get_new_node('test', base_dir=real_base_dir)
# bind slow_start() to this 'node' instance as a method
node.slow_start = slow_start.__get__(node)
node.should_rm_dirs = True
node.init(
    initdb_params=initdb_params, allow_streaming=set_replication)
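The slow_start.__get__(node) line uses the descriptor protocol to graft a plain function onto a single instance as a bound method. A self-contained illustration of the same trick (the Node class and greet function here are mine, for demonstration only):

def greet(self):
    return "hello from {0}".format(self.name)

class Node(object):
    def __init__(self, name):
        self.name = name

node = Node("test")
# Functions are descriptors: __get__ binds 'greet' to this one
# instance, just as ptrack_helpers binds slow_start() to each node.
node.greet = greet.__get__(node)
print(node.greet())  # -> hello from test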
@ -3,7 +3,6 @@ import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
from datetime import datetime, timedelta
import subprocess
import time

module_name = 'page'

@ -32,8 +31,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
    }
)
node_restored = self.make_simple_node(
    base_dir="{0}/{1}/node_restored".format(module_name, fname),
)
    base_dir="{0}/{1}/node_restored".format(module_name, fname))

self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)

@ -48,32 +46,27 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
    "create table t_heap tablespace somedata as select i as id, "
    "md5(i::text) as text, "
    "md5(repeat(i::text,10))::tsvector as tsvector "
    "from generate_series(0,1024) i;"
)
    "from generate_series(0,1024) i;")

node.safe_psql(
    "postgres",
    "vacuum t_heap"
)
    "vacuum t_heap")

self.backup_node(backup_dir, 'node', node)

node.safe_psql(
    "postgres",
    "delete from t_heap where ctid >= '(11,0)'"
)
    "delete from t_heap where ctid >= '(11,0)'")
node.safe_psql(
    "postgres",
    "vacuum t_heap"
)
    "vacuum t_heap")

self.backup_node(
    backup_dir, 'node', node, backup_type='page',
    options=['--log-level-file=verbose']
)
    options=['--log-level-file=verbose'])

self.backup_node(
    backup_dir, 'node', node, backup_type='page'
)
    backup_dir, 'node', node, backup_type='page')

if self.paranoia:
    pgdata = self.pgdata_content(node.data_dir)

@ -86,8 +79,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
options=[
    "-j", "4",
    "-T", "{0}={1}".format(old_tablespace, new_tablespace),
    "--recovery-target-action=promote"]
)
    "--recovery-target-action=promote"])

# Physical comparison
if self.paranoia:

@ -96,21 +88,17 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):

node_restored.append_conf(
    "postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.start()

while node_restored.safe_psql(
        "postgres", "select pg_is_in_recovery()") == 't\n':
    time.sleep(1)
node_restored.slow_start()

# Logical comparison
result1 = node.safe_psql(
    "postgres",
    "select * from t_heap"
)
    "select * from t_heap")

result2 = node_restored.safe_psql(
    "postgres",
    "select * from t_heap"
)
    "select * from t_heap")

self.assertEqual(result1, result2)

# Clean after yourself

@ -174,7 +162,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
    backup_id=full_backup_id, options=["-j", "4"]),
    '\n Unexpected Error Message: {0}\n'
    ' CMD: {1}'.format(repr(self.output), self.cmd))
node.start()
node.slow_start()
full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()

@ -187,7 +175,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
    backup_id=page_backup_id, options=["-j", "4"]),
    '\n Unexpected Error Message: {0}\n'
    ' CMD: {1}'.format(repr(self.output), self.cmd))
node.start()
node.slow_start()
page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(page_result, page_result_new)
node.cleanup()

@ -253,7 +241,8 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
    "--recovery-target-action=promote"]),
    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
        repr(self.output), self.cmd))
node.start()
node.slow_start()

full_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()

@ -270,7 +259,8 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
    "--recovery-target-action=promote"]),
    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
        repr(self.output), self.cmd))
node.start()
node.slow_start()

page_result_new = node.execute("postgres", "SELECT * FROM t_heap")
self.assertEqual(page_result, page_result_new)
node.cleanup()

@ -348,10 +338,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
# START RESTORED NODE
restored_node.append_conf(
    "postgresql.auto.conf", "port = {0}".format(restored_node.port))
restored_node.start()
while restored_node.safe_psql(
        "postgres", "select pg_is_in_recovery()") == 't\n':
    time.sleep(1)
restored_node.slow_start()

result_new = restored_node.safe_psql(
    "postgres", "select * from pgbench_accounts")
@ -586,10 +586,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
        repr(self.output), self.cmd)
)
node.start()
while node.psql(
        "postgres", "select pg_is_in_recovery()")[0] != 0:
    time.sleep(1)
node.slow_start()
full_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()

@ -611,10 +608,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
    node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)

node.start()
while node.psql(
        "postgres", "select pg_is_in_recovery()")[0] != 0:
    time.sleep(1)
node.slow_start()
ptrack_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(ptrack_result, ptrack_result_new)

@ -691,11 +685,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
        repr(self.output), self.cmd)
)
node.start()

while node.psql(
        "postgres", "select pg_is_in_recovery()")[0] != 0:
    time.sleep(1)
node.slow_start()

full_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)

@ -721,10 +711,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
    node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)

node.start()
while node.psql(
        "postgres", "select pg_is_in_recovery()")[0] != 0:
    time.sleep(1)
node.slow_start()
ptrack_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(ptrack_result, ptrack_result_new)

@ -1176,11 +1163,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# START RESTORED NODE
node_restored.append_conf(
    'postgresql.auto.conf', 'port = {0}'.format(node_restored.port))
node_restored.start()
node_restored.slow_start()

while node_restored.psql(
        "postgres", "select pg_is_in_recovery()")[0] != 0:
    time.sleep(1)
result_new = node_restored.safe_psql(
    "postgres", "select * from t_heap")

@ -1412,10 +1396,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# START RESTORED NODE
restored_node.append_conf(
    "postgresql.auto.conf", "port = {0}".format(restored_node.port))
restored_node.start()
while restored_node.psql(
        "postgres", "select pg_is_in_recovery()")[0] != 0:
    time.sleep(1)
restored_node.slow_start()

# COMPARE LOGICAL CONTENT
result_new = restored_node.safe_psql(

@ -1450,11 +1431,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# START RESTORED NODE
restored_node.append_conf(
    "postgresql.auto.conf", "port = {0}".format(restored_node.port))
restored_node.start()
while restored_node.psql(
        "postgres",
        "select pg_is_in_recovery()") == 't\n':
    time.sleep(1)
restored_node.slow_start()

result_new = restored_node.safe_psql(
    "postgres", "select * from t_heap")

@ -1553,11 +1530,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# START RESTORED NODE
restored_node.append_conf(
    "postgresql.auto.conf", "port = {0}".format(restored_node.port))
restored_node.start()
while restored_node.psql(
        "postgres",
        "select pg_is_in_recovery()") == 't\n':
    time.sleep(1)
restored_node.slow_start()

result_new = restored_node.safe_psql(
    "postgres",
@ -46,7 +46,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

self.backup_node(backup_dir, 'node', node, options=['-j100', '--stream'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])

node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
node.safe_psql('postgres', 'cluster t_heap using t_btree')

@ -103,7 +103,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

self.backup_node(backup_dir, 'node', node, options=['-j100', '--stream'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])

node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
node.safe_psql('postgres', 'cluster t_heap using t_gist')

@ -172,7 +172,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

self.backup_node(backup_dir, 'replica', replica, options=['-j100', '--stream',
self.backup_node(backup_dir, 'replica', replica, options=['-j10', '--stream',
    '--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])

master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')

@ -242,7 +242,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

self.backup_node(backup_dir, 'replica', replica, options=['-j100', '--stream',
self.backup_node(backup_dir, 'replica', replica, options=['-j10', '--stream',
    '--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])

master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')

@ -45,7 +45,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

# Make full backup to clean every ptrack
self.backup_node(backup_dir, 'node', node, options=['-j100', '--stream'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
for i in idx_ptrack:
    idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
        node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])

@ -100,7 +100,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

# Make full backup to clean every ptrack
self.backup_node(backup_dir, 'replica', replica, options=['-j100', '--stream'])
self.backup_node(backup_dir, 'replica', replica, options=['-j10', '--stream'])
for i in idx_ptrack:
    idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
        replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])

@ -45,7 +45,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

# Make full backup to clean every ptrack
self.backup_node(backup_dir, 'node', node, options=['-j100', '--stream'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
for i in idx_ptrack:
    idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
        node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])

@ -43,7 +43,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

self.backup_node(backup_dir, 'node', node, options=['-j100', '--stream'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])

node.safe_psql('postgres', 'vacuum freeze t_heap')
node.safe_psql('postgres', 'checkpoint')

@ -111,7 +111,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

# Take PTRACK backup to clean every ptrack
self.backup_node(backup_dir, 'replica', replica, options=['-j100',
self.backup_node(backup_dir, 'replica', replica, options=['-j10',
    '--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])

master.safe_psql('postgres', 'vacuum freeze t_heap')

@ -44,7 +44,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

self.backup_node(backup_dir, 'node', node, options=['-j100', '--stream'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])

node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
node.safe_psql('postgres', 'vacuum full t_heap')

@ -44,7 +44,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

self.backup_node(backup_dir, 'node', node, options=['-j100', '--stream'])
self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])

node.safe_psql('postgres', 'delete from t_heap where id > 128;')
node.safe_psql('postgres', 'vacuum t_heap')

@ -116,7 +116,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

# Take PTRACK backup to clean every ptrack
self.backup_node(backup_dir, 'replica', replica, options=['-j100',
self.backup_node(backup_dir, 'replica', replica, options=['-j10',
    '--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])

master.safe_psql('postgres', 'delete from t_heap where id > 128;')
@ -50,7 +50,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
self.set_replica(master, replica)

# Check data correctness on replica
replica.start(["-t", "600"])
replica.slow_start(replica=True)
after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)

@ -82,7 +82,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
self.restore_node(backup_dir, 'replica', data_dir=node.data_dir)
node.append_conf(
    'postgresql.auto.conf', 'port = {0}'.format(node.port))
node.start()
node.slow_start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)

@ -113,7 +113,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
    backup_dir, 'replica', data_dir=node.data_dir, backup_id=backup_id)
node.append_conf(
    'postgresql.auto.conf', 'port = {0}'.format(node.port))
node.start()
node.slow_start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)

@ -143,7 +143,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
self.set_archiving(backup_dir, 'master', master)
# force more frequent wal switch
master.append_conf('postgresql.auto.conf', 'archive_timeout = 10')
master.start()
master.slow_start()

replica = self.make_simple_node(
    base_dir="{0}/{1}/replica".format(module_name, fname))

@ -166,7 +166,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
# Settings for Replica
self.set_replica(master, replica)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.start(["-t", "600"])
replica.slow_start(replica=True)

# Check data correctness on replica
after = replica.safe_psql("postgres", "SELECT * FROM t_heap")

@ -200,7 +200,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
self.restore_node(backup_dir, 'replica', data_dir=node.data_dir)
node.append_conf(
    'postgresql.auto.conf', 'port = {0}'.format(node.port))
node.start()
node.slow_start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)

@ -231,7 +231,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
    backup_dir, 'replica', data_dir=node.data_dir, backup_id=backup_id)
node.append_conf(
    'postgresql.auto.conf', 'port = {0}'.format(node.port))
node.start()
node.slow_start()
# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)

@ -260,7 +260,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
self.set_archiving(backup_dir, 'master', master)
# force more frequent wal switch
master.append_conf('postgresql.auto.conf', 'archive_timeout = 10')
master.start()
master.slow_start()

replica = self.make_simple_node(
    base_dir="{0}/{1}/replica".format(module_name, fname))

@ -287,15 +287,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.append_conf(
    'postgresql.auto.conf', 'port = {0}'.format(replica.port))
replica.start(["-t", "600"])

time.sleep(1)
self.assertEqual(
    master.safe_psql(
        "postgres",
        "select exists(select * from pg_stat_replication)"
    ).rstrip(),
    't')
replica.start()

# Clean after yourself
self.del_test_dir(module_name, fname)
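Note the replica=True argument on standbys: per the helper above, a primary waits until pg_is_in_recovery() turns false, while a replica waits until it turns true. In short, a sketch using the master/replica names from these tests:

# Primary: block until the server is out of recovery.
master.slow_start()
# Standby: block until the server reports being in recovery.
replica.slow_start(replica=True)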
@ -53,10 +53,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
recovery_conf = os.path.join(node.data_dir, "recovery.conf")
self.assertEqual(os.path.isfile(recovery_conf), True)

node.start()
while node.safe_psql(
        "postgres", "select pg_is_in_recovery()") == 't\n':
    time.sleep(1)
node.slow_start()

after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)

@ -104,10 +101,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
        repr(self.output), self.cmd))

node.start()
while node.safe_psql(
        "postgres", "select pg_is_in_recovery()") == 't\n':
    time.sleep(1)
node.slow_start()

after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)

@ -149,11 +143,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
        repr(self.output), self.cmd))

node.start(params=['-t', '10'])
while node.safe_psql(
        "postgres", "select pg_is_in_recovery()") == 't\n':
    time.sleep(1)

node.slow_start()
pgbench = node.pgbench(
    stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
    options=['-T', '10', '-c', '2', '--no-vacuum'])

@ -181,11 +171,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
    node)["recovery_target_timeline"]
self.assertEqual(int(recovery_target_timeline), target_tli)

node.start()
while node.safe_psql(
        "postgres", "select pg_is_in_recovery()") == 't\n':
    time.sleep(1)

node.slow_start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)

@ -234,11 +220,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
        repr(self.output), self.cmd))

node.start()
while node.safe_psql(
        "postgres", "select pg_is_in_recovery()") == 't\n':
    time.sleep(1)

node.slow_start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)

@ -297,11 +279,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
        repr(self.output), self.cmd))

node.start()
while node.safe_psql(
        "postgres", "select pg_is_in_recovery()") == 't\n':
    time.sleep(1)

node.slow_start()
after = node.safe_psql("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
self.assertEqual(

@ -366,11 +344,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
        repr(self.output), self.cmd))

node.start()
while node.safe_psql(
        "postgres", "select pg_is_in_recovery()") == 't\n':
    time.sleep(1)

node.slow_start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)
self.assertEqual(

@ -420,11 +394,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
        repr(self.output), self.cmd))

node.start()
while node.safe_psql(
        "postgres", "select pg_is_in_recovery()") == 't\n':
    time.sleep(1)

node.slow_start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)

@ -479,11 +449,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
        repr(self.output), self.cmd))

node.start()
while node.safe_psql(
        "postgres", "select pg_is_in_recovery()") == 't\n':
    time.sleep(1)

node.slow_start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)

@ -535,11 +501,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
        repr(self.output), self.cmd))

node.start()
while node.safe_psql(
        "postgres", "select pg_is_in_recovery()") == 't\n':
    time.sleep(1)

node.slow_start()
after = node.execute("postgres", "SELECT * FROM pgbench_branches")
self.assertEqual(before, after)

@ -602,11 +564,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
        repr(self.output), self.cmd))

node.start()
while node.safe_psql(
        "postgres", "select pg_is_in_recovery()") == 't\n':
    time.sleep(1)

node.slow_start()
bbalance = node.execute(
    "postgres", "SELECT sum(bbalance) FROM pgbench_branches")
delta = node.execute(

@ -674,11 +632,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
    options=["-j", "4", "--recovery-target-action=promote"]),
    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
        repr(self.output), self.cmd))
node.start()
while node.safe_psql(
        "postgres", "select pg_is_in_recovery()") == 't\n':
    time.sleep(1)

node.slow_start()
bbalance = node.execute(
    "postgres", "SELECT sum(bbalance) FROM pgbench_branches")
delta = node.execute(

@ -769,10 +723,8 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
        repr(self.output), self.cmd))

node.start()
while node.safe_psql(
        "postgres", "select pg_is_in_recovery()") == 't\n':
    time.sleep(1)
node.slow_start()

result = node.execute("postgres", "SELECT id FROM test")
self.assertEqual(result[0][0], 1)

@ -802,10 +754,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
        repr(self.output), self.cmd))

node.start()
while node.safe_psql(
        "postgres", "select pg_is_in_recovery()") == 't\n':
    time.sleep(1)
node.slow_start()
result = node.execute("postgres", "SELECT id FROM test OFFSET 1")
self.assertEqual(result[0][0], 2)

@ -881,10 +830,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
    "--recovery-target-action=promote"]),
    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
        repr(self.output), self.cmd))
node.start()
while node.safe_psql(
        "postgres", "select pg_is_in_recovery()") == 't\n':
    time.sleep(1)
node.slow_start()

count = node.execute("postgres", "SELECT count(*) FROM tbl")
self.assertEqual(count[0][0], 4)

@ -935,10 +881,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
        repr(self.output), self.cmd))

node.start()
while node.safe_psql(
        "postgres", "select pg_is_in_recovery()") == 't\n':
    time.sleep(1)
node.slow_start()

result = node.psql("postgres", 'select * from t_heap')
self.assertTrue('does not exist' in result[2].decode("utf-8"))

@ -987,11 +930,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
        repr(self.output), self.cmd))

node.start()
while node.safe_psql(
        "postgres", "select pg_is_in_recovery()") == 't\n':
    time.sleep(1)

node.slow_start()
result = node.psql("postgres", 'select * from t_heap')
self.assertTrue('does not exist' in result[2].decode("utf-8"))

@ -1039,10 +978,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
        repr(self.output), self.cmd))

node.start()
while node.safe_psql(
        "postgres", "select pg_is_in_recovery()") == 't\n':
    time.sleep(1)
node.slow_start()

result = node.psql("postgres", 'select * from t_heap')
self.assertEqual(True, 'does not exist' in result[2].decode("utf-8"))

@ -1097,10 +1033,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
pgdata_restored = self.pgdata_content(node.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)

node.start()
while node.safe_psql(
        "postgres", "select pg_is_in_recovery()") == 't\n':
    time.sleep(1)
node.slow_start()

result = node.psql("postgres", 'select * from t_heap')
self.assertTrue('does not exist' in result[2].decode("utf-8"))

@ -1149,10 +1082,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
    "--recovery-target-name=savepoint",
    "--recovery-target-action=promote"])

node.start()
while node.safe_psql(
        "postgres", "select pg_is_in_recovery()") == 't\n':
    time.sleep(1)
node.slow_start()

result_new = node.safe_psql("postgres", "select * from t_heap")
res = node.psql("postgres", "select * from t_heap_1")
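Most of these restore tests drive point-in-time recovery through the same helper. A hedged sketch of such a call, running inside a RestoreTest method: the option names come from the synopsis above, while the target_time value is purely illustrative.

# Sketch: restore to a timestamp, then promote the server once the
# recovery target is reached (target_time is a hypothetical value).
self.restore_node(
    backup_dir, 'node', node,
    options=[
        "-j", "4",
        "--time={0}".format(target_time),
        "--recovery-target-action=promote"])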
@ -748,6 +748,93 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)

# @unittest.skip("skip")
def test_validate_instance_with_corrupted_full_and_try_restore(self):
    """make archive node, take FULL, PAGE1, PAGE2, FULL2, PAGE3 backups,
    corrupt file in FULL backup and run validate on instance,
    expect FULL to gain status CORRUPT, PAGE1 and PAGE2 to gain status ORPHAN,
    try to restore backup with --no-validate option"""
    fname = self.id().split('.')[3]
    node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
        initdb_params=['--data-checksums'],
        pg_options={'wal_level': 'replica'}
        )
    backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)
    self.set_archiving(backup_dir, 'node', node)
    node.start()

    node.safe_psql(
        "postgres",
        "create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,10000) i")
    file_path_t_heap = node.safe_psql(
        "postgres",
        "select pg_relation_filepath('t_heap')").rstrip()
    # FULL1
    backup_id_1 = self.backup_node(backup_dir, 'node', node)

    node.safe_psql(
        "postgres",
        "insert into t_heap select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,10000) i")
    # PAGE1
    backup_id_2 = self.backup_node(backup_dir, 'node', node, backup_type='page')

    # PAGE2
    node.safe_psql(
        "postgres",
        "insert into t_heap select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(20000,30000) i")
    backup_id_3 = self.backup_node(backup_dir, 'node', node, backup_type='page')

    # FULL2
    backup_id_4 = self.backup_node(backup_dir, 'node', node)

    # PAGE3
    node.safe_psql(
        "postgres",
        "insert into t_heap select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(30000,40000) i")
    backup_id_5 = self.backup_node(backup_dir, 'node', node, backup_type='page')

    # Corrupt some file in FULL backup
    file_full = os.path.join(backup_dir, 'backups/node', backup_id_1, 'database', file_path_t_heap)
    with open(file_full, "rb+", 0) as f:
        f.seek(84)
        f.write(b"blah")
        f.flush()
        f.close()

    # Validate Instance
    try:
        self.validate_pb(backup_dir, 'node', options=['--log-level-file=verbose'])
        self.assertEqual(1, 0, "Expecting Error because of data files corruption.\n Output: {0} \n CMD: {1}".format(
            repr(self.output), self.cmd))
    except ProbackupException as e:
        self.assertTrue(
            'INFO: Validating backup {0}'.format(backup_id_1) in e.message
            and "INFO: Validate backups of the instance 'node'" in e.message
            and 'WARNING: Invalid CRC of backup file "{0}"'.format(file_full) in e.message
            and 'WARNING: Backup {0} data files are corrupted'.format(backup_id_1) in e.message,
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))

    self.assertEqual('CORRUPT', self.show_pb(backup_dir, 'node', backup_id_1)['status'], 'Backup STATUS should be "CORRUPT"')
    self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_2)['status'], 'Backup STATUS should be "ORPHAN"')
    self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_3)['status'], 'Backup STATUS should be "ORPHAN"')
    self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_4)['status'], 'Backup STATUS should be "OK"')
    self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_5)['status'], 'Backup STATUS should be "OK"')

    node.cleanup()
    restore_out = self.restore_node(
        backup_dir, 'node', node,
        options=["--no-validate"])
    self.assertIn(
        "INFO: Restore of backup {0} completed.".format(backup_id_5),
        restore_out,
        '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
            repr(self.output), self.cmd))

    # Clean after yourself
    self.del_test_dir(module_name, fname)

# @unittest.skip("skip")
def test_validate_instance_with_corrupted_full(self):
    """make archive node, take FULL, PAGE1, PAGE2, FULL2, PAGE3 backups,

@ -1320,7 +1407,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
self.restore_node(backup_dir, 'node1', data_dir=node2.data_dir)
node2.append_conf(
    'postgresql.auto.conf', 'port = {0}'.format(node2.port))
node2.start()
node2.slow_start()

timeline_node1 = node1.get_control_data()["Latest checkpoint's TimeLineID"]
timeline_node2 = node2.get_control_data()["Latest checkpoint's TimeLineID"]

@ -1582,3 +1669,62 @@ class ValidateTest(ProbackupTest, unittest.TestCase):

# Clean after yourself
self.del_test_dir(module_name, fname)

def test_file_size_corruption_no_validate(self):

    fname = self.id().split('.')[3]
    node = self.make_simple_node(
        base_dir="{0}/{1}/node".format(module_name, fname),
        # initdb_params=['--data-checksums'],
        pg_options={'wal_level': 'replica'}
        )

    backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')

    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)
    self.set_archiving(backup_dir, 'node', node)

    node.start()

    node.safe_psql(
        "postgres",
        "create table t_heap as select 1 as id, md5(i::text) as text, "
        "md5(repeat(i::text,10))::tsvector as tsvector "
        "from generate_series(0,1000) i")
    node.safe_psql(
        "postgres",
        "CHECKPOINT;")

    heap_path = node.safe_psql(
        "postgres",
        "select pg_relation_filepath('t_heap')").rstrip()
    heap_size = node.safe_psql(
        "postgres",
        "select pg_relation_size('t_heap')")

    backup_id = self.backup_node(
        backup_dir, 'node', node, backup_type="full",
        options=["-j", "4"], async=False, gdb=False)

    node.stop()
    node.cleanup()

    # Let's do file corruption
    with open(os.path.join(backup_dir, "backups", 'node', backup_id, "database", heap_path), "rb+", 0) as f:
        f.truncate(int(heap_size) - 4096)
        f.flush()
        f.close()

    node.cleanup()

    try:
        self.restore_node(
            backup_dir, 'node', node,
            options=["--no-validate"])
    except ProbackupException as e:
        self.assertTrue("ERROR: Data files restoring failed" in e.message, repr(e.message))
        print("\nExpected error: \n" + e.message)

    # Clean after yourself
    self.del_test_dir(module_name, fname)
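Both validate tests damage a backed-up relation file in place, either by overwriting a few bytes at a fixed offset or by truncating the file's tail. A small stand-alone helper capturing that technique (the function and parameter names here are mine, not part of the suite):

def corrupt_file(path, offset=84, junk=b"blah", truncate_by=None):
    """Damage a backup file so pg_probackup's CRC or size checks must
    fail: overwrite a few bytes at 'offset', or cut 'truncate_by' bytes
    off the end, mirroring the two validate tests above."""
    with open(path, "rb+", 0) as f:
        if truncate_by is not None:
            f.seek(0, 2)                      # seek to end of file
            f.truncate(f.tell() - truncate_by)
        else:
            f.seek(offset)
            f.write(junk)
        f.flush()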